Usage : %s messageid (default)message Usage : %s message_id default_message %s usage : node.servers Starting Servers. Stopping Servers. 
Starting Fallover Servers. Shutting Down Fallover Servers. %s : called with invalid process name. %1$s : Failed fsck -y of filesystem %2$s. %1$s : Failed mount of filesystem %2$s. %1$s : Filesystem %2$s already mounted. Usage : %s filesystems_to_mount %1$s : Failed fsck -p of filesystem %2$s. %1$s : %2$s mounted. %1$s : Backgrounding attempted mount of %2$s:%3$s. %1$s : Failed mount of %2$s:%3$s after 0 attempts. Usage : %2$s tries host filesystem. %1$s : Failed mount of %2$s:%3$s. Usage : %s attempts hostname file_systems_to_activate . %1$s : Failed varyonvg of %2$s . Usage : %s volume_groups_to_varyon %1$s : Volume group %2$s already varied on. %1$s : Failed obtaining logical volume for %2$s from ODM. %1$s : Failed umount of %2$s. Usage : %s filesystems_to_umount %s : Bad number of arguments. %1$s : Failed varyoff of %2$s. Usage : %s volume_groups_to_varyoff %1$s : Volume group %2$s already varied off. %1$s : Unable to make device %2$s available. Check hardware connections. Completed execution of %1$s with parameters: %2$s. Exit Status = %3$s. Starting execution of %1$s with parameters: %2$s. %s usage : cl_disk_available hdisk ... %1$s : Undefined disk device %2$s (May have been duplicate). %s usage : cl_disk_keep diskname %1$s : Disk %2$s marked as KEPT %1$s : Error marking %2$s as KEPT %s usage : cl_disk_reset /dev/diskname ... %s usage : cl_disk_test diskname ... %1$s : Disk %2$s fenced. %1$s : %2$s not a SCSI device. %1$s : Unable to export %2$s. %s : Unable to start rpc.mountd via SRC. Usage : %s '[hostname_given_root_access]' 'fs1 fs2 ...' %s usage : cl_is_scsidisk hdiskname %1$s : Device %2$s not configured. %s : Bad number of arguments! %s : aborted by user! %s : cllockd must be running for demo %s : imserv already running. Usage : %s %1$s : %2$s is not a known file system! %1$s : unable to start %2$s : %3$s not available. Usage : %s filesystem process [retries] %1$s : running %2$s %1$s waiting to start %2$s Usage : %s %1$s : Failed ifconfig %2$s inet %3$s netmask %4$s up.! %1$s : Configuring network interface %2$s at IP address %3$s %1$s : Detaching network interface %2$s and network interface %3$s Usage : %s cascading/rotating acquire/release interface new_address old_address netmask or %s swap_adapter swap interface1 address1 interface2 address2 netmask %1$s : Failed syncvg of %2$s. %1$s : Successful syncvg of %2$s %1$s : Backgrounding sync of %2$s %1$s : Volume group %2$s not varied on. Syncvg not attempted. Usage : %s [-b | -f] volume_groups_to_varyon %1$s occurred at %2$s Usage : event Usage : list of volume groups %1$s : encountered an error on scsidiskutil -k /dev/%2$s %1$s %2$s Usage : %s device (e.g. hdisk1) Failed %s : Script Exiting. Manual intervention may be required. Check cluster.log [default location: /usr/es/adm] and/or hacmp.out [default location: /tmp] for more information. self_down() not implemented. %s usage : topchng.rc node_id %s : Environment variable LOCALNODEID not set. Erroneous topchng.rc call ! Null message, save for future use. %s : Using standby interface as service interface. Failed %s : Script Exiting. Manual intervention may be required. Check cluster.log [default location: /usr/es/adm] and/or /tmp/clstrmgr.scripts.out for more information. %1$s will be exchanged with %2$s Netchng may have failed to swap interfaces. Use the netstat -r command to check routes. %1$s exchange with %2$s network_join: No action defined. global_network_failure: No action defined. node_network_failure: No actions defined. Incorrect FAILTYPE parameters. 
%s usage: cl_is_array hdiskname ERROR: Could not get the value of %1$s. %1$s : Failed to unexport the file system %2$s. A flag or parameter is missing. USAGE : cl_9333diskreset controller_special_file disk_number [-D] disk number is 0, 1, 2 or 3 -D option turns on debug EXAMPLE : lsdev gives hdisk1 Defined 00-02-00-03 857MB F Serial-Link Disk Drive serdasdc0 Available 00-02-00 Serial-Link Disk Controller reset syntax is : cl_9333diskreset /dev/serdasdc0 3 Usage : %s [ -lvip ] mount_point Or: Usage: %s -g volume_group %1$s: ODMErrNo: %2$ld: Retrieval of ODM error message failed. %1$s: ODMErrNo : %2$ld: %3$s %1$s : Unable to find %2$s object with %3$s. %1$s : ODM failure getting %2$s object(s) Usage: %s ctlrx Could not open the Adapter %1$s Errno : %2$ld Could not open the Controller %1$s Errno : %2$ld Could not reset the Controller %1$s Errno : %2$ld cfgdevice: odm_initialize() failed. cfgdevice: open class CuDv failed odmerr %ld. cfgdevice: failed to find CuDv object. cfgdevice: ODM failure getting CuDv object. cfgdevice: error closing CuDv object class. Usage: %s [-k] [-u] Directories Print out list of processes which are open against a given filesystem and appends pid with either c (cur dir), r (root dir), e (exec'd entry), t (loader list) or f (fs being checked). -k Kills the process -u Appends (login name) -d Show Debug Output -? or -h Shows usage Empty file name %s: Mount point not found Illegal option %s ignored. Not an nfs directory. usage : %s openx failed on device %s. errorno %ld %1$s : Volume group %2$s in non-concurrent mode. %1$s : Volume group %2$s must be varied on. %1$s : Failed mode3 -n of %2$s. %1$s : Failed varyonvg -m2 -n -c of %2$s. Usage: %s [-n | -s] volume_groups_to_varyon %1$s : Failed reset/reserve for device /dev/%2$s. %1$s : Failed reset for device /dev/%2$s. Usage : %s [-m] [-l] [-s] [-b] [-i] [-r] %1$s : called with flags %2$s Error: %s : Cannot start Cluster Services without portmap. %s : Unable to start Cluster Information Services (clinfo) via SRC. %s : Unable to start Cluster Manager (clstrmgr) via SRC. %s : Unable to start Cluster SMUX Peer Daemon (clsmuxpd) via SRC. Error: %s : SRC unable to start snmpd. %s : Unable to start Cluster SMUX Peer Daemon (clsmuxpd) without snmpd. Error: %s : Cannot start Cluster Lock Manager (cllockd) without Cluster Manager (clstrmgr). %s : Unable to start Cluster Lock Manager (cllockd) via SRC. %s : Cluster Manager (clstrmgr) is already running. %s : Cluster SMUX Peer Daemon (clsmuxpd) is already running. %s : Cluster Lock Manager (cllockd) is already running. Starting execution of %1$s with parameters: %2$s Starting syslogd Starting portmapper Usage : %s -f | -g[r] [ -s ] [ -y ] [ -N | -R | -B ] %s : Ignoring obsolete -i option. %s : Ignoring obsolete -t option. %s : Shutdown confirmed by operator. %s : Shutdown not confirmed by operator. Usage : %s [-a] [cllockd|clstrmgr|clinfo|clsmuxpd] Usage : %s %s : Unexpected termination of %s. %s : Halting system immediately!!! Usage : %s [-boot] [-l] [-i] [-b] [-N | -R | -B] %s : Update of /etc/inittab failed. %s: srcmstr is not running. Checking for srcmstr active... ERROR: srcmstr is not accepting connections. complete. ERROR: srcmstr is not running. Usage : %s -M Mode -n Name -m Method -p Persistence [-P pid] [-c class] [-t type] [-a alert] [-N resource_name] [-C resource_class] [-T resource_type] [-i error_id] [-l error_id_label] Usage : %s 'true|false' %1$s : error modifying inittab. Original file saved as %2$s. %1$s : error modifying rc.net. Original file saved as %2$s. 
%1$s : Unable to move old inittab %2$s to %3$s. %1$s : Unable to move old rc.net %2$s to %3$s. Usage : %s Notify_Method_Name Usage : %s cmd [args] %1$s/%2$s saved in %3$s/%4$s %1$s/%2$s/%3$s copied to %4$s/%5$s %1$s/%2$s/%3$s : Not found! %1$s/%2$s copied to %3$s/%4$s %1$s/%2$s : Not found! Usage : %s Where code is one of the following: HS: Hot standby RS: Mode 1 Rotating Standby M3RS: Mode 3 Rotating Standby MT: Mutual Takeover OST: One-Sided Takeover TPT: Third-Party Takeover 143: One-For-Three Takeover Usage : %s {-n | -p | -P | -c | -t | -a | -i | -l | -N | -C | -T} Usage : %s %1$s : bad format for %2$s Usage : %s [-d odmdir] [-c class] name=val ... Usage : %s -s or /t%s -a [-c configuration] [-r role] IPAT: Unset or set to invalid value. %s : Cannot determine current configuration or current configuration undefined. Can not find valid address to node %s. %s : Shutting down Cluster Group. Continue[y/n]? Error : %s : You must be root to run this command. No action taken. Exiting. Error : %s : Could not send SIGHUP to %s %s : Cluster Manager not currently running. Usage: %s [ -c | -t ] filename %s: Unable to properly generate new 9333 Disk Fencing ODM. Searching for 9333 Disk Fencing ODM objects. Please wait... Restoring previously configured HACMPfence objects. Usage : %s [-a] [cllockd|clstrmgr|clinfo|clsmuxpd|clresmgrd] Usage : %s vars_file OST '{ PAS or SAS/STBY }' %s vars_file MT ' { PS or SS }' %s vars_file TPT '{ PAS or SAS or STBY }' %s vars_file HS ' { AS or STBY }' %s : Can not open file %s %s : bad config argument. Updating ODM... Bad value for conf. %1$s : Foregrounding sync of %2$s %1$s : Successful varyonvg of %2$s with warning %3$s. %s : WARNING: You have specified a concurrent volume group. In order to use concurrent volume groups, the HACMP Concurrent LVM must be configured (use cllvm command). %s : ERROR: You have specified a concurrent volume group. In order to use concurrent volume groups, the HACMP Concurrent LVM must be installed and configured. Usage : %s -o|-t|-r [-d odmdir] [-c class] [-l label] -n nodeid name=val ... %1$s: Failed to varyon volume group %2$s in passive mode. %s: ERROR: The environment variable LOCALNODENAME was not set %1$s: Could not vary on volume group %2$s in passive mode because it is currently in use %1$s: Failed to varyon volume group %2$s in active mode. %s: Unable to turn name serving OFF. %s: Unable to turn ypbind (NIS) OFF. %s: Unable to turn name serving ON. %s: Unable to turn ypbind (NIS) ON. Starting ypbind. Usage: start_imagedemo [ -d directory_containing_images ] [ -a Service_Address ] %s does not exist or is not executable. Failed to stop %s. Usage: swap_adapter nodename network failed_ip_address list_of_good_ipaddresses Interface for %s is not found. Can not perform adapter swap on %1$s and %2$s %s: Failure occurred while swapping of Connections network protocols. Manual intervention required. No service address %s was taken by this node. No missing standby was found for service address %s. Cluster has been unstable too long. WARNING: Cluster %s has been running recovery program '%s' for %s seconds. Please check cluster status. %s: Event %s on Cluster %s Completed Successfully. At the user's request, node(s) %s of cluster %s has suspended monitoring the resources for the last %s minutes. Restart cluster services to make the cluster-managed resources on this node highly available again. %s:There are no nodes with unmanaged resources on Cluster %s. Boot communication interface %s is now available. 
Boot communication interface with address %s is no longer available for use, due to either a boot communication interface failure or IP address takeover. IP Address Takeover of %s failed. %1$s: must wait at least %2$s minutes before cluster restart %1$s: Sleeping %2$s minutes %1$s: The cluster is in migration. The startup option "Manually" for "Manage Resource Groups" is not allowed until the migration is complete. EVENT START EVENT COMPLETED EVENT FAILED usage: %s: -t clstrmgr [-l level] [-R file] usage: %s: -t cllockd [-R file] usage: %s: -t clruncmd Setting to maximum debug level (9). Setting to minimum debug level (0). This turns debugging OFF. Waiting for clstrmgr to respond to SRC request... %s: SRC command failed. %s: Lock dump command failed. SRC command succeeded. Lock dump command succeeded. The cluster log entry for %s could not be found in the HACMPlogs ODM. Defaulting to log directory %s for log file %s. usage: %s: scripts [-h host] [-s] [-f] [-d days] [-R file] [event ...] usage: %s: syslog [-h host] [-e] [-w] [-d days] [-R file] [process ...] Limited to 7 days maximum with -d option. %1$s: %2$s.%3$s not found. Skipping. %1$s: %2$s not found. Skipping. %1$s: %2$s not found. %s may not be compatible with PowerHA SystemMirror. Data file /usr/es/sbin/cluster/tools/clvdevice.dat does not exist. USAGE: %s [-R filename] Your CPU may not be compatible with PowerHA SystemMirror. Data file /usr/es/sbin/cluster/tools/clvcpu.dat does not exist. Data file /usr/es/sbin/cluster/tools/clvbos.dat does not exist. Data file %s does not exist. %s is not compatible with PowerHA SystemMirror. It is currently %s. Data file /usr/es/sbin/cluster/clvinval.dat does not exist. The fix for APAR %s is not installed on your system. Data file /usr/es/sbin/cluster/tools/clvreq.dat does not exist. %1$s : Failed concurrent varyon of %2$s due to unsupported mixture of RAID and 9333 devices. %1$s : Failed convaryonvg of volume group %2$s. %s Unable to varyon concurrent RAID %s %1$s Concurrent disk array is reserved, unable to use %2$s %s usage: cl_is_array hdiskname usage: %s volume_groups_to_varyon usage: %s volume_group %1$s : Invalid volume group %2$s. Data file %s does not exist. usage: %s volume_group %1$s: Device %2$s is not available for concurrent use. %1$s: Unable to varyon volume group %2$s for concurrent use. %1$s: Unable to varyon concurrent RAID volume group %2$s. %s: Incorrect hardware. Volume group %s not found Usage: %s hostname [command] %1$s: %2$s does not exist. %s: marserv already running. %1$s: Database file %2$s does not exist. %1$s: Logfile %2$s does not exist. Usage: %s: [-d database_file] [-l logfile] %1$s: Sending sigkill to marserv PID: %2$s. %s: marserv PID not found. %s: Invalid interface name. %s: Invalid interface type. Usage: %s: address interface %1$s: Unable to make device name for interface %2$s. %1$s: Failed rmdev on %2$s. %1$s: Failed chdev on %2$s. %1$s: Failed mkdev on %2$s. %1$s: Unable to find hardware address for %2$s. %3$s corrupted or missing. Usage: %s: interface %s: Unable to create backup copy of inittab. %s: Unable to create backup copy of rc.net. Usage: error type [-h host] [-R file] Where type is: short - short error report long - long error report cluster - clstrmgr and PowerHA SystemMirror error report The daemon is not running. The daemon is not valid. Valid daemons are: clstrmgr, clinfo, cllockd, clsmuxpd. The daemon is not running. Usage: trace [-t time] [-R file] [-l] daemon ... 
Trace interrupted Where: -l level is the level of debugging performed (0 - 9, where 0 turns debugging off) -R file is the file to which output is saved Allows real-time debugging of the Cluster Manager (clstrmgr) daemon. Where: -R file is the file to which output is saved Allows the Lock Resource Table to be dumped and saved. Invalid arguments Where: -h host is the name of a remote host from which to gather log data -s filters start/complete events -f filters failure events -d days defines the number of previous days from which to retrieve log -R file is file to which output is saved event is a list of cluster events Where: -h host is the name of a remote host from which to gather log data -e filters error events -w filters warning events -d days defines the number of previous days from which to retrieve log -R file is file to which output is saved process is a list of cluster daemon processes Where: -t time is the number of seconds to perform the trace -R file is the file to which output is saved -l chooses a more detailed trace option daemon is a list of cluster daemons to trace Comparing CPU type with supported types listed in clvcpu.dat...Your CPU is compatible with PowerHA SystemMirror. Comparing installed devices with supported devices listed in clvdevice.dat... All installed devices are compatible with PowerHA SystemMirror. Comparing installed software with requirements listed in clvbos.dat... All Base Operating System software requirements met. Checking files for HACMP for OS-specific modifications... Comparing installed PTFs with incompatible PTFs listed in clvinval.dat. No installed PTFs known to be incompatible with PowerHA SystemMirror. Retrieving installed APARs from system... Comparing installed APARs with requirements listed in clvreq.dat... All required APARs installed. %1$s : Removing duplicate disk device: %2$s %1$s : Attempting to make disk device: %2$s %1$s : Failure occurred while processing Resource Group %2$s. Manual intervention required. %1$s : %2$s failed updating 9333 fence registers. Manual intervention required. %1$s : %2$s forced down. Completed Disk Fencing. Exiting immediately. %1$s : NFS Mounting failed. No reachable service interfaces found on node %2$s %1$s : %2$s failed updating SSA fence registers. Manual intervention required. %1$s : Problem with resource location database in HACMPdaemons ODM. Usage: %s groupname Usage: %s groupname [graceful | forced] Usage: %s Usage: %s -s [-r] [-t] or: Usage: %s -v [-r] [-t] [-N] [-m cust_module1 ...] [-e error_count] [-c] [-R filename] %s: Error(s) have been detected. Usage: %s [-r] [-t] Failed executing clchipat on node %s. Adding any necessary PowerHA SystemMirror entries to /etc/inittab and /etc/rc.net for IPAT on node %s. Modifying any necessary PowerHA SystemMirror entries from /etc/inittab and /etc/rc.net on node %s. Removing any existing PowerHA SystemMirror entries from /etc/inittab and /etc/rc.net for IPAT on node %s. All Usage: %s [start | stop] Unable to remove module %s from default verification list of modules Unable to add module %s in default verification list of modules Information: No module is selected to verify WARNING: A link did not exist for the PowerHA SystemMirror ODM %s between directories '%s' and '%s'. The link has been created successfully ERROR: A link does not exist for the PowerHA SystemMirror ODM %s between directories '%s' and '%s'. The link could not be created because a file exists with the same name %s in link location '%s'. 
Please remove the file and re-run verification and synchronization. The link could not be created, please fix the error by running the command "ln -s %s %s" then re-run verification and synchronization. Usage: %s -r recalc_rate -D decay_rate %s: The decay rate must be a floating point numeral. %s: The local node name is undefined. Please ensure the cluster topology has been successfully synchronized. %s: The Lock Tuning Statistic Recalculation Rate must be a positive integer equal to or greater than five (5). %s: The Lock Tuning Statistic Decay Rate must be a positive decimal number between zero and one (0.0 - 1.0). Usage: %s Usage: %s NOTE: Received failed return code from command: %s Usage: %s [-a] [-c] [-C] [-f true|false] [g] [h] [i] [-l] [-o odmdir] [-r] [-R] [s] [-N filename] -n filename [-m methodlist] [-d description] %s: Removing any existing temporary PowerHA SystemMirror ODM entries... %s: Creating temporary PowerHA SystemMirror ODM object classes... %1$s: ODM delimiter for %2$s.odm not found. %s: Adding PowerHA SystemMirror ODM entries to a temporary directory. %1$s: Snapshot %2$s.odm not found. %s: WARNING: Applying a Cluster Snapshot while any node (which is part of the current cluster, or part of the cluster as defined in the snapshot) is currently running the PowerHA SystemMirror Cluster Manager or associated daemons may cause unrecoverable data loss and compromise the cluster integrity. Also, the PowerHA SystemMirror configuration on all nodes in the current cluster will be removed to prevent interference with the applied cluster. %s: Unable to save current snapshot. Aborting. %1$s: Current snapshot saved as %2$s/~snapshot.1 %1$s: Removing current configuration from node %2$s... %s: Verifying configuration using temporary PowerHA SystemMirror ODM entries... %s: VERIFIED error(s) found. Ignoring due to setting of 'force' flag. %s: Removing current PowerHA SystemMirror ODM entries... %s: Adding new PowerHA SystemMirror ODM entries... %s: Synchronizing PowerHA SystemMirror Topology/Resource ODMs to all cluster nodes... %s: Synchronizing PowerHA SystemMirror Resource ODMs to all cluster nodes... %1$s: Verification determined %2$s error(s) occurred. Please correct any errors and retry. %1$s: %2$s already exists. %1$s: Creating file %2$s... %1$s: Unable to create %2$s. Check permissions. %1$s: Executing clsnapshotinfo command on node: %2$s... %1$s: The following information is retrieved from node: %2$s... %1$s: Unable to retrieve information from node %2$s. Aborting. %1$s: Beginning description delimiter for %2$s.odm not found. %1$s: Ending description delimiter for %2$s.odm not found. %1$s: Snapshot %2$s.odm not found. %1$s: Removing cluster snapshot: %2$s.odm %1$s: Removing cluster snapshot: %2$s.info %1$s: Saving current snapshot ODM file to /usr/es/sbin/cluster/snapshots/%2$s.odmXXX where XXX is this Process ID. %s: Error changing the description. Replacing modified file with saved file... %1$s: Saving current snapshot INFO file to /usr/es/sbin/cluster/snapshots/%2$s.infoXXX where XXX is this Process ID. %s: Successfully changed snapshot. Removing temporary files... %s: Local node not properly configured for PowerHA SystemMirror. %1$s: Failed applying Cluster Snapshot: %2$s. %1$s: Succeeded applying Cluster Snapshot: %2$s. %1$s: Failed creating Cluster Snapshot: %2$s. %1$s: Succeeded creating Cluster Snapshot: %2$s. %1$s: Failed generating temporary ODM containing Cluster Snapshot: %2$s. %1$s: Succeeded generating temporary ODM containing Cluster Snapshot: %2$s. 
ERROR: creating or applying snapshots is not recommended while the cluster is in the middle of a migration. Aborting. %1$s: Failed removing Cluster Snapshot: %2$s. %1$s: Succeeded removing Cluster Snapshot: %2$s. %1$s: Failed changing Cluster Snapshot: %2$s. ERROR: unable to verify outbound clcomd communication from the local node, "%1$s", to node "%2$s". %1$s: Succeeded changing Cluster Snapshot: %2$s. Warning: unable to verify inbound clcomd communication from node "%1$s" to the local node, "%2$s". %1$s: Unable to create PowerHA SystemMirror ODM classes in %2$s. The original cluster ODMs will *not* be restored, due to the use of the force flag. ERROR: insufficient space for "%1$s". %2$s KB is needed, but only %3$s KB is available. Attempting to restore the original cluster ODMs... The original cluster ODMs have been successfully restored. ERROR: %1$d error(s) occurred while attempting to restore ODMs. %s: Waiting (up to two minutes) to detect SP switch (css0) to be in up state. %s: Warning: SP switch interface (css0) not in up state. %s: Error: No SP switch boot or service interface (css0) configured. %s: Error: Changes have been made to the Cluster Topology or Resource configuration. The Cluster Configuration must be synchronized before starting Cluster Services. Usage: %s manage | unmanage | event [nodename] %s: Eprimary management via PowerHA SystemMirror is supported only with switch level TB2 (HPS). A switch level other than TB2 was detected. done. Waiting for Resource Manager to initialize... Starting Cluster Services on node: %1$s This may take a few minutes. Please wait... Setting /etc/inittab to start PowerHA SystemMirror at system reboot for node(s): %1$s %1$s: Assigning HPS Eprimary to node %2$s. %s: Primary node not defined. Usage: %s [-s | -t] %1$s: SP switch in progress of being started. Waiting up to %2$s seconds for successful completion. %s: Unable to Estart SP switch. Please contact the Cluster Administrator. Exiting with error. %1$s: Unable to Eunfence node %2$s. Please contact the Cluster Administrator. Exiting with error %1$s: Node %2$s remains isolated. Please contact the Cluster Administrator. Exiting with error. %s: Exiting with error. %s: Received a signal. %s: SP Switch in process of being started... Exiting. %s: SP Switch interface is up. %s: The fault_service_Worm_RTG daemon is not running on the primary node. %1$s: Switch initialization failed on %2$s - i_stub call error. %1$s: Switch initialization started on %2$s. %1$s: Switch initialization failed on %2$s - insufficient memory. %1$s: Switch initialization failed on %2$s - daemon unavailable. %1$s: Switch initialization failed on %2$s - i_stub problem. %1$s: Switch initialization %2$s second time limit exceeded. %1$s: Initialized %2$s node(s). %s: Switch initialization completed. %s: Error reading the SDR. %1$s: Node %2$s failed retrieving Eprimary data. %1$s: Node %2$s failed becoming Eprimary. %1$s: Node %2$s unable to start Eprimary. %1$s: Node %2$s unable to determine Eprimary node. %1$s: event [nodename] %1$s: Failed fence for PVID: %2$s. %1$s: Failed clearing fence for PVID: %2$s. %1$s: Failed setting fence for PVID: %2$s. %s: Self down hard. Should never get here. %1$s: Failed ifconfig %2$s inet %3$s netmask %4$s alias. %1$s: Failed ifconfig %2$s inet %3$s delete. 
usage: %s interface address netmask or: %s interface address netmask delete Usage: cl_dcd_acd -n -d Warning: Cannot remove %1$s from %2$s Fatal Error: Cannot copy %1$s/%2$s to %3$s Successfully restored Default Configuration from Active Configuration. %s does not exist. Exiting normally. Usage: %s [-u] [-i] [-n] [-r] [-t] [-v] [-h] [-x] %1$s: Unable to rsh a command to node %2$s or %3$s is not running a version of PowerHA SystemMirror which supports DARE functionality. Please ensure that the proper entries exist in the /.rhosts files on %4$s for this node (%5$s). Please ensure that a proper version of PowerHA SystemMirror is installed on %6$s before continuing. %1$s: Failed removing DARE lock from node: %2$s. %1$s: Detected that node %2$s has an active Cluster Manager process. %1$s: Unable to copy %2$s/%3$s to %4$s. %1$s: Unable to synchronize the PowerHA SystemMirror ODMs to the Active Configuration Directory on node %2$s. %1$s: Unable to synchronize the PowerHA SystemMirror ODMs to the Stage Configuration Directory on node %2$s. %1$s: Detected that node %2$s has been removed from the new configuration. %s: It is recommended to remove the cluster configuration from the removed node(s). Failure to do so may result in cluster integrity problems if PowerHA SystemMirror is re-started on the removed node(s). %s: Failed removing one or more DARE locks. %s: Succeeded removing all DARE locks. %s: No nodes are configured. %s: Failures detected during verification. Please correct the errors and retry this command. %s: Error detected during synchronization. %s: An active Cluster Manager was detected elsewhere in the cluster. This command must be run from a node with an active Cluster Manager process in order for the Dynamic Reconfiguration to proceed. The new configuration has been propagated to all nodes for your convenience. %s: A change has been detected in both the Topology and Resource PowerHA SystemMirror ODMs. Only changes in one at a time are supported in an active cluster environment. %s: A change has been detected in the Cluster Name. Changing the Cluster Name is not supported in an active cluster environment. %1$s: A node (%2$s) cannot be contacted. PowerHA SystemMirror is currently running on this node, but node %2$s is not present in the current cluster configuration. Cluster services must be stopped on node %2$s before it can be removed from the configuration or renamed. %s: A lock for a Dynamic Reconfiguration event has been detected. Another such event cannot be run until the lock has been released. If no Dynamic Reconfiguration event is currently taking place, and the lock persists, it may be forcefully unlocked via the SMIT PowerHA SystemMirror Problem Determination Tools, Release Locks Set By Dynamic Reconfiguration. %s: Unable to set local lock for Dynamic Reconfiguration event. %s: Unable to create a cluster snapshot of the current running cluster configuration. Aborting. %s: Unable to copy the configuration data from the System Default ODM directory to /usr/es/sbin/cluster/etc/objrepos/stage. Aborting. %s: Unable to synchronize the configuration data to all active remote nodes. Aborting. %s: Requesting a refresh of the Cluster Manager... %s: This command must be run from a node with an active Cluster Manager. %s: Unable to create a DARE lock on a remote node with an active Cluster Manager process. %s: Unable to copy the PowerHA SystemMirror ODMs from the System Default Configuration Directory to the Stage Configuration Directory on node %s. 
%s: Requesting a refresh of each node's Resource Manager... %s: Unable to connect to node %s. The Resource Manager (if active) will not be refreshed on this node at this time. %s: Detected that node %s has an active Cluster Manager process, but the configuration was not successfully synchronized to the node. %s: Could not refresh Resource Manager on node %s. Verifying additional pre-requisites for Dynamic Reconfiguration... ...completed. %s: No changes detected in Cluster Topology or Resources. %1$s: Detected changes to Network Interface Module (NIM) %2$s. Please note that changing NIM parameters via a DARE is currently not supported. %1$s: Detected changes to boot communication interface %2$s. Please note that changing boot communication interface parameters via a DARE is not supported. %s: Detected changes to network %s. Please note that changing network parameters via a DARE is not supported. Error Notify object with name %s already exists. Must specify -M (Mode) option Usage: %s [-L] [-R] [-P] [-r] [-D] [-h] Cluster Lock Manager Cluster Manager Cluster SMUX Peer Daemon Cluster Information Services Starting %1$s (%2$s) subsystem on %3$s PowerHA SystemMirror on %s shutting down. Please exit any cluster applications... Cluster Snapshot of Default System ODM - %s COMMAND: %s Clsmuxpd not running. Cluster state unknown. Local node not properly configured for PowerHA SystemMirror. Usage: This command is to be run only via SMIT. Supply 4 args for varyoff_import_vg() Arguments are: -v Volume_Group -n Node_List -w Node_With_Vg -p Pvids Supply 3 args for create_import_vg() Arguments are: -v Volume_Group -n Node_List -p Pvids Supply 7 args for create_lv_fs() Arguments are: -l Lv_Name -f Fs_Name -v Volume_Group -n Node_List -m Mount_Point -p Pvids -c Copies Supply 6 args for create_lv_fs() Arguments are: -l Lv_Name -f Fs_Name -v Volume_Group -n Node_List -m Mount_Point -p Pvids Supply 5 args for create_vg_lv() Arguments are: -l Lv_Name -v Volume_Group -n Node_List -c Copies -p Pvids Could not create FS [%1$s] on LV [%2$s] on node [%3$s] No hdisk with PVID [%1$s] on node [%2$s] Could not importvg [%1$s] on hdisk [%2$s] on node [%3$s] Could not reset "auto vary on" attribute for [%1$s] on node [%2$s] exportvg failed on [%1$s] on node [%2$s] Could not varyonvg [%1$s] on node [%2$s] Could not varyoffvg [%1$s] on node [%2$s] Could not reset [%1$s] on node [%2$s] Could not varyoffvg [%1$s] on node [%2$s] Could not create [%1$s] on node [%2$s] Could not create LV [%1$s] on VG [%2$s] on node [%3$s] Supply 7 args for create_import_vg_fs() Arguments are: -l Lv_Name -f Fs_Name -v Volume_Group -n Node_List -m Mount_Point -p Pvids -c Copies Unknown option SSA disk fencing failed Usage: %s -g group -r relation -n nodes -o odmdir NAME=VALUE [NAME=VALUE] Usage: %s group Usage: %s Usage: %s -G new group name -g old group name -r new relation -n new list of nodes -o odmdir NAME=VALUE [NAME=VALUE] Usage: %s nodename Operation parameter (-o) MUST be supplied 1 varyoff_import_vg 2 create_import_vg 3 create_lv_fs 4 create_fs_only 5 create_conc_vg 6 create_import_vg_fs Usage: %s service_address... Usage: %s takeover_address... Usage: %s nodename failed_standby_address Usage: %s filesystem(s) volume-group(s) pvid(s) Usage: %s nodename joining_standby_address Usage: %s nodename network_name Usage: %s nodename [graceful | forced] Usage: %s [group_name] %1$s: ERROR: Unable to create %2$s. Check permissions. 
Usage: %s nodename [group_name]%1$s: ERROR: Unable to determine SP switch partition information from the control workstation for node:%2$s . Please make sure this node can communicate with the control workstation. Exiting. Error: SP Switch not started. Usage: %s filesystem(s) volume-group(s) Usage: %s nodename network ip_address1 ip_address2 %s does not exist Unable to make %s directory Error Usage: stop_imagedemo [ -a Service_Address ] Usage: %s filesystem_list volume-group_list Eprimary node updated to SP2 node $PRIMNODE in SDR. Usage: %s service_address usage: %s event nodename and_args: Invalid number of arguments. Could not save xtab file. Please export PowerHA SystemMirror defined filesystems Could not restore xtab file. Please export PowerHA SystemMirror defined filesystems Unable to find %s class Cannot determine status of SSA disk [%s] . is_ssadisk(): odm_initialize erroris_ssadisk(): odm_get_first failed on CuDv ODM. %s: Unable to open device:%1$s: Unable to varyonvg Volume Group %2$s. %1$s: Successfully varied on Volume Group %2$s. %s: Unable to close device.Usage: %s -v volume group -d disk1 ...] No logical name given Must specify either open or close USAGE: close_hty -l logical name -o open -c close -d debug Could not set odm directory path Failed to initialize the odm open class PdDv failed open class CuDv failed No CuDv object for %s No entries retrieved error retrieving from ODM ODM method error - %s requires superuser privileges Usage: %s local_tr_devname remote_hw_address Can't convert hardware address (%s) Address must be in the form 0x123456789012 Illegal option %s ignored. Cannot find fuser in process table. Not an nfs directory. puname: getpwuid failed (uid = %ld) %s readuser: %s lseek into proc table %1$ld failed: %2$s read_user: kmem read proc failed: %s read_user: pids don't match %s: Unable to connect to node %s. Any configuration changes will not be propagated to the node at this time. Any configuration changes must be propagated to the node before the node will be allowed to join the cluster. %s: Unable to execute a command remotely on node %s, check clcomd.log file for more information. Any configuration changes must be propagated to the node before the node will be allowed to join the cluster. %s: Node %s is not running a version of PowerHA SystemMirror which supports DARE functionality. A sync of the ODMs will not be performed. Please refer to the PowerHA SystemMirror Installation Documentation for more information on migration. %s: Unable to retrieve list of configured cluster nodes. Please correct errors in the cluster configuration, and retry this command. %s: Not able to discover the name of the local node. Please check the cluster configuration. %s: Configuring SSA Disk Fencing failed. %1$s: Unable to retrieve pingable address for node %2$s. %s: Node %s does not have PowerHA SystemMirror for AIX 4.2.0.0 or greater installed. Please ensure the proper version of PowerHA SystemMirror is installed on each node in the cluster. %s: Node %s is not running the same version of PowerHA SystemMirror as node %s. DARE functionality is supported between nodes running the same version of PowerHA SystemMirror only. %s: Changes to hbrate for [%s] can not be made to an active Cluster %s: Changes to cycle for [%s] can not be made to an active Cluster ERROR: To automatically correct errors found during verification, the selection for 'Verify, Synchronize or Both' must be set to either 'Both' or 'Verify'. 
%1$s:ERROR: HTY_LABEL:%2$s not a valid address %1$s:ERROR: MLI_LABEL:%2$s not a valid address %1$s:ERROR: Third parameter:%2$s must be set to either configure or unconfigure %1$s:ERROR: Could not determine interface for MLI_LABEL: %2$s. %1$s:ERROR: MLI_LABEL %2$s is configured on an unsupported device type: %2$s. %1$s:ERROR: Could not determine parent device for MLI_LABEL: %2$s. %1$s:ERROR: Could not alias HTY_LABEL %2$s to MLI_LABEL %3$s %1$s:WARNING: HTY_LABEL %2$s not aliased to MLI_LABEL %3$s %1$s: MLI_LABEL %2$s not configured on this node. HTY_LABEL %3$s not unconfigured. %s: ERROR:: Failed adding SP switch partition information to the HACMPsp2 ODM. ERROR: You must configure the file /usr/es/sbin/cluster/etc/rhosts and then refresh the clcomdES subsystem. After configuring the file /usr/es/sbin/cluster/etc/rhosts, execute /usr/bin/refresh -s clcomdES ERROR: You must configure the file /usr/es/sbin/cluster/etc/rhosts and then start the clcomdES subsystem. After configuring the file /usr/es/sbin/cluster/etc/rhosts, execute /usr/bin/start -s clcomdES ERROR: You must start the clcomdES subsystem. Please execute /usr/bin/startsrc -s clcomdES Verification has completed normally. usage: %s event nodename parameters %s: Can't use the combination of -f and -u. %s: Can't use the combination of -f and -v. Error in executing cl_emul_dare !!!!!!!!!! ERROR !!!!!!!!!! NOTICE >>>> The following command was not executed <<<< %1$s: Enqueing rg_move action (%2$s) for resource group (%3$s)! HACMPlogs ODM is missing or corrupted. Error in reading local node name Error in trying to read the ip address of this node Error in executing cl_rcp No active nodes exist in the cluster Unable to connect to node : %s To run the emulator PowerHA SystemMirror must be version 4.2.2 or higher on all active nodes Failure occurred while executing cl_emul_cspoc . Unable to rsh to node %s Unable to collect the output files No changes detected in Cluster Topology or Resource requiring further processing. Cluster manager not active on this node. No active nodes in the cluster .. Exiting. *******Executing DARE Emulation******** Restoring the System Default Configuration of node %1$s from Active Configuration %1$s : %2$s forced down. Exiting immediately. %1$s: Failure of dare migration during second consistency check. %1$s: Attempt to perform resource group migration with pending changes in either the Topology or Resource PowerHA SystemMirror ODMs. Must perform normal dare (without group migration) first. Then retry migration. %1$s: Requested group migration(s) are incompatible with cluster configuration or state. See above messages for reasons. %1$s: Failure occurred during resource group migration. This is probably because of an intervening cluster event. Check above for the final location(s) of specified group(s). Also, look in the hacmp.out log file (default location: /tmp) to see more information about the failure. Special note: if any resource groups were left in an unknown state (identified with a state name of "????") by this migration, please stop them gracefully with a "cldare -M groupname:stop", after taking the cluster manager out of error mode. Please do this before performing other group migrations or restoring snapshots. %1$s: Node %2$s is not running a version of PowerHA SystemMirror which supports DARE resource group migration. A sync of the ODMs will not be performed. Please refer to the PowerHA SystemMirror Installation Documentation for more information on version migration. 
Performing final check of resource group locations: Requested migrations succeeded. Waiting for migrations to occur.... completed. %1$s: Problems committing location information to node %2$s. Committing location information to ODM on all nodes... Migration request passed preliminary check for compatibility with current cluster configuration and state. Performing preliminary check of migration request... %1$s: Errors detected while committing location information to above node(s). TIMEOUT! ERROR OCCURRED! %1$s: -N flag specified. Not waiting for requested migrations to complete. Also, skipping final check of resource group locations. No active nodes in cluster and hence no resource groups to migrate. Exiting. Physical cluster resources must not currently support the migration request. Check messages immediately above for problem and/or try different migration. %1$s: Unknown argument or flag: %2$s Emulation in effect. Skipping final check of resource group locations. err_emulate failed to log error %s. Replicated Resource method %1$s failed for %2$s %1$s: Waiting for sddsrv to go inoperative. This could take several minutes if some vpaths are inaccessible. WARNING: UUID %1$s not found in %2$s of site %3$s. Run /usr/es/sbin/cluster/xd_generic/xd_cli/clxd_list_rdisk and then run cfgmgr -l hdiskX for those disks showing 'none'. clRGPA call was unsuccessful clRGPA returned unknown job %s: The Dynamic Reconfiguration of the cluster topology is not supported. There was an internal SMIT error calling the function clRGdare. BROADCASTLink The resource group is already on the target node specified. Exiting... Error: no information available for the resource group specified. Usage: %s service_interface This script is for use via glvm replicated resource methods only. It will set, query or clear a flag on all GMVG disks in a resource group. The following parameters are used: -s set flag -q query flag -c clear flag Usage: %s [-s RG | -q RG | -c RG] Usage: %s service_adapter standby_adapter Usage: %s interface HA Comm Links Communications Server not installed or unsupported version. Unable to start Communications Server. Unable to modify DLC %s. The verifysna command failed. Changes will not take effect. Profiles successfully changed. Unable to stop Communications Server. Unable to start link station %s. Usage: %s interface Communications Server is not active. Unable to stop DLC %s. Unable to stop link station %s. Usage: %s Link Name Unable to start port %s. Unable to start DLC %s. Error processing script %s. Usage: %s -n CS DLC name or All [-p (port) or -l (linkstation) ] Unable to query Communications Server. Unable to find DLC %s. %s: Failure occurred while releasing SNA DLC profiles. Manual intervention required. %s: Failure occurred while acquiring SNA DLC profiles. ERROR: You must start the clcomdES subsystem. Please execute /usr/bin/startsrc -s clcomdES Adding Service Principals %s does not exist or is not an ordinary file. Could not Open File %s Extracting Service Principals for %s Enter Password: Reenter Password: Password Mismatch Could not find path to Node Tried IP labels: Perhaps the /etc/krb-srvtab file does not include a path to that host Usage: cl_setup_kerberos (with no arguments) No Cluster Defined! 
Could not find /.k file on console workstation Logon to console workstation and execute command Could not talk to console workstation Enter master password when prompted Getting boot communication interfaces and service IP labels Getting Realm Getting Nodes For Host: Doing principals: Kerberos not installed on node %s Could not find the ip address of en0 Could not find network name for %s Could not get adapters on network %s %1$s : Interface type of %2$s and %3$s mismatch! %1$s : Invalid interface name, %2$s! %1$s : Unsupported interface type, %2$s! %1$s : Multiple interfaces on the same ATM device is not supported. %1$s : Timeout waiting for interface %2$s to be up and running. Usage: %s device interface address address_of_interface Usage: %1$s -s %2$s -g logname %3$s -c logname -v directory [-r] [-n] %4$s -a logname %5$s -w logname ERROR: the directory %1$s does not exist or is not writable on the local node. Please create this directory and run this command again. NOTE: You must create this directory locally on all nodes for proper functionality. After all log directory changes have been made, please perform a cluster synchronization from this node. This will ensure that all desired changes have been propagated throughout the cluster. HACMPlogs ODM is missing or corrupted. Modification of /etc/syslog.conf file failed. Cllog failed in cluster.log modification clean-up. Location of cluster.log file could not be found. Cllog could not modify the HACMPlogs ODM. ERROR: the log file %1$s is unable to be redirected. %1$s: Warning: The cluster.log log file has already been redirected via modification of the %2$s file on node %3$s. These modifications will be overwritten. %1$s: The directory %2$s, specified for log file %3$s, is part of the AFS-mounted filesystem %4$s. %1$s: The directory %2$s, specified for log file %3$s, is part of the DFS-mounted filesystem %4$s. %1$s: The directory %2$s, specified for log file %3$s, is part of the NFS-mounted filesystem %4$s. %1$s: The directory %2$s, specified for log file %3$s, is part of the filesystem %4$s, which is managed by PowerHA SystemMirror. %1$s: The directory %2$s, specified for log file %3$s, is not an absolute path (does not begin with '/'). %1$s: %2$s: As a result, it could unexpectedly become unavailable. %1$s: %2$s: Therefore, it cannot be used for this purpose. %1$s: It is not possible to contact node %2$s. Therefore, it is not possible to determine whether cluster.log has been configured consistently on all nodes. ERROR: the new log directory %1$s does not have enough free space %2$d to store all the PowerHA SystemMirror logs, %3$d required. ERROR: the move of log file %1$s to the new log directory, %2$s, failed. ERROR: The requested log %1$s directory could not be changed. Changing file size failed. PowerHA does not support rotation and changing size of log file %1$s when rsyslogd is enabled. Please refer to the rsyslogd documentation for more details.Unable to start Cluster Resource Manager (clresmgrd) via SRC Usage: %s event_name [parameter] ERROR: %s: Unable to update the Cluster Manager with parameters %s and %s! Manual intervention may be required! ERROR: %s: clresmgrd still initializing. Manual intervention may be required! Usage: clswapaddress netname ip_address1 ip_address2 %1$s: Cluster not stable. Operation not allowed. %1$s: swap_adapter failed! %1$s: swap_adapter_complete failed! Migration from PowerHA SystemMirror to PowerHA SystemMirror/ES detected. Performing startup of PowerHA SystemMirror services. 
%s: Migration from PowerHA SystemMirror to PowerHA SystemMirror/ES detected. A DARE event cannot be run until the migration has completed. %s: WARNING: Can not copy resource locator database entries from PowerHA SystemMirror to PowerHA SystemMirror/ES.%s: Copying resource locator database entries from PowerHA SystemMirror to PowerHA SystemMirror/ES... %s: Stopping PowerHA SystemMirror cluster services... %s: WARNING: Unable to stop PowerHA SystemMirror cluster services. %1$s: ERROR: Unable to start the %2$s process! %1$s: WARNING: Unable to refresh topology services! %s: Uninstalling PowerHA SystemMirror filesets. %s: WARNING: Unable to uninstall PowerHA SystemMirror filesets. %1$s: WARNING: Unable to create symbolic link %2$s. %1$s: WARNING: Unable to remove %2$s. %1$s: WARNING: Unable to run %2$s. %1$s: WARNING: Unable to change permissions for %2$s. %1$s: WARNING: Unable to remove %2$s. %1$s: WARNING: Unable to create directory %2$s. %1$s: WARNING: Directory %2$s does not exist. %1$s: Waiting for Cluster SMUX peer daemon (clsmuxpd) to stop. %1$s: Waiting for Cluster information daemon (clinfo) to stop. %1$s: To start the %2$s process, issue the following command: %1$s %1$s: ERROR: Unable to unconfigure the PowerHA SystemMirror global ODM subserver! %1$s: To unconfigure the PowerHA SystemMirror global ODM subserver, issue the following command: %1$s: ERROR: Unable to configure the PowerHA SystemMirror/ES global ODM subserver! %1$s: To configure the PowerHA SystemMirror/ES global ODM subserver, issue the following command: %1$s: Error running migrate event script! Manual intervention required! %1$s: Error running migrate_complete event script! Manual intervention required! ${PROGNAME}: Error completing the migration process!!! ${PROGNAME}: Failed adding ${LPPNAME} information to syslog.conf file. ${PROGNAME}: Failed removing old PowerHA SystemMirror information from /etc/syslog.conf file. %s: odm_initialize() failed. %1$s: odm_get_first failed on %2$s. criteria=[%3$s] %1$s: Inquiry Data at id=0x%2$ld, lun=0x%3$ld: Usage: cllsdisktypes nodename1 [nodename2 ...] %s: Node: %s is not reachable. Please check rhosts entries. Usage: cllsparent -C {-k ChildConnectionKey} nodelist cllsparent -P {-k ChildConnectionKey} nodelist PowerHA SystemMirror errnotify event ERROR: %1$s has occurred on device: %2$s. Usage: %s [newfrequency] [-h|-?] syncd frequency: %s High water mark: %s Low water mark: %s %1$s: ERROR: Cannot modify %2$s. Killing the syncd daemon. %s: ERROR: Cannot change syncd frequency. Starting the syncd daemon. %1$s: ERROR: Invalid argument. New frequency must be an integer from 1-32767. %1$s: Suspend request for monitor %2$s failed. %1$s: Resume request for monitor %2$s failed. %1$s: Could not find resource id for application server %2$s. Usage: %1$s nodename resource_id Bad resource id %1$s. Neither an application server nor an user defined resource. Failure in user-defined script %1$s. Usage: %1$s nodename RG_id [destination] %1$s: Script not found. Usage: %1$s nodename RG_id [ACQUIRE | RELEASE] Usage: %s [-v] [-f] (-n name | -d device) [-o ODM] Usage: %s [-v] [-f] (-n name | -d device) [-o ODM] Usage: %s [-v] (-n name | -d device) [-o ODM] %s: The string "%s" is too long. 
Maximum length is %ld Usage: %1$s tape_device_name tape_start_script cltape_resource_reserve returned %ld Tape start script '%1$s' for '%2$s' returned %3$ld Usage: %1$s tape_device_name tape_stop_script Tape stop script '%1$s' for '%2$s' returned %3$ld cltape_resource_release returned %ld Usage: %1$s tape_resource_list start script '%1$s' does not exist start script '%1$s' is not executable start script '%1$s' is not a regular file start script '%1$s' is empty cl_tape_resource_get for Tape Resource '%1$s' returned %2$ld Could not spawn cl_tape_resource_get for Tape Resource '%1$s' Usage: %1$s tape_resource_list stop script '%1$s' does not exist stop script '%1$s' is not executable stop script '%1$s' is not a regular file stop script '%1$s' is empty cl_tape_resource_release for Tape Resource '%1$s' returned %2$ld Could not spawn cl_tape_resource_release for Tape Resource '%1$s' Usage: %1$s devicename [start_script [stop_script]] Insufficient (%ld) args. Excess (%ld) args. Tape-start script usage: %1$s tape_device_name Tape-stop script usage: %1$s tape_device_name Tape drive [%s] is available. Tape drive [%s] is not available. Tape drive [%s] has been reserved. Tape drive [%s] has not been reserved. Tape drive [%s] has been released Tape drive [%s] has not been released Copying %1$s to node %2$s. Failed copy of %1$s to %2$s. %s: Forced option is not supported for PowerHA SystemMirror/ES. %s: Use either graceful or graceful with takeover. %s: Cluster services are already running. %s: the -n option is not currently implemented. Skipping Cluster Verification. Performing Cluster Verification because one or more nodes are active. Option "%s" is not valid Custom methods are already defined for device type %s ghost disk method %s is neither a path name, nor a recognized method name break reservation method %s is neither a path name, nor a recognized method name make disks available method %s is neither a path name, nor a recognized method name check reservation method %s is neither a path name, nor a recognized method name SCSI2 - ghosts are those disks with different names from the given disk, but identical 'parent' and 'location' attributes in CuDv SCSI3 - ghosts are those disks with different names from the given disk, but identical 'parent' and 'location' attributes in CuDv, and identical 'lun_id', 'scsi_id', and 'ww_name' attributes in CuAt # provide the path name of a method that will identify ghost disks TARGET - a SCSI target ID will be sent via openx(hdisk_name, ,SC_FORCED_OPEN) PSCSI - The disk is treated as a parallel SCSI device. In particular, the SCSI and LUN id information is retrieved from the 'position' attribute in CuDv. The disk is opened, and either a LUN reset or a target reset is sent, depending on whether a SCSI inquiry reports this device as being SCSI-2 or SCSI-3. (Note that an entry in /etc/cluster/lunreset.lst will cause a SCSI-2 device to be treated like a SCSI-3 device.) FSCSI - The disk is treated as a fibre SCSI device. In particular, the LUN id is retrieved from CuAt. The disk is opened, and a LUN reset is sent. # provide the path name of a method that will break reserves MKDEV - Up to four attempts are made at 'mkdev -l ' # provide the path name of a method that will make disks available SCSI_TUR - The disk is opened, and a SCSI Test Unit Ready command is sent via ioctl(). A response of 'reservation conflict' means that there is a reserve held by another node. 
# provide the path name of a method that will indicate whether a reserve is held Custom method %s invoked for disk %s to perform %sUsage: %s sitename Usage: %s Available virtual memory paging space Fraction of time disks are busy Available processor time Verifying cluster topology for IP aliasing on node %s... ERROR: Adapters on network %s do not all have the same netmask. ERROR: A hardware address is defined for interface %s on a network that is configured to use IP aliasing ERROR: Standby interface %s is defined on a network that is configured to use IP aliasing ERROR: Serial network %s is configured to use IP aliasing. ERROR: No boot adapters are defined on node %s and network %s. WARNING: There may be an insufficient number of boot adapters defined on network %s. Multiple boot adapters are recommended for networks that will use IP aliasing. ERROR: Boot adapters on node %s and network %s are not on different subnets. ERROR: Service adapter %s on network %s is on a different subnet from any of the boot adapters. ERROR: Shared service %1$s is defined as address to ping for etherchannel device %2$s at node %3$s. ERROR: Shared service %1$s is defined as netaddr of interface %2$s at node %3$s. ERROR: Shared service %1$s is defined as IPv4 alias of interface %2$s at node %3$s. ERROR: Shared service %1$s is defined as IPv6 netaddr of interface %2$s at node %3$s. ERROR: Shared service %1$s is defined as IPv6 alias of interface %2$s at node %3$s. ERROR: Unable to obtain netmask from network interface %s on node %s. Please check and ensure that the network interface %s is properly configured. ERROR: Could not update netmask attribute of HACMPadapter for interface %s. ERROR: Could not update alias attribute in HACMPnetwork for network %s. ERROR: Network interfaces on network %s have different netmasks. Usage: %1$s Another PCI hot plug replacement is happening on this node at this time. Only one replacement process is supported at one time per node. This process will now exit, leaving the adapter and interface untouched. A previous PCI hot plug replacement process terminated abnormally. The network interface that was to be replaced in the previous process could be in an unconfigured or removed state. If you want to continue with this replacement process, remove the lock file %1$s and restart the PCI hot plug replacement. This process will now exit, leaving the network adapter and interface untouched. The cluster is in an unstable state. Canceling PCI hot plug replacement. The network adapter %1$s is an unknown or unsupported network adapter type. The adapter %1$s was not found in a PCI hot pluggable slot. Adapter %1$s is the only alive PowerHA SystemMirror network adapter on this node and there are no other keepalive network paths available. Removing this adapter would cause a partitioned cluster. You cannot continue this procedure. You must do the hot replacement manually with drslot or the SMIT fast-path devdrpci after shutting down HACMP on this node. The IP label %1$s found on interface %2$s was not found as a configured PowerHA SystemMirror interface. Error: Trying to disable network connectivity monitoring on the interface %1$s with IP address %2$s failed. Error: Trying to enable network connectivity monitoring on the interface %1$s with IP address %2$s failed. Canceling PCI hot plug replacement process because of fatal error above. Still waiting until the cluster is stable... A request to swap address %1$s with %2$s failed. The IP address %1$s is a %2$s address. 
It is being swapped with the boot communication interface address %3$s. The IP label %1$s on interface %2$s is a service label that is part of resource group %3$s, and there are no available boot communication interfaces on the local node to which the service label can be swapped. Temporary loss of connectivity to this interface will pause connectivity for this resource group. Would you like to migrate resource group %3$s over to another node, leave the resource group as is, or cancel the replacement process? Note: Canceling the replacement process now will leave all network settings untouched. 1) Continue Without Moving 2) Move The Resource Group 3) Cancel The Replacement Process Enter Selection [default: 1]: You entered a value that was out of range. Try again. Enter Selection [default: 1]: The resource group %1$s will not be migrated to another node. Warning: Replacement of this adapter will pause all connectivity to this resource group while the replacement is progress. Canceling the hot plug replacement via user request. No network settings have been changed. You entered: %1$s %1$ld) Cancel Migration %1$ld) Cancel The Replacement Process There are no alive cluster nodes to which the resource group %1$s can be migrated. Note: Continuing will cause the resource group to be unavailable during the replacement. Do you want to continue with the replacement? 1) Yes 2) No Enter Selection [default: 1]: The resource group %1$s will not be migrated to any node. The resource group %1$s will be migrated to the node %2$s. Enter Selection [default: 1]: Do you want to migrate this resource group back to this node, %1$s, after the hot replacement is complete? 1) Yes 2) No Enter Selection: [default: 1]: Successfully unconfigured the interface %1$s. Error: Could not unconfigure the interface %1$s. Manual intervention may be required to reconfigure affected interfaces and adapters. Successfully unconfigured device %1$s. Error: Could not unconfigure device %1$s. Error: DARE migration of resource group %1$s to node %2$s failed. Migrating resource group %1$s to node %2$s. Successfully configured device %1$s. Successfully disabled network connectivity monitoring on the interface %1$s with IP address %2$s. No failure events on %1$s should be generated as a loss of network connectivity. Successfully enabled network connectivity monitoring on the interface %1$s with IP address %2$s. Successfully configured the interface %1$s. Error: Could not configure the interface %1$s. Swapping %1$s address %2$s back with communication interface address %3$s. Although the hot replacement was a success, and all interfaces have been reconfigured, not all service IP addresses and boot communication interface addresses are back to their original location, and no resource groups have been moved back to this node. Manual intervention will be required to resolve these issues. Error: Could not configure device %1$s. The device %1$s is in the "defined" state. All interfaces associated with this adapter are unconfigured. The PCI Hot Plug procedure is waiting for cluster events to be processed before continuing. This may take a few minutes. Resource group %1$s was no longer found up on the local node. Skipping migration for this resource group. Resource group %1$s was no longer found up on the local node. No available boot communication interface was found with which we can configure interface %1$s. This interface will be left unconfigured. Configuring interface %1$s with address %2$s. 
The boot communication interface address %1$s, that was previously swapped with boot communication interface address %2$s, was not found to be alive now. Configuring interface %1$s with boot communication interface address %2$s. Configuring interface %1$s with boot communication interface address %2$s. The service IP address %1$s, that was previously swapped with network interface address %2$s, was not found to be alive now. Configuring interface %1$s with service IP address %2$s. Configuring interface %1$s with boot communication interface address %2$s. Configuring interface %2$s with standby address %2$s. The IP label %1$s on interface %2$s is a boot communication interface IP label, but there are no available boot communication interfaces with which it can be swapped. %1$s will be unavailable during the replacement. The IP label %1$s on interface %2$s is a service IP label that is not part of a resource group, but there are no available boot communication interfaces with which it can be swapped. Continuing this process will cause %1$s to be unavailable during the replacement. Would you like to continue with the replacement process? 1) Continue 2) Cancel Enter Selection: [default: 1] Error: Could not change hardware address on device %1$s to %2$s. It is now OK to hot replace the PCI adapter %1$s. The PCI slot number is %2$s. Note: If you now decide that you do not want to replace the adapter, but rather to keep the current adapter, you may leave the current adapter in the PCI slot and just hit Enter. The adapter will be reconfigured normally, and minimal disruption to the cluster will be caused. The hot replacement of %1$s failed. If you did not manually cancel the replacement, then an error occurred. See the above error message for details. Would you like to try the replacement again? 1) Yes 2) No Warning: Selecting No will cancel the hot replacement process, and the adapter will be left unconfigured, the PCI slot will be left in the removed state, and network connectivity monitoring will resume on the adapter if it was alive before the replacement began. This will not cause an adapter fail event to occur. Enter Selection: [default: 1]: Cancelling the hot replacement process. Adapter %1$s and all related interfaces will be left unconfigured. The PCI slot %2$s will be left in the removed state. Network connectivity monitoring will resume on all related interfaces that were alive before the replacement began. Continuing to shutdown PowerHA SystemMirror on node %1$s gracefully with takeover as you requested. The hot replacement was a success. Configuring adapter %1$s... Configuring interface %1$s... Still waiting for the user to confirm replacement of adapter %1$s after %2$s minutes. Hit Enter to confirm the replacement. PCI SlotAdapterInterfaceIP Label The IP label %1$s on interface %2$s is a service IP label that is part of resource group %3$s, which you have chosen to migrate to node %4$s. Thus, this service IP label will be migrated to that node as well. Hot plug replacement is not supported when the cluster is not running. %1$s: Waiting up to %2$s seconds for address %3$s on interface %4$s the program you specified, %s, does not exist the event name you specified, %s, is not valid A user defined event is already defined with that name. Option "%s" is not valid Failed adding %s event to the ODM. Event %s successfully added to the ODM. 
No description Generated by the clstrmgr daemon Generated by cluster scripts and daemons Cluster history files generated daily Generated by the cluster Shared Memory library Generated by CSPOC commands Generated by deadman's switch activity Generated by the event emulator scripts Generated by event scripts and utilities Generated by Application Availability Analysis tool Generated by Cluster Verification utility Generated by clcomd daemon Generated by clcomd daemon, debug information Generated by Two-Node Cluster Configuration Assistant Generated by cluster utilities and file propagation Generated by the Cluster Test Tool Generated by Auto Verify and Synchronize Generated by Application Discovery Oracle Smart Assistant Log Usage: clpollev pid Usage: clsubmitev [-o odmdir] eventname [argument ...] %s: Changes made to the networking configuration will result in IP aliasing support being enabled or disabled. Please note that changing these network parameters via a DARE is not supported. %1$s : Configuring network interface %2$s with aliased IP address %3$s %1$s : %1$s: Removing aliased IP address %2$s from network interface %3$s Problems encountered whilst verifying cluster topology for IPAT using IP aliasing. Verifying cluster topology for IP aliasing. WARNING: There may be an insufficient number of boot adapters defined on node %s and network %s. %1$s : Failed to ifconfig alias %1$s on interface %2$s. %1$s : Failed to ifconfig delete alias %1$s from interface %2$s. %1$s: Cannot create subdirectory %2$s in /etc/wlm %1$s: Cannot rename file %2$s %1$s: Cannot read file %2$s from active WLM configuration %1$s: Cannot write to %2$s %1$s: Cannot write file %2$s in directory %3$s %1$s: Failed getting node information from %2$s %1$s: Cannot determine name of HA WLM configuration %1$s: Cannot start Workload Manager. Failure running wlmcntrl utility %1$s: Cannot stop Workload Manager. Failure running wlmcntrl utility %1$s: Attempting to restart Workload Manager with previous configuration Deleting %1$s from node %2$s. Failed removing %1$s from node %2$s. Updating ODM errnotify on node %1$s. Failed updating ODM errnotify on node %1$s. Error: Directory for the cl_event_summaries.txt file does not exist. Error: Directory for the specified %s file does not exist. Warning: hacmp.out file is not readable. Warning: %1$s/cl_event_summaries.txt file currently does not exist. Event summaries will be extracted from hacmp.out and appended to cl_event_summaries.txt. Warning: hacmp.out file is empty. Failed to set up persistent node IP labels on node %1$s. Cannot find valid address to node %1$s. Could not copy priority override location information to node %1$s. %1$s: Resource Group Migration via the -M flag is no longer supported. You must use the clRGmove command instead. Failed to delete the priority override location information file from node %1$s. %1$s: Failed to find a configured boot or service interface for persistent IP label %2$s. The persistent IP label %2$s will not be available. Notice: HATivoli is installed, and HATivoli IP aliases are configured. All HATivoli post-event script entries will be removed from PowerHA SystemMirror, but the HATivoli post-event script files will be left in place. If you have modified any of the HATivoli post-event scripts and desire that your modifications still be run as post-event scripts, then you will need to move your code to a new script and add that script to PowerHA SystemMirror as the appropriate post-event script. 
WARNING: Service adapter %1$s on network %2$s is not on the same subnet as any of the boot adapters on this network. Setting attribute for network (%1$s) to use IP aliasing for IP address takeover ERROR: Service adapter %1$s on network %2$s is on the same subnet as at least one of the boot adapters on this network. Service labels must be on a different subnet when the network is configured to use IP aliasing for IP address takeover. ERROR: The cluster version number %1$s read from HACMPcluster is not compatible with this version of the PowerHA SystemMirror for AIX software. Supported versions are 4 and 5. Check the configuration and conversion log for errors. %1$s: Interface for boot communication interface %2$s could not be found. %1$s: Interface for service IP label %2$s could not be found. %1$s: Using boot communication interface %2$s instead. %1$s: Check hacmp.out for events prior to this one which may have consumed the available boot communication interfaces. %1$s: Service adapter %2$s will not be recovered by this event. IP Aliasing for IP address takeover is not supported on network (%1$s). Aliasing attribute set to 'unsupported'. Time out waiting for %s.A port must be specified for driver %s.Driver %s is not defined.Driver %s is not available.Failed creating %1$s on %2$s.Failed starting COMIO service for %s.Failure removing COMIO port %s.Failure removing port %s.Usage: To query the current time: clavan -c To analyze application uptime over a range of time: clavan -a app_name -b begin_time -e end_time where: app_name name of application to be analyzed begin_time start of time range end_time end of time range The format of the begin_time and end_time arguments is as follows: YYYY:MM:DD:hh:mm:ss %1$s: The -c flag must not be specified with other command line flags. %1$s: Missing or incorrect command line data. %1$s: The time parameters specified with the -b and -e flags were not correctly formatted. %1$s: Beginning or ending time exceeded system maximum. %1$s: Ending time must be greater than beginning time. %1$s: Not able to determine cluster nodes. %1$s: Not able to get address for node %2$s. %1$s: Not able to contact node %2$s. %1$s: Not able to determine location of clavan.log file. %1$s: Not able to obtain clavan.log file size from node %2$s. %1$s: Not able to perform file system space check. %1$s: Not enough file system space free to perform uptime analysis. %1$s: Not able to retrieve clavan.log file from node %2$s. %1$s: Not able to open combined clavan.log file. %1$s: Not able to remove temporary file. %1$s: Beginning time year field is out of range (1970-2037). %1$s: Beginning time month field is out of range (01-12). %1$s: Beginning time day field is out of range (01-31). %1$s: Beginning time hour field is out of range (00-23). %1$s: Beginning time minute field is out of range (00-59). %1$s: Beginning time second field is out of range (00-59). %1$s: Ending time year field is out of range (1970-2037). %1$s: Ending time month field is out of range (01-12). %1$s: Ending time day field is out of range (01-31). %1$s: Ending time hour field is out of range (00-23). %1$s: Ending time minute field is out of range (00-59). %1$s: Ending time second field is out of range (00-59). %1$s: Application name specified is not recognized. %1$s: Not able to obtain resource information. 
Analysis begins: %1$s, %2$s-%3$s-%4$s, %5$s:%6$s Analysis ends: %1$s, %2$s-%3$s-%4$s, %5$s:%6$s Total time: %1$s days, %2$s hours, %3$s minutes, %4$s seconds Uptime: Downtime: Amount: %1$s days, %2$s hours, %3$s minutes, %4$s seconds Percentage: %1$s Longest period: %1$s days, %2$s hours, %3$s minutes, %4$s seconds Log records terminated before the specified ending time was reached. Application monitoring was not active during the time period analyzed. Application monitoring was suspended for %1$s of the time period analyzed. %1$s: Not able to obtain event data. %1$s: Not able to open recovery program file. %1$s: Possible corruption detected in event data. Cluster services were manually restarted during the time period analyzed. A hard node failure occurred during the time period analyzed. MondayTuesdayWednesdayThursdayFridaySaturdaySundayJanuaryFebruaryMarchAprilMayJuneJulyAugustSeptemberOctoberNovemberDecemberUsage: To query the interface name for a boot communication interface/service IP label : clresetiface -q adapter To get a list of all adapters which have interfaces configured: clresetiface -l To reset the interface name for an adapter: clresetiface -r adapter No boot communication interface was specified. Not able to retrieve HACMPadapter information from ODM. Cluster services must be stopped before attempting to reset the network interface. Not able to open temporary file for writing. Not able to update configuration in ODM. Configuration changes have been made. Please re-synch the cluster configuration in order to complete the boot communication interface reset operation. %1$s: Not able to analyze application availability because both the start and end times specified are earlier than the first timestamp in the log. %1$s: Not able to analyze application availability because both the start and end times specified are later than the last timestamp in the log. Usage : %s [-boot] [-l] [-i] [-b] [-N | -R | -B] [-r] Application monitoring state was manually changed during the time period analyzed. Application monitor failed during the time period analyzed. Application analyzed: %1$s Application analyzed is part of a concurrent resource group. %1$s: Unable to vary on volume group %2$s because logical volume %3$s is incomplete WARNING: The Cluster Name has changed. If enhanced concurrent volumes groups are to be added, it is recommended that you reboot the cluster nodes. ERROR: The cluster version number %1$s read from HACMPcluster is not compatible with this version of the PowerHA SystemMirror for AIX software. Supported versions are %2$s and %3$s. Check the configuration and conversion log for errors. Usage: %1$s nodename failed_address Interface %1$s has failed on node %2$s. Usage: %1$s nodename address Interface %1$s is now available on node %2$s. Usage: %1$s nodename message_id. Unrecognized message id %1$s. Creating GPFS Cluster Creating GPFS Filesystem Removing GPFS Cluster Verifying PowerHA SystemMirror Cluster ERROR: GPFS cluster already exists. ERROR: Bad GPFS configuration detected ERROR: The HACMPgpfs ODM data does not match with mmlscluster command ERROR: Failed to determine current GPFS configuration. ERROR: PowerHA SystemMirror Cluster must be synchronized before configuring a GPFS cluster ERROR: Some of the PowerHA SystemMirror cluster nodes are not available ERROR: FAILED to receive adapter info from the PowerHA SystemMirror configuration Successfully created the GPFS cluster GPFS cluster creation failed. 
Verifying GPFS configuration ERROR: Cannot add GPFS filesystem - no GPFS cluster exists Verifying cluster nodes availability ERROR: Failed to start GPFS Failed to create the temporary file for the disk descriptor Creating the logical volumes Logical volume creation failed. Failed to create the GPFS filesystem Successfully created the GPFS filesystem WARNING: GPFS is using Single Node Quorum with Disk Leasing enabled. This is NOT a supported configuration in GPFS 1.5 Failed to mount the filesystem on all cluster nodes. Mount them yourself The filesystem is successfully mounted ERROR: Cannot remove GPFS filesystem - no GPFS cluster exists ERROR: Cannot remove GPFS cluster - no GPFS cluster exists The following GPFS filesystems will be deleted prior to deleting the GPFS cluster: The following GPFS filesystems are detected: The GPFS filesystems must be deleted before deleting the GPFS Cluster File System Name: No GPFS filesystems defined The PowerHA SystemMirror cluster is not synchronized. Determining HA network for use with GPFS ERROR: No PowerHA SystemMirror network detected. PowerHA SystemMirror topology must be configured prior to creating GPFS cluster. ERROR: No PowerHA SystemMirror nodes detected (empty HACMPnode ODM). PowerHA SystemMirror topology must be configured prior to creating GPFS cluster. GPFS adapters not found. None of the HA networks are eligible for use with GPFS Configuring GPFS Cluster... Creating GPFS cluster Creating GPFS nodeset (HAgpfs01) ERROR: Failed to read HACMPgpfs ODM Deleting HAgpfs01 nodeset ERROR: mmdelnode command failed. Deleting GPFS Cluster ERROR: mmdelcluster command failed. Force deleting GPFS cluster .. ERROR: Failed to read HACMPadapter ODM Synchronizing GPFS ODM Failed to create HACMPgpfs ODM ERROR: Failed to clear GPFS ODM ERROR: Failed to read HACMPgpfs ODM ERROR: The GPFS cluster ID in HACMPgpfs ODM does not match the one configured. No nodes in HACMPgpfs to unmount the filesystem ERROR: Failed to get cluster nodes from HACMPnode ODM. GPFS ODM synchronization failed Synchronization completed successfully Verifying whether all PowerHA SystemMirror Cluster nodes are available. NOTE: This may take a few minutes. ERROR: No GPFS nodes detected (empty HACMPgpfs ODM). ERROR: mmaddcluster failed for node name %1$s. ERROR: mmaddnode failed for node name %1$s. Please start PowerHA SystemMirror and mount the GPFS filesystems manually (for the first time) on the new nodes. ERROR: Failed to set new Primary server. ERROR: mmdelnode failed for node name %1$s. ERROR: mmdelcluster failed for node name %1$s. Please start PowerHA SystemMirror and mount the GPFS filesystems manually (for the first time) on the new nodes. ERROR: Failed to delete nodes from the GPFS configuration. ERROR: Failed to update HACMPgpfs ODM ERROR: Unable to remove ODMSTANZA for %1$s. ERROR: Invalid Parameters Operation Failed. The filesystem name is invalid. The mount point must be an absolute path Selected hdisks are not valid hdisks. GPFS is using single-node quorum policy. GPFS is using multi-node quorum policy. Stopping GPFS daemons using mmshutdown (if required) Changing the GPFS Primary Server to %1$s GPFS successfully deleted %1$s from configuration. GPFS Reconfiguration failed to delete the node %1$s from the configuration. Please review the output of the above commands. Removing %1$s from the configuration. 
ERROR: Could not modify configuration to use single-node quorum policyGPFS Switching to single-node quorum policy GPFS Switching to multi-node quorum policy GPFS Adding %1$s to the GPFS configuration Updating GPFS configuration. WARNING: GPFS Detected an unsupported configuration %1$s: Waiting up to %2$s seconds for telinit command to complete. %1$s: telinit command failed to complete after %2$s attempts. Usage : %s [grace period] Usage : %s Creating %1$s GPFS filesystem INTERNAL ERROR: The adapter list is empty (createNodeListFile) Failed to select GPFS adapters (createNodeListFile) ERROR: Failed to open the nodeList file:%1$s (createGPFSCluster). GPFS will use %1$s as the primary server ERROR: GPFS command mmcrcluster failed. INTERNAL ERROR: Failed to create a tmp file (%1$s). Force option is set. GPFS does not verify whether the disks belong to other GPFS filesystem Processing %1$s filesystem Unmounting %1$s Failed to unmount %1$s Deleting %1$s Failed to Force delete GPFS configuration on %1$s. ERROR: Failed to obtain data from %1$s. Starting topology services on %1$s Starting group services on %1$s Starting event management services on %1$s Starting GPFS service on %1$s ERROR: Failed to run mmlscluster GPFS command (GPFS cluster ID in ODM = %1$s) Mounting %1$s on node %2$s ERROR: Failed to mount %1$s on %2$s Unmounting %1$s on %2$s ERROR: Failed to unmount %1$s on %2$s %1$s: Could not determine alias address for heartbeat for adapter %2$s on node %3$s ERROR: Open Failed. (file:%s) Check disk space and filesystem permissions Waiting for GPFS to synchronize the data Failed to mount %1$s on node %2$s. Retrying... ERROR: Failed to mount %1$s on node %2$s after %3$d attempts. ERROR: There are no networks that can be used for GPFS communications. Configure a PowerHA SystemMirror network that contains only node-bound service labels OR a boot-only network. ERROR: No nodes defined in the PowerHA SystemMirror cluster. Please configure a PowerHA SystemMirror cluster before configuring GPFS cluster. Determining a node-bound service adapter only network for GPFS use. No network with node-bound service adapters qualify for GPFS communication or no network with node-bound service only adapters exists. Determining a boot-only network for GPFS use. PowerHA SystemMirror configuration does not have a boot-only network for GPFS use. Detected a boot-only PowerHA SystemMirror network %1$s for GPFS communications. Detected a node-bound service only PowerHA SystemMirror network %1$s for GPFS communications. No PowerHA SystemMirror network with node-bound service only adapters detected. No network with IPAT via ALIAS can be used for GPFS The following network interfaces will be used for GPFS communications Detected subnet (reference node %1$s): %2$s (IP=%3$s) Could not detect any subnets on node %1$s for %2$s. This network cannot be used for GPFS communications. %1$s has the following subnets configured: Verifying adapters for %1$s subnet on remaining cluster nodes %1$s has no adapters for subnet %2$s. %1$s cannot be used for GPFS. GPFS will use adapter %1$s on node %2$s ERROR: There are no adapters that are compatible for existing GPFS configuration on node %1$s and %2$s. A possible reason for the error shown above was due to the /var or / filesystem being completely or partially full. /var is %1$s%% full and / is %2$s%% full. Please correct this problem by removing obsolete files in, or increasing the size of, /var and /. 
%1$s: Checking cluster state for Forced down WARNING: clstop (forced) called while cluster is not stable. Current state is %1$s WARNING: clstop (forced) called when other cluster nodes are forced down. Forced down should only be used on one cluster node at a time. The following nodes are currently forced down: %1$s: The following node(s) had Event Script Failure: %1$s: The node(s) listed above should be recovered from Event Script failure before cluster services on node (%2$s) can be brought down with "Unmanage Resource Groups" option. %1$s: Node (%2$s) had Event Script Failure previously. Stopping cluster services with "Unmanage Resource Groups" option may put some resource groups into unmanaged state, after which event processing will continue. %1$s: Node %2$s is already stopped using the "Unmanage Resource Groups" stop option. No action will be taken on %3$s. %1$s: ERROR: The cluster is in migration. Stopping cluster services with "Unmanage Resource Groups" option is not allowed until the migration is complete. Usage: clRGmove -g [-n | -r | -a] [-m | -u | -d] [-p] [-i] [-s true | false ] ERROR: Cluster services are not running on the local node. ERROR: Resource group named %1$s does not exist. # To choose all nodes in the resource group, select # "All_Nodes_in_Group" below. All_Nodes_in_Group # To choose a single node, select one below. # *Denotes Originally Configured Highest Priority Node ERROR: Cannot use -r option with a concurrent type group. ERROR: Cannot use -a option with a non-concurrent type group. ERROR: There are no available nodes to acquire resource group %1$s. ERROR: Resource group %1$s is not online. ERROR: Destination node %1$s is not up. ERROR: Destination node %1$s is not able to acquire group %2$s. ERROR: Resource group %1$s is already online on destination node %2$s. ERROR: Resource group %1$s is already online. ERROR: Node %1$s is not up. Attempting to move resource group %1$s to the highest priority available node. Attempting to move resource group %1$s to node %2$s. Attempting to bring group %1$s online on the highest priority available node. Attempting to bring group %1$s online on node %2$s. Attempting to bring group %1$s offline on node %2$s. Failed to queue resource group movement event in the cluster manager. Waiting for the cluster to stabilize... Resource group %1$s is online on node %2$s. Resource group movement successful. Resource group %1$s is online on node %2$s. ERROR: Resource group movement unsuccessful. Resource group %1$s is not online. To troubleshoot why this occurred, please refer to the hacmp.out log file and the documentation. Resource group movement successful. Resource group %1$s is offline on node %2$s. ERROR: Resource group movement unsuccessful. Resource group %1$s is not offline on node %2$s. To troubleshoot why this occurred, please refer to the hacmp.out log file and the documentation. Stopping resource group movement processing and exiting. ERROR: The cluster is not stable, aborting. ERROR: Cannot move a resource group with 'Online On All Available Nodes' startup policy from one node to another. These resource groups can either be brought online or taken offline. Waiting for the cluster to process the resource group movement request... ERROR: There are no nodes hosting resource group %1$s. It cannot be brought offline. 
%1$s: Called for %2$s %3$s on node %4$s%1$s: Calling user specified notify method %2$s%1$s: Calling user specified cleanup method %2$s%1$s: Calling user specified restart method %2$sPowerHA SystemMirror cluster not found Changing Settling time: Old value: %1$s Seconds New value: %1$s Seconds There are no resource groups in the OFFLINE or ERROR state.There are no resource groups in the ONLINE or ERROR state.There are no resource groups in the ONLINE state.There are no nodes available to acquire group %1$s.ERROR: The cluster is in a migration state. User Requested rg_move events are not supported during migration. ERROR: Resource group movement unsuccessful. Resource group %1$s did not move to node %2$s. ERROR: Event processing has failed for the requested resource group movement. The cluster is unstable and requires manual intervention to continue processing. # Resource Group State Node(s) / Site # Node Site # The owner node for the resource group is no longer # available. Continuing with this operation will mark the # resource group as OFFLINE but the resources of the resource # groups will not be released by PowerHA SystemMirror. # Before bringing the resource group ONLINE on another node please # make sure that all the resources are stopped manually. Mark_OFFLINE ERROR: Settling time must be a positive number. ERROR: Resource group %1$s is online. ERROR: Resource group %1$s is not online. ERROR: Resource group %1$s cannot be moved because resource groups that depend on it are still online. Resource groups listed above should be brought offline prior to moving resource group %2$s. ERROR: Resource group %1$s cannot be brought online because resource groups it depends on are not online. Resource groups listed above should be brought online prior to bringing resource group %2$s online. ERROR: Resource group %1$s cannot be brought offline because resource groups that depend on it are still online. Resource groups listed above should be brought offline prior to bringing resource group %2$s offline. # Resource groups in node or site collocation configuration: # Resource Group(s) State Node / Site Attempting to move group %1$s to the highest priority available site. Attempting to move group %1$s to site %2$s. Error Encountered in resource group movement! # To choose the highest priority available site for the # resource group, and to remove any Priority Override Location # that is set for the resource group, select # "Restore_Node_Priority_Order" below. Restore_Node_Priority_Order # To choose a specific site, select one below. # *Denotes Originally Configured Primary Site ERROR: Cannot move resource group (%1$s) with 'Online On All Available Nodes' startup policy from one node to another. These resource groups can either be brought online or taken offline ERROR: PowerHA SystemMirror cannot determine on which nodes resource group %1$s should be brought online, since nodes from multiple sites are available. Please select target nodes individually, instead of using the (All_Nodes_in_Group) option. ERROR: PowerHA SystemMirror cannot determine on which nodes resource group %1$s should be brought online secondary, since nodes from multiple sites are available. Please select target nodes individually, instead of using the (All_Nodes_in_Group) option. Attempting to bring group %1$s online secondary on the highest priority available node. Attempting to bring group %1$s online secondary on node %2$s. Attempting to move secondary instance(s) of group %1$s to site %2$s. 
Resource group %1$s is online secondary on node %2$s. ERROR: Resource group movement unsuccessful. Resource group %1$s is not online secondary. Resource group movement successful. Resource group %1$s is online secondary on node %2$s. ERROR: Resource group movement unsuccessful. Resource group %1$s is not online secondary. Resource group %1$s is online on site %2$s. Resource group %1$s is online secondary on site %2$s. Resource group movement successful. Resource group %1$s is online on site %2$s. Resource group movement successful. Resource group %1$s is online secondary on site %2$s. ERROR: Destination site %1$s is not up. ERROR: Destination site %1$s is not able to acquire group %2$s. ERROR: Resource group %1$s is already online at destination site %2$s. ERROR: No sites defined. Sites must be defined to use this option. ERROR: Resource group %1$s is in the %2$s state To troubleshoot why the resource group went into this state, please refer to the hacmp.out log file and the documentation. ERROR: Resource Group %1$s did not move to node %2$s. It is currently on node %3$s. To troubleshoot why this occurred, please refer to the hacmp.out log file and the documentation. ERROR: Resource Group %1$s did not move to site %2$s. It is currently on site %3$s. To troubleshoot why this occurred, please refer to the hacmp.out log file and the documentation. %1$s: HAGEO installation detected. HAGEO does not support DARE in an active cluster. ERROR: Cannot refresh clcomdES subsystem on node %1$s %1$s: Site definitions detected in the installation. DARE is not supported in an active cluster with sites defined. %1$s: RG Dependency definitions detected in the installation. DARE is not supported in an active cluster with RG Dependencies defined. %1$s: ERROR: Unable to perform a Dynamic Reconfiguration operation. The active cluster definition stored in %2$s is either empty or corrupted. The continued operation and integrity of PowerHA SystemMirror on this node (%3$s) cannot be guaranteed. It is recommended that this node be rebooted as soon as possible. You are advised to call IBM support if this does not resolve the issue. %1$s: Detected changes to service IP label %2$s. Please note that changing parameters of service IP label via a DARE may result in releasing resource group %3$s. %1$s: Resource group Dependency definitions detected in the installation. The required minimum level of the cluster.es.server.rte fileset for DARE with resource group dependency configuration is %2$s on all cluster nodes. %1$s: Detected changes to netmask(/prefix length) attribute of service IP label %2$s. Please note that changing netmask(/prefix length) parameter of service IP label via a DARE is not supported. %1$s: ERROR: Invalid address family for IP address "%2$s". ERROR: Destination node %1$s is not able to acquire resource group %2$s. Please check the cluster configuration if any dependencies are configured with the resource group. ERROR: Resource group %1$s cannot be moved because resource groups that depend on it are still online. Resource groups listed below should be brought offline prior to moving resource group %2$s. %1$s: Detected changes to network attribute %2$s. Please note that changing parameters of network attribute via a DARE may result in releasing resource group %3$s. Usage: %s UP DOWN UNKNOWN JOINING LEAVING UNSTABLE STABLE ERROR RECONFIG Address: %1$s Label: %2$s State: Network Name: %1$s State: Node Name: %1$s State: Obtaining information via SNMP from Node: %1$s... 
Cluster Name: %1$s Cluster State: Cluster Substate: %1$s: Unable to discover the name of the local node. Please check the cluster configuration. %1$s: Unable to determine any nodes with active cluster services. %1$s: Waiting for the Cluster SMUX Peer Daemon (clsmuxpdES) to stabilize... Error obtaining information via SNMP. Exiting. Usage: %s [-a] [-c] [-C] [-f true|false] [-g] [-h] [-i] [-l] [-o odmdir] [-r] [-R] [-s] [-N filename] -n filename [-m methodlist] [-d description] [-e] Usage: %1$s [-a] [-c] [-C] [-d description] [-e] [-f true|false] [-g] [-h] [-i] [-l] [-m methodlist] -n filename [-N filename] [-o odmdir] [-q] [-r] [-R] [-s] [-t] %1$s: Restoring default configuration from active configuration. %1$s: ERROR: 'reset' option (-t) is only valid with 'create'. %1$s: ERROR: Cannot use 'reset' option (-t) in an active cluster. %1$s: %2$s error(s) found. 'force' flag has been specified, continuing. %1$s: Synchronizing cluster configuration to all cluster nodes... %1$s: Modification of /etc/syslog.conf failed on node %2$s. %1$s: Refresh of syslogd subsystem failed on node %2$s. COMMAND %1$s returned %2$s. %1$s: Failed executing command: %2$s on node %3$s. Aborting. Collecting GPFS configuration data. GPFS configuration data from node: %1$s. GPFS configuration from GPFS commands End of GPFS configuration data. ERROR: Failed reset of cluster tunables. Failed operation on %1$s, error code was %2$s %s: Resetting cluster tunables. %s: Succeeded resetting of cluster tunables. %s: ERROR: Cannot reset cluster tunables in an active cluster. %s: ERROR: Reset option (-t) is only valid with the create option (-c). Usage : %s [-m] [-l] [-s] [-b] [-i] %1$s: Cluster snapshot will be created with name [%2$s]. WARNING: Cluster %1$s failed while running event [%2$s], exit status was %3$s Check hacmp.out on node %s for errors. Check hacmp.out on this node for errors. %1$s: Volume group %2$s not found. Could not run clfilecollection -u on node %s. Usage : %s [-a] [clstrmgrES|clinfoES|clsmuxpdES|clcomdES] Usage : %1$s [-m] [-b] [-D] [-i] [-G] [-C arg] [-v] Usage : %s [-boot] [-i] [-b] [-N | -R | -B] [-r] Disk discovery file zero length or not found, please do: smit PowerHA SystemMirror Extended Configuration Discover PowerHA SystemMirror-related Information from Configured Nodes %s none none %s PVID %s is not in Cross-Site LVM Mirroring Configuration false true ERROR: A change has been detected in the Resource Group Distribution Policy. Changing the distribution policy is not supported in an active cluster environment. %1$s: Completed request to suspend monitor(s) for application %2$s. %1$s: Completed request to resume monitor(s) for application %2$s. %1$s: The following monitor(s) are in use for application %2$s: %1$s: Failed to suspend monitor(s) for application %2$s. %1$s: Failed to resume monitor(s) for application %2$s. %1$s: Waiting for the Cluster SMUX peer (clstrmgrES) to stabilize... Cluster Verification detected cluster configuration errors on node %1$s. Detailed clverify output is available in file %2$s on node %1$s. Cluster verification is complete on node %1$s with %2$s errors detected. %1$s: cl_rsh had exit code = %2$s, see cspoc.log and/or clcomd.log for more information The line was successfully added to %1$s The line is already present in %1$s File %1$s does not exist ERROR: Internode Communication check failed, check the clcomd.log file for more information. 
Snapshot Message Authentication Mode is set to '%1$s'. You need to generate a key of this type via SMIT, then distribute this key to ALL nodes manually (ftp, rcp, etc.), run '/usr/es/sbin/cluster/utilities/clkeygen -kl' on ALL nodes to activate the key, and try to apply the snapshot again. ERROR: Invalid Distribution Policy. The distribution policy should be either 'node' or 'network'. WARNING: The 'network' distribution policy is deprecated and will be removed from the product in the future. WARNING: The 'network' Resource Group Distribution Policy is deprecated and will be removed from the product in the future. Changing the global Resource Group Distribution Policy... Old Policy: %1$s New Policy: %1$s LPAR CPUs: %1$s. CoD CPUs: %1$s. LPAR memory: %1$s MB. CoD Memory: %1$s MB. INFO: Increasing the RSCT heartbeat interval to %1$s minutes to facilitate On Demand resource allocation. INFO: Restoring the RSCT heartbeat interval to %1$s minutes. INFO: Processing On Demand resources for application %1$s. INFO: Available On Demand resources before processing application %1$s: INFO: Available On Demand resources after processing application %1$s: ERROR: The available number of CPUs is less than the required minimum CPUs for resource group %1$s. ERROR: The available amount of memory is less than the required minimum memory for resource group %1$s. ERROR: %1$s command failed. Return code: %2$s ERROR: Topology refresh failed. The Topology services' DMS timer did not increase. ERROR: Topology refresh failed. Failed to restore the Topology services' DMS timer. ERROR: %1$s command timed out. The command did not complete in %2$s seconds. The following On Demand resources were acquired for application %1$s. LPAR CPUs: %2$s CoD CPUs: %3$s LPAR memory: %4$s MB CoD memory: %5$s MB .LPAR CPUs: %1$s CoD CPUs: %2$s LPAR memory: %3$s MB CoD memory: %4$s MB . whichever node has the most free memory whichever node has the least CPU usage whichever node has the least disk usage placement is decided dynamically using a user-specified resource variable WARNING: Node %1$s does not appear to have any network interfaces. WARNING: No interface name found for %1$s on %2$s. WARNING: Interface %1$s does not exist on node %2$s. WARNING: No applications found on cluster. WARNING: Resource group %1$s not found. WARNING: No application start script configured for application %1$s. WARNING: No application stop script configured for application %1$s. WARNING: Service label '%1$s' was not found on this cluster. WARNING: Service label '%1$s' does not exist on this cluster. WARNING: Unable to determine cluster status. WARNING: Unable to determine cluster substate. WARNING: '%1$s' was found but a problem occurred while attempting to fetch state. WARNING: Unable to locate interface '%1$s' on node '%2$s'. WARNING: An unknown problem occurred while executing the ip_of_interface subroutine. ERROR: Unable to find the network '%1$s' in the Management Information Base. WARNING: Application '%1$s' does not belong to any resource group. WARNING: Could not determine the state of application '%1$s'. ERROR: Unable to determine the dynamic node priority policy for application '%1$s'. Application '%1$s' belongs to a resource group which is configured to run on all its nodes simultaneously. No fallover will occur. ERROR: An unknown problem occurred in subroutine 'print_fallover_node'. ERROR: '%1$s' is not a service label. 
up down unknown joining leaving unstable stable error reconfig active inactive online offline offline/unknown acquiring releasing process notify fallover WARNING: translate() was called on the word '%1$s' which is not in the translation table. on first available node on home node only on all available nodes to next priority node in the list using dynamic node priority to determine target bring offline on error node never if higher priority node becomes available ERROR: SNMP information file %1$s not found. Cluster: %1$s Cluster services: %1$s State of cluster: %1$s Substate: %1$s APPLICATIONS Cluster %1$s provides the following applications: Nodes configured to provide %1$s: Nodes currently providing %1$s: Node currently providing %1$s The node that will provide %1$s if %2$s fails is: %1$s is started by %2$s %1$s is stopped by %2$s Resources associated with %1$s: No resources are associated with %1$s. This application is part of resource group '%1$s'. The resource group policies: Startup: %1$s Fallover: %1$s Fallback: %1$s Interfaces configured to provide %1$s: with IP address: %1$s on interface: %1$s on node: %1$s on network: %1$s Shared Volume Groups: Concurrent Volume Groups: Filesystems: AIX Fast Connect Services: No application monitors are configured for %1$s. Application monitors for %1$s: Monitor name: %1$s Type: %1$s Monitor method: %1$s Monitor interval: %1$s seconds Hung monitor signal: %1$s Process monitored: %1$s Process owner: %1$s Instance count: %1$s Stabilization interval: %1$s seconds Retry count: %1$s Restart interval: %1$s Failure action: %1$s Notify method: %1$s Cleanup method: %1$s Restart method: %1$s TOPOLOGY %1$s consists of the following nodes: Network interfaces: with IP address: %1$s on interface: %1$s on node: %1$s on network: %1$s Restore PowerHA SystemMirror Configuration Database from Active Configuration and try again ERROR: Cannot refresh clcomdES subsystem Usage: %s [-d | -s -i -N -b -V -R -S] This screen cannot be accessed because the local node has not been properly configured. Please synchronize the cluster and ensure that all errors have been resolved. The %s utility is no longer supported. Please use the "smit PowerHA SystemMirror" facility to perform the required functionality. Updating ODM stanzas for Automatic Error Notifications... Usage: %s [-a|-v] [clstrmgrES|clinfoES|clcomdES] Status of the RSCT subsystems used by PowerHA SystemMirror: Status of the PowerHA SystemMirror subsystems: Status of the optional PowerHA SystemMirror subsystems: Status of the CAA subsystems: Details of PowerHA SystemMirror cluster manager: Following this event you might see resource group acquire and release events that would result in moving affected resource groups. Configure pre/post events to resource_state_change and resource_state_change_complete events to customize pre-resource group move and post-resource group move actions. This event concludes the resource group movements. Configure pre/post events to resource_state_change and resource_state_change_complete events to customize pre-resource group move and post-resource group move actions. WARNING: Cluster services on node %1$s have been set to start with 'manually process resource groups' option. Because of this, the resource groups will not come online after the cluster services have started. Use the 'PowerHA SystemMirror Resource Group and Application Management' SMIT screen under 'System Management (C-SPOC)' to bring the OFFLINE resource groups to the ONLINE state. This might take some time. Please refer to %1$s for more information. 
ERROR: The RSCT services cannot be stopped on a node where PowerHA SystemMirror is actively managing the resources. Please note that stopping RSCT services will also affect the other RSCT dependent services such as the Enhanced Concurrent Volume Groups. WARNING: There are Enhanced Concurrent Volume Groups varied on on this node. Stopping the RSCT services will varyoff the Enhanced Concurrent Volume Groups which may affect the running applications. If you want to continue with stopping RSCT services, please retry by selecting the 'Force' option in the SMIT screen. Stopping grpsvcs... Stopped grpsvcs. Stopping topsvcs... Stopped topsvcs. There were errors while stopping the RSCT services. Please check the logs above Usage: cl_add_comm_devices [Nodename Devicename ] Committing any changes, as required, to all available nodes... Usage: %s -n -w -a 'App_IP_Addr1=Boot_Addr1...' [-d [YES] ERROR: Stopping RSCT services is allowed only on the local node. ERROR: RSCT services are not active on the local node. WARNING: Group Services on the local node are not currently active. WARNING: Topology Services on the local node are not currently active. ERROR: Failed to send stop request to the cluster manager process. ERROR: The cluster manager process did not stop as expected. The RSCT services will not be stopped. %s: ERROR: The cluster is in migration. Please select only the local node to start the cluster services until the migration is complete. Cluster manager subsystem (clstrmgrES) is not active. Updating the subsystem definition. Cluster manager subsystem (clstrmgrES) is active, and cluster services are inactive. The subsystem will be restarted automatically with the new debug level settings. Cluster manager subsystem (clstrmgrES) is active, and cluster services are active. The subsystem definition will be updated and the new debug level settings will also take effect on the active cluster manager daemon. Resource group %1$s is online on site %2$s. Usage: %1$s [-s] ERROR: Couldn't determine local nodename. Please ensure the cluster topology has been successfully synchronized. Detected previous unexpected software or node failure during startup. Collecting First Failure Data Capture in directory %1$s. Please review the PowerHA SystemMirror logs and report any problems to IBM Service. FFDC collection could not run because of a lack of free space in /tmp, available %1$d, required %2$d. FFDC collection script failed. This is an unexpected failure in script /usr/es/sbin/cluster/utilities/cl_sel, please contact IBM Service. FFDC event log collection saved to %1$s Usage: %1$s [-a][-u] ERROR: clmkSnmpErrnotify failed to add SNMP Traps errnotify stanza FFDC collection could not run because of a lack of free space in %1$d, available %2$d, required %3$d. %s : Unable to start %s via SRC. %1$s : Unable to register NFSv4 node instance %2$s. %1$s : Unable to register service label %2$s with NFSv4 node instance %3$s. Usage : %s 'hostname_given_root_access' 'v3_exports' [ 'v4_exports' ] %1$s : Unable to unregister NFSv4 node instance %2$s. %1$s: Unable to mount %2$s. %1$s: Unable to unmount %2$s. %1$s: Failed to get the current state of WPAR '%2$s'. %1$s: WARNING: WPAR '%2$s' is in the broken state and will not be used. Resource group %3$s will be run in the global WPAR. %1$s: Unable to start WPAR '%2$s' for resource group %3$s. %1$s: Node %2$s has level %3$s of cluster fileset %4$s installed. The minimum required level for the requested operation is %5$s. %1$s: This command must be run from a node with active Cluster Services. 
DARE is not supported. Cluster services must be stopped Corrective actions will not be performed if only Synchronize is specified. Corrective actions will only be performed when the 'Verify, Synchronize or Both' field is set to either 'Both' or 'Verify' Usage: %1$s %2$s Usage: %1$s nodename resource_group_name %1$s: Failed to enable multi-node disk heartbeat network using device %2$s. %1$s: Failed to disable monitoring of multi-node disk heartbeat network using device %2$s. Usage: %1$s nodename resource_id [monitor name] %1$s: sisraidmgr failed for %2$s Fenced volume group %1$s away from node %2$s Calling notify method %1$s for loss of quorum on volume group %2$s in resource group %3$s Taking resource group %1$s offline due to loss of quorum on volume group %2$s Stopping PowerHA SystemMirror on %1$s due to loss of quorum on volume group %2$s Halting %1$s due to loss of quorum on volume group %2$s Failure in the process of finding all disk heart beat disks Volume group %1$s has no disk heart beat disks Unable to access disk heart beat disk %1$s in volume group %2$s in resource group %3$s shared with node %4$s %1$s: Failed to enable multi-node disk heartbeat network using device %2$s. %1$s: Unable to start Resource Monitoring and Control (ctrmc) via SRC %1$s: Unable to start Concurrent Logical Volume Manager (gsclvmd) via SRC %1$s: An active Cluster Manager was detected elsewhere in the cluster. This command must be run from a node with an active Cluster Manager process in order for the Dynamic Reconfiguration to proceed. %s: A communication error prevents obtaining the VRMF from remote nodes. A DARE cannot be run until this is corrected. Please ensure clcomd is running. %s: An internal parsing error in migcheck.c has prevented determining if a migration is in progress. %s: Internet MIB tree not enabled. Please refer to the PowerHA readme file for more information on how to enable it. %1$s: Dynamic Reconfiguration requires at least %2$s Kbytes free in /tmp on all nodes. Node %3$s has only %4$s Kbytes available in /tmp. Make sure that %5$s has at least %6$s Kbytes in /tmp and try again. %1$s: Error: Activate NFS crossmounts should be set to true while starting cluster services with Manage Resource Groups set to Automatically. %1$s: Warning: NFS crossmounts will be activated for Resource groups that are active on the remote node. %1$s: Interactive correction of errors cannot be honored while starting cluster services at system boot time. Hence no interactive error correction will be performed. %1$s: Cluster services will be started manually. Hence Resource groups will not be brought online. Please bring Resource groups online manually. %1$s: Warning: You have selected to start cluster services during system boot time. Interactive correction of errors at system boot time cannot be honored. No error correction will be performed. %1$s: An available adapter for service label %2$s on network %3$s could not be found. %1$s: ERROR: Unable to perform a Dynamic Reconfiguration operation. The cluster is not in STABLE state. There might be events running on the cluster. Please retry after the cluster comes to a stable state. %1$s: Volume group %2$s could not be fenced away from node %3$s %1$s: Volume group %2$s fence height could not be set to %3$s %1$s: Unable to access disk %2$s in volume group %3$s in resource group %4$s shared with node %5$s %1$s: Problem refreshing Timer Services. %1$s: Configuring a %d node cluster in AIX may take as long as %d minutes. Please wait. 
%1$s: Error: CAA cluster services are not active on this node. %1$s: Error: RSCT cluster services (cthags) are not active on this node. %1$s: Try bringing up CAA and RSCT with the following command: startsrc -g caa. %1$s: Contact IBM support if the problem persists. %1$s: (rc=%2$d) Failed to define tie breaker disk %3$s%1$s: (rc=%2$d) Failed to configure RSCT Quorum Tie Breaker%1$s: (rc=%2$d) Failed to configure CAA site merge policy%1$s: (rc=%2$d) Failed to configure RSCT Quorum Tie Breaker%1$s: (rc=%2$d) Failed to configure RSCT Critical Resource Protection Method%1$s: (rc=%2$d) Failed to configure RSCT Quorum Type%1$s: (rc=%2$d) Failed to configure RSCT Critical Mode%1$s: (rc=%2$d) The disk %3$s cannot be configured with the reserve policy (PR_exclusive) required for it to function as a Tie Breaker %1$s: Failed to stop clxd service on node '%2$s'. rc=%3$d. %1$s: clxd daemon could not be stopped after %2$d retries. %1$s: Failed to start clxd service on node '%2$s'. rc=%3$d. %1$s: clxd daemon could not be started after %2$d retries. %1$s: (rc=%2$d) Failed to create Reservation file for the PowerHA_NFS_TieBreaker profile %1$s: NFS Tie-Breaker Parameter missing. %1$s: (rc=%2$d) Failed to create PowerHA_NFS_TieBreaker profile%1$s: WARNING: Consistency Group "%2$s" has Change Volumes defined. This use is allowed, but not supported by PowerHA SystemMirror, and may cause later failures.%1$s: Tie breaker disk %2$s cannot be accessed '%1$s': ERROR: WPAR '%2$s' is not active. Usage: clsnapshotinfo [-m [ [-c] Resume cluster event processing after a cluster event fails. Default action is to resume event processing at the next step. Specifying -c will cancel any remaining event processing. Note that using the -c option will put any resource groups that are in indeterminate states into the ERROR state and will require manual recovery. %1$s: ERROR: the -c option is not supported in a mixed version cluster. You must complete the migration to version %2$d before using this option. %1$s[%2$d]: No twin found for %3$s. %1$s[%2$d]: statd is not up on the local node %1$s: An error occurred while attempting to add a default first adapter to the new cluster. Location of cluster.log file could not be found. Please report this problem to IBM support. Not able to restart the syslogd daemon. Try restarting the syslogd daemon using the following commands %1$s If the problem persists, contact IBM support. Unable to set preferred read copy %1$s: lsfs %2$d returns %3$d%1$s: Refresh of subsystem %2$s is failed with following error. %3$s %1$s: Removing cluster snapshot: %2$s ERROR: Multiple Resource Groups are not allowed with -a option. WARNING: LVM Preferred Read for volume group %1$s is set to %2$s, but the associated storage location, %3$s, is not configured for any mirror pool copy. Hence the LVM Preferred Read setting will be overridden as roundrobin so that AIX will decide which copy needs to be used while reading the data. %1$s: (rc=%2$d) Failed to configure RSCT critical daemon restart grace period. %1$s: (rc=%2$d) Failed to configure RSCT critical daemon restart grace period for node %3$s. %1$s: (rc=%2$d) Successfully configured RSCT critical daemon restart grace period for node %3$s. ERROR: Cannot perform clRGmove operation for both concurrent and non concurrent resource groups at a time. 
%1$s (rc=%2$d) Failed to create %3$s profile%1$s:%2$d (rc=%3$d) Failed to configure RSCT Quorum Tie Breaker to %4$s%1$s:%2$d (rc=%3$d) Failed to configure RSCT Quorum Tie Breaker while setting PostReserveWaitTime%1$s: Info: You have selected to start cluster services during system RESTART time. Cluster services would not be started now. %1$s: File system %2$s is mounted on one of the nodes in the cluster, hence skipping mounting the file system. Usage: %s [-h][-C][-D][-v] [-o ODMDIR] -N "node1:node2:..." DEBUG working directory available at: %s Error: The following node(s) %s do not participate in the active cluster configuration. Cluster services are running on nodes: %s Creating cluster snapshot %s on node: %s Error: Unable to verify the local cluster configuration. WARNING: The PowerHA SystemMirror log path for autoverify.log does not exist in the PowerHA SystemMirror configuration. This type of problem usually occurs during a migration, please check the /tmp/clconvert.log file for errors. Error: Unable to transfer ODM: %s from node %s to local directory %s WARNING: Unable to contact node: %s while attempting to gather LPP package information for mixed service level detection. WARNING: Node %s does not match the configuration on active node: %s Error executing: "%s" Error: %s: directory not available. Error: Unable to get local host name. Error: %s: unable to copy file. Error: %s: unable to create file. Error: No nodes in cluster or no access to cluster ODM. Error: Write access denied for %s Error: Must be root to run this program. Error: %s ${MSG[1]}Error: Unable to synchronize HA configuration from node %s to %s. Please check to ensure connectivity exists between these two nodes. %s %s (expected %s) ERROR: Found mismatched package levels on node "%s" Please upgrade the following packages: %s WARNING: Found mismatched package levels on node "%s" Please upgrade the following packages: %s Verifying Cluster Configuration Prior to Starting Cluster Services. Verifying node(s): %s against the running node %s There are no active cluster nodes to verify against. Verifying node(s): %s requested to start Cluster services will not start on node(s): %s Please see the above verification errors for more detail. A trace of the verification warning/error messages above is available in the file: %s/clverify.log Successfully verified node(s):%s Error: Unable to create a snapshot on node: %s. Log file %s contains detailed information about this failure. WARNING: Node(s): %s requested to start cluster services. These nodes are already running cluster services and will not be started. WARNING: Node: %s does not match the cluster configuration of node: %s Synchronizing cluster configuration from node: %s to node: %s ERROR: Cannot find node(s): %s. Please ensure all requested nodes are online.A snapshot for node %s was already created today, a new one will not be created ERROR: The following node(s): %s do not participate in the active cluster configuration on node: %s and will not be started. ERROR: There are no nodes to verify. All specified nodes were dared out of the cluster configuration on node: %s. WARNING: The following node(s): %s are also running automatic verification and synchronization Automatic verification and synchronization will not occur prior to starting cluster services on the selected node(s): %s. Automatic verification and synchronization is disabled, all selected node(s) will start cluster services. ERROR: A change to the local cluster configuration has been made without synchronizing. 
Automatic verification and synchronization is disabled, therefore cluster services will not start on the selected node(s). Please perform verification and synchronization prior to starting cluster services, either by manually verifying and synchronizing, or by enabling automatic verification and synchronization and then starting cluster services. ERROR: Unable to validate local cluster configuration. Please check to ensure the PowerHA SystemMirror cluster is defined on the local node prior to starting cluster services. WARNING: The following node(s): %s have changed their local configuration and will be synchronized with the active node: %s prior to being started. Any configuration changes made to these nodes will be lost. ERROR: Node(s): %s are not synchronized with the active cluster node: %s and automatic verification and synchronization is disabled. Please enable automatic verification and synchronization in the PowerHA SystemMirror SMIT Extended Configuration, Extended Cluster Service Settings menu, then restart the selected nodes. ERROR: A change to the local cluster configuration has been made without synchronizing. Since automatic verification and synchronization is running on another node, cluster services will therefore not start on the selected node(s). Please perform verification and synchronization prior to starting cluster services, either by manually verifying and synchronizing, or by enabling automatic verification and synchronization and then starting cluster services. Cluster services will not start on node(s): %s Please see the errors for more detail, or a trace of the above Errors/warnings is available in %s/autoverify.log Utility %s has not been implemented for Linux. Cluster services are running at different levels across the cluster. Verification will not be invoked in this environment. Active node(s) need to be synchronized prior to starting inactive one(s)! Please synchronize the cluster and try again. No active node in cluster to synchronize with! Please check your configuration. ERROR: Changes have been made to the Cluster Topology or Resource configuration. The Cluster Configuration must be synchronized before starting Cluster Services. Network name for dependent IP label %s is not specified Verifying the configuration Verification failed Application "%s" is already a part of application group "%s", it can be added only to application group "%s" clssaaconf: Usage error. PowerHA SystemMirror installation is detected, this feature functionality is not supported when PowerHA SystemMirror is installed Node is in Stable State, continue Node is in Unstable State, waiting for Stabilization Command %s failedApplication %s is UP, cannot %s it, please stop the application first, then update/remove it Usage: %s [-s Application] [-g Dependency Group] %s is NOT found: %s Application %s is ONLINE Application %s is OFFLINE Starting Application %s Stopping Application %s Start was successful for Application %s Start was UNsuccessful for Application %s Stop was successful for Application %s Stop was UNsuccessful for Application %s Done Update in progress, please wait ...ERROR: Custom Monitor "%s" is not found in the configuraionApplication "%s" is configured in application dependency. 
Therefore, it cannot be removed, please remove it from the application dependency first Usage: clca_nfsutil addrg -R -P -T -I -V -n -N -C -S clca_nfsutil modrg -R -I -V -n -N -C -S clca_nfsutil delrg clca_nfsutil add_discover clca_nfsutil modify_discover clca_nfsutil nfs_rgs clca_nfsutil list_ips %1$s: An attempt to create the Stable Storage Filesystem through cl_crlvfs failed. Please check cspoc.log for more information on the failure. If this failure is persistent, an explicit Stable Storage can be specified instead of opting for AUTO_SELECT. %1$s: Could not detect any sharable volumes for the resource group : %2$s .%1$s: Essential arguments are missing. %1$s: Stable Storage is required for the NFSv4 exports. %1$s: This configuration assistant is designed for adding NFS exports to the cluster. Exiting because, the user did not specify any NFS exports. %1$s: Takeover list should have at least one node that is not the primary node. %1$s: Following nodes are specified, but they are not part of the cluster. %2$s Please check the cluster configuration. %1$s: clvt add resource_group failed. %1$s: Specified IP label is not a service IP label. Attempted to make it service IP label but, could not find an appropriate network to place it. Please check the network configuration. %1$s: Specified IP label is not a service IP label. Attempted to make it service IP label but, clvt add service_ip failed. %1$s: clvt modify resource_group %2$s failed. Usage: clca_nfsutil addrg -R -P -T -I -V -n -N -C -S -p clca_nfsutil modrg -R -I -V -n -N -C -S -p clca_nfsutil delrg clca_nfsutil add_discover clca_nfsutil modify_discover clca_nfsutil nfs_rgs clca_nfsutil list_ips Cluster: %1$s Cluster services: %1$s State of cluster: %1$s Substate: %1$s APPLICATIONS Cluster %1$s provides the following applications: %1$s is started by %2$s %1$s is stopped by %2$s State of %1$s:Nodes configured to provide %1$s: No nodes configured to provide %1$sNodes currently providing %1$s: Node currently providing %1$s: %2$s The node that will provide %1$s if %2$s fails is: whichever has the most free memory whichever has the least cpu usage whichever has the least disk usage this is to be decided dynamically using a user-specified RSCT resource variableResources associated with %1$s: No resources are associated with %1$s. Cluster: %1$s Cluster services: %1$s Application:Resources associated with %1$s: No resources are associated with %1$s. This application does not belong to a resource group. This application is part of resource group '%1$s'. Resource group policies: Startup: %1$s Fallover: %1$s Fallback: %1$s Service Labels {offline} Interfaces configured to provide %1$s: with IP address: %1$s on interface: %1$s device: %1$s on node: %1$son network: %1$sShared Volume Groups: Concurrent Volume Groups: Filesystems: AIX Fast Connect Services: No application monitors are configured for %1$s. Application monitors for %1$s: Application monitor of %1$s: %2$s Monitor name: %1$s Type: %1$s Monitor method: %1$sMonitor interval: %1$s seconds Hung monitor signal: %1$s Process monitored: %1$s Process owner: %1$s Instance count: %1$s Stabilization interval: %1$s seconds Retry count: %1$s Restart interval: %1$s Failure action: %1$s Notify method: %1$s Cleanup method: %1$s Restart method: %1$s TOPOLOGY %1$s consists of the following nodes: Network interfaces: WARNING: Node %1$s does not appear to have any network interfaces. WARNING: No interface name found for %1$s on %2$s. WARNING: No interface '%1$s' exists on node '%2$s'. 
WARNING: No interface '%1$s' exists on node '%2$s'. WARNING: No service label '%1$s' was found on this cluster. WARNING: Unable to determine cluster status. WARNING:'%1$s' was found but a problem occurred while attempting to fetch state. WARNING: Unable to locate interface '%1$s' on node '%2$s'. WARNING: An unknown problem occurred while executing the ip_of_interface subroutine. ERROR: Unable to find the network '%1$s' in the Management Information Base. WARNING: Could not determine the state of application '%1$s'. WARNING: Could not determine the state of application %1$s. ERROR: Unable to determine the dynamic node priority policy for group '%1$s'. Application '%1$s' belongs to a resource group which is configured to run on all its nodes simultaneously. No fallover will occur. ERROR: %1$s is not a service label. customupdownunknownjoiningleavingunstablestableerrorreconfigactiveinactiveonlineofflineoffline/unknownacquiringreleasingprocessnotifyfallover WARNING: translate() was called on the word '%1$s' which is not in translation table. on first available nodeon home node onlyusing distribution policyon all available nodesto next priority node in the listusing dynamic node priority to determine targetbring offline on error nodeneverif higher priority node becomes available ERROR: SNMP information file %s not found. %1$s: ERROR: The following errors exist in %2$s. 'ksh -n %1$s' must be clean before %2$s can be modified. Please fix the problems and run %3$s false and %4$s true %1$s: New errors would exist in %2$s. The new errors are: Reverting to original %1$s. Consult IBM Service to resolve this issue. %1$s: Unable to create backup copy of rc.nfs. %1$s: error modifying rc.nfs. Original file saved as %2$s %1$s: Unable to move old rc.nfs %2$s to %3$s Add the specified WebSMIT server(s) to this cluster's access list.Force the specified action. Suppress all confirmation prompts.Display any available help information.Perform the specified action on this node only, not on the entire cluster.Remove the specified WebSMIT server from the access list of this cluster.Perform all operations with maximum verbosity in the output.The WebSMIT server(s) that are to be added/removed.The wsm_gateway command may be used to make the appropriate changes to the nodes within a cluster to allow the specified WebSMIT server(s) to access that cluster. If no command-line options are specified, the default operation of this command is to list all currently registered WebSMIT servers. This command only needs to be invoked on one node within the cluster to register all nodes within the cluster. The registration of the remaining nodes is handled automatically. Using wsm_gateway is the first step in the overall process of adding a cluster to WebSMIT, and only needs to be performed once. The second, and final, step in the registration process occurs on the WebSMIT server itself, and may be handled directly in the GUI, via the "Add a Cluster" command available in the "Enterprise" view, or at the command-line, via the "wsm_register" command. Validating gateway specification "%1$s"... Currently registered WebSMIT gateways: The "%1$s" WebSMIT server appears to already be registered with this cluster! Successfully registered WebSMIT server "%1$s"! >>> Invoking wsm_gateway on "%1$s"..." UNREGISTERING THE FOLLOWING WEBSMIT SERVERS: WebSMIT access from these servers will no longer be possible. Proceed with the de-registration? [Yy] Removal aborted. WebSMIT server "%1$s" does not appear to be registered with this cluster. 
Successfully unregistered WebSMIT server "%1$s". ERROR: the "-a" and "-r" flags are mutually exclusive. Resolve IPNetwork Name ERROR: unable to resolve WebSMIT server "%1$s" on the network. WARNING: unable to determine the hostname for "%1$s". ERROR: no valid WebSMIT gateways were specified for the operation. ERROR: no valid operation given for the specified WebSMIT gateway(s). ERROR: failed to register WebSMIT server "%1$s"! ERROR: unable to determine the local node name. ERROR: there are no WebSMIT servers registered with this cluster. ERROR: failed to unregister WebSMIT server "%1$s"! ERROR: failed to back up the "%1$s" file. Usage: wsm_clsnap [-h] [-x] [-q] [-v] -h Usage - this message -x Turn on Debugging -q Quiet mode - suppress terminal messages -v Verbose mode - extra user messages -c Always treat this node as a client node -l Only list the files to be collected 'cp -p %1$s %2$s' returned %3$ld. wsm_clsnap is unable to collect %4$s %1$s copied to %2$s %1$s does not exist %1$s is empty This is not a client node - no PowerHA SystemMirror client filesets are installed. wsm_clsnap will collect no information. Old log files exist under %1$s Issue 'snap -r' to clean up, and re-run wsm_clsnap Insufficient free space %1$s (as returned by 'df -k') in %2$s - a minimum of %3$s is needed 'mkdir -p %1$s' returned %2$ld. wsm_clsnap is unable to create directory %3$s wsm_clsnap is collecting Files... Please Wait wsm_clsnap complete. Run 'snap -gtc' to generate a pax file. This is an PowerHA SystemMirror server node - the PowerHA SystemMirror server filesets are installed. Calling clsnap to collect server information wsm_clsnap %1$s started ERROR: Cannot synchronize cluster changes without a cluster repository defined. ERROR: You must start the clcomd subsystem. Please execute /usr/bin/startsrc -s clcomd ERROR: Problems encountered creating the cluster in AIX. Use the syslog facility to see output from the mkcluster command. ERROR: Creating the cluster in AIX failed. Check output for errors in local cluster configuration, correct them, and try synchronization again. ERROR: Updating the cluster in AIX failed. Check output for errors in local cluster configuration, correct them, and try synchronization again. ERROR: in create_CAA_config and do not have a cluster name or a node. ERROR: Missing cluster name or node name in subroutine create_CAA_config. %1$s: Configuring a %2$d node cluster in AIX may take up to %3$d minutes. Please wait. WARNING: Detected existing AIX cluster. Creation of the AIX cluster may fail. %1$s: Configuring a %2$d node cluster on site %3$s in AIX may take up to %4$d minutes. Please wait. %1$s: Extending the cluster to site %2$s in AIX may take up to %3$d minutes. Please wait. %1$s: Extending the cluster by %2$d nodes on site %3$s in AIX may take up to %4$d minutes. Please wait. ERROR: Problems encountered creating the cluster in AIX. See %1$s and %2$s for the output of the %3$s command. %1$s[%2$s]: Site %3$s does not contain any nodes %1$s[%2$s]: Cluster %3$s does not contain any nodes %1$s[%2$s]: Cannot extend cluster %3$s to a second site because the PowerHA SystemMirror local site ID %4$d does not match the existing AIX site ID %1$s[%2$s]: The PowerHA SystemMirror definition has node %3$s at %4$s but the AIX definition has it at the %5$s site. Checking for any added or removed nodes Checking for added nodes PowerHA SystemMirror Cluster Manager current state is: %sERROR: Updating the cluster in AIX failed. See %1$s and %2$s for the output of the %3$s command. 
Check the command output for errors in the local configuration, correct them, and try synchronization again. %1$s: "%2$s" cannot be resolved to a valid CAA node name. Check the contents of /etc/cluster/rhosts. The host name has been changed to "%1$s". The PowerHA SystemMirror configuration should be synchronized ("smitty cm_ver_and_sync") at the earliest opportunity An e-fix cannot be applied to CAA at this time because PowerHA SystemMirror is active. PowerHA SystemMirror cluster services should be stopped ("smitty clstop") on this node, and the emgr command retried. ERROR: node "%1$s" is not assigned to a site ERROR: node "%1$s" has been assigned to both sites Updating Split Merge Policies ERROR: node_timeout must be greater than network_fdt by at least 10 seconds. Correct these values and try synchronization again. Usage: cl_udresconfig cl_udresconfig: ERROR: Resource Type %1$s already exists in the configuration. cl_udresconfig: ERROR: Processing order (PROCESS_AT_AFTER) is not specified for resource type %1$s. cl_udresconfig: ERROR: Specified PROCESS_AT_AFTER %1$s for resource type %2$s not found. cl_udresconfig: ERROR: Resource %1$s already exists in the configuration. cl_udresconfig: ERROR: No resource type specified for resource %1$s. cl_udresconfig: ERROR: Specified resource type %1$s for resource %2$s not found either in the existing configuration or in the input xml file. cl_udresconfig: ERROR: Specify an input file.\ncl_udresconfig: ERROR: Input file %1$s not found.\ncl_udresconfig: ERROR : Input file XML validation failed. cl_udresconfig: ERROR : User Defined Resource Type verification failed. cl_udresconfig: ERROR : User Defined Resource verification failed. cl_udresconfig: ERROR : Add user defined resource type %1$s failed. cl_udresconfig: ERROR : Add user defined resource %1$s failed. halevel: ERROR: PowerHA SystemMirror does not appear to be installed halevel: usage: halevel [-h|-?] [-s] [-x] -h Usage - this message -s Show Service Pack level -x Turn on debugging %1$s: Invalid option '%2$s' %1$s: The local node name cannot be determined at this time. Private network restrictions cannot be set. %1$s: Unrestricting the following interfaces on %2$s:%1$s: There are no interfaces on private networks on %2$s} %1$s: There are no private networks defined on this cluster %1$s: The following interfaces are now restricted on %2$s:ERROR: Unable to verify outbound clcomd communication to node: %1$s Internode communication check using clcomd failed. Check the following: 1) /etc/cluster/rhosts has IP addresses for all nodes 2) clcomd subsystem is active (lssrc) 3) clcomd.log file. ERROR: Unable to verify inbound clcomd communication from node: %1$s Internode communication check with %1$s using clcomd failed. Check the following on node %2$s 1) /etc/cluster/rhosts on node %3$s has IP addresses for all nodes 2) /etc/cluster/rhosts on %4$s has IP addresses for %5$s 3) clcomd subsystem is active (lssrc) on node %6$s 4) clcomd.log file on node %7$s ERROR: Can't start the mping receiver thread on the remote node %1$s Check the clcomd.log for errors. ERROR: Multicast communication with node %1$s using mping failed. ERROR: This tool expects to use a directory named %1$s but there is already a file with that name. Saving existing %1$s to %1$s.bak ERROR: This tool expects to use the directory named %1$s, but could not create it. Check path names and permissions. Verifying clcomd communication, please be patient. Verifying multicast communication with mping. 
Verifying IPv6 multicast communication with mping. Errors encountered verifying communications between nodes. Verifying COMMUNICATION_PATH and CAA Hostname. ERROR: Skipping multicast communication as enough nodes are not available. ERROR: Site does not have any nodes. Multicast communication is not possible. Multicast communication verification between nodes passed. No UUID corresponding to Raw Disk %1$s was found Raw Disk %1$s is not an HACMP resource - no action taken in response to %2$s %3$s A failure on %1$s has already been processed clRMupdate resource_sel_fallover %1$s %2$s %3$s %4$s in response to Event Manager event ERROR: Problem encountered querying the multicast address from AIX. ERROR: A problem occurred setting the %1$s attribute for device %2$s on node %3$s %1$s: Node %2$s has lost access to repository disk %3$s replace the bad disk and try again. Hint: You can use smitty cm_replace_repos.select.dialog to replace the bad disk. WARNING: Repository disk is down for %1$s. As Force option is specified as "true", returning 0 so that cluster services can start. Cluster may not function properly in all the scenarios and cannot perform configuration changes. ERROR: Repository disk is down for %1$s. Cluster is not started. It is not suggested to start the cluster with repository disk down as cluster may not function correctly in all the scenarios and cannot perform configuration changes. If you still want to start the cluster then use FORCE="true" in clmgr command or make Ignore verification errors flag "true" from smitty. Repository disk is down on node(s): %1$s. Use lscluster -d to check the repository state on each node to ensure it has access to a working repository. Hint: You can use "smitty cl_replace_repository_nm" or "clmgr replace repository" command to replace the bad disk. A Merge policy of "Tie Breaker" requires a Split policy of "Tie Breaker" A Merge policy of "Majority" requires a Split policy of "None" A "Tie Breaker" policy has been specified, but no Tie Breaker configured A Tie Breaker has been configured, but no policy uses it. The Tie Breaker is ignored The PowerHA SystemMirror split and merge policies have been updated. The configuration must be synchronized to make this change known across the cluster. The change becomes known only when cluster services are restarted on all nodes. Maximum number of notifications and the default surviving site must both be chosen ERROR: %1$s : %2$s : Node %3$s on Cluster %4$s has lost access to repository disk %5$s. Please recover from this error or replace the repository disk using smitty. %1$s: %2$s : Access to repository disk has been restored on Node %3$s %1$s: %2$s : Unable to determine repository disk state. Please check CAA cluster status to verify repository disk state. SplitMerge%1$s: A "TieBreaker" %2$s policy has been defined, but no tie breaker has been identified. The split/merge policies are not in effect. Notify method "%1$s" is not a valid executable routine Notify interval set to %d Maximum notifications set to %d A Split policy of "Manual" requires a Merge policy of "Manual" Current policies are: The disk %1$s cannot be configured with the reserve policy (PR_exclusive) required for it to function as a Tie Breaker %1$s: A disk name must be supplied %1$s: Disk name %2$s is not valid. 
A valid disk name must be supplied %1$s: Persistent Reserve on %2$s %3$s held by %4$s in %5$s mode, key is %6$s %1$s: Persistent Reserve on %2$s %3$s in %4$s mode cannot be resolved to any node %1$s: Disk %2$s %3$s does not have a persistent reserve in place %1$s: The "%2$s" command returned return code %2$s. The disk Persistent Reserve key cannot be determined (Current Tie Breaker disk)There is no manual split or merge policy in place No manual action is required A manual response is required to a split or merge event. PowerHA SystemMirror will take no further action until a manual response is supplied. You can use the "smitty cl_smm_response" function, or, to have this site continue, enter /usr/es/sbin/cluster/utilities/cl_sm_continue To have the recovery action - %s - taken on all nodes on this partition, enter /usr/es/sbin/cluster/utilities/cl_sm_recover The opposite choice must be entered on the other partition %1$s: Attempt to determine the cluster state with '%2$s' was not successful. See %3$s for the output of that command. A Split policy of "TieBreaker" and a Merge policy of "Manual" is not allowed. PowerHA System Mirror: A Split or Site failure has occurred. Please use the provided interfaces to select the site to recover.PowerHA System Mirror: A Split occurred across the sites. Please review.PowerHA System Mirror: A Merge occurred between the sites. Please review.PowerHA System Mirror: A Split/Merge occurred between the sites. Please review.A Split policy of "NFS" requires a Merge policy of "NFS" A Merge policy of "NFS" requires a Split policy of "NFS" ERROR : Quarantine Policy cannot be configured without Critical Resource Group ERROR : Selected Critical Resource does not exist. Please select a valid Resource Group ERROR: Disk Fencing cannot be enabled/disabled if cluster services are active. Cluster services are active on node %1$s. ERROR : NFS Server name must be supplied ERROR : NFS mount directory must be supplied ERROR : NFS Exported directory must be supplied Split and Merge Action Plan set to "%1$s" ERROR : Split policy of "NFS" and Merge policy of "NFS" cannot be configured without NFS Server, NFS mount directory and NFS Exported directory ERROR : A Split policy of "%1$s" and Merge policy of "%2$s" is not allowed Supported Policy combinations 1. None-Majority 2. TieBreaker-TieBreaker 3. NFS-NFS 4. Manual-Manual ERROR : A Split policy of "%1$s" and Merge policy of "%2$s" is not allowed with current AIX Level in Stretched Cluster. Supported Policy combinations 1. None-Majority 2. TieBreaker-TieBreaker 3. NFS-NFS Split and Merge policies apply to Site events with current AIX Level You must first configure a Stretched or Linked cluster with sites before configuring Split and Merge policies. Default surviving site is valid for Linked Cluster only ERROR: site "%1$s" does not appear to exist Please use 'clmgr query repository' command on each node to verify new repository information. %1$s: An attempt to get tiebreaker information was not successful. Please make sure CAA and RSCT subsystems are up and running. Report this problem to IBM support if the problem persists A "Tie Breaker" policy has been specified, but no Tie Breaker is configured. The split/merge policy is ignored. ERROR : Split policy of "Cloud" and Merge policy of "Cloud" cannot be configured without bucket name and cloud service. ERROR : A Split policy of "%1$s" and Merge policy of "%2$s" is not allowed Supported policy combinations: 1. None-Majority 2. TieBreaker-TieBreaker 3. NFS-NFS 4. Manual-Manual 5. 
Cloud-Cloud ERROR : A Split policy of "%1$s" and Merge policy of "%2$s" is not allowed with current AIX Level in Stretched Cluster. Supported policy combinations: 1. None-Majority 2. TieBreaker-TieBreaker 3. NFS-NFS 4. Cloud-Cloud Removing existing cloud tiebreaker split merge policy. This will not remove the cloud bucket which is used for cloud tiebreaker operations. ERROR: Python must be installed on all nodes of a cluster for using the cloud tiebreaker feature. ERROR: Python boto3 module must be installed on all nodes of a cluster for using the cloud tiebreaker feature. %1$s: This tool expects to use the directory named %2$s, but there is already a file with that name %1$s: This tool expects to use the directory named %2$s, but could not create it. Check path names and permissions. %1$s: Saving existing %2$s to %3$s %1$s: Verifying clcomd communication, please be patient. %1$s: Internode communication check failed, check the clcomd.log file for more information. %1$s: Creating CAA cluster, please wait. ERROR: the hosts and repositories must either be specified together, or not at all. ERROR: "%1$s" could not be resolved via the /usr/bin/host command. Please make sure that "%1$s" is correctly defined in the /etc/hosts file. ERROR: the provided node list does not appear to contain the local host, "%1$s". ERROR: the number of provided replacement nodes (%1$d) does not match the number of nodes defined in the snapshot (%2$d). ERROR: unable to copy "%1$s" onto "%2$s". ERROR: the source snapshot, "%1$s", was made from a linked cluster. Linked clusters require a separate repository disk for each site. ERROR: unable to find the information for disk "%1$s" on "%2$s". ERROR: unable to generate a new cluster identifier. ERROR: designated repository "%1$s" could not be found on host "%2$s". Repository disks must be shared by all nodes in the cluster or site. Node %1$s has only one adapter on network %2$s Skipping subnet checks for this node and network. %1$s: ERROR: the requested operation requires Java 6 to be installed. %1$s: ERROR: failed setting the dominance field in HACMPsite. Please report this problem to IBM support. %1$s: Setting site %2$s as the dominant site based on policies and membership of resource group %3$s %1$s: Dynamic Reconfiguration requires at least %2$s Kbytes free in /var on all nodes. Node %3$s has only %4$s Kbytes available in /var. Make sure that %5$s has at least %6$s Kbytes in /var and try again. %1$s: Removing current PowerHA SystemMirror cluster information... %1$s: ERROR: Unlock DARE locks called while cluster is not stable. %1$s: ERROR: Cannot refresh "clxd" subsystem on node %2$s. %1$s: ERROR: Syncing the clxd config failed. Check output for errors in local cluster configuration, correct them and try verify and sync again. %1$s: ERROR: Refreshing the clxd config failed. Check output for errors in local cluster configuration, correct them and try verify and sync again. ERROR: the directory %1$s does not exist or is not writable on the node %2$s. Please specify a different directory or create it manually and run this command again. NOTE: You must create this directory locally on all nodes for proper functionality. ERROR: This operation requires a Java Runtime Environment (minimum version 5). Please install it and try again later. Executing custom snapshot method: "%1$s"# No candidate Resource Groups found ERROR: Resource group %1$s cannot be brought offline on %2$s due to node collocation policies with the listed resource groups. 
You can take the resource groups listed above offline prior to bringing this resource group offline, take the %1$s instance on other nodes offline, or use All_Nodes_in_Group selection. Usage: clRGmove -g [-n | -r | -a] [-m | -u | -d] [-i] [-s true | false ] %1$s: A snapshot cannot be applied with cluster services active on any node. %1$s: Error changing the description. Remote node: "%1$s" ("%2$s", "%3$s") Local node: "%1$s" ("%2$s", "%3$s") Cluster services status: "%1$s" ("%2$s") Remote communications: "%1$s" Cluster-Aware AIX status: "%1$s" Usage: %s [-a|-v] [clstrmgrES|clinfoES|clcomd] Consider creating a traditional, ODM-based snapshot instead. ERROR: the current cluster configuration has security settings enabled. Currently, cluster security settings are not stored in XML-formatted snapshots. ERROR: the current cluster configuration has resource group dependencies configured. Currently, resource group dependencies are not stored in XML-formatted snapshots. ERROR: the current cluster configuration has mirror groups configured. Currently, mirror groups are not stored in XML-formatted snapshots. ERROR: the current cluster configuration has storage systems configured. Currently, storage systems are not stored in XML-formatted snapshots. ERROR: the current cluster configuration has storage agents configured. Currently, storage agents are not stored in XML-formatted snapshots. ERROR: the current cluster configuration has LDAP clients configured. Currently, LDAP clients are not stored in XML-formatted snapshots. ERROR: the current cluster configuration has LDAP clients configured. Currently, LDAP clients are not stored in XML-formatted snapshots. %1$s: A snapshot cannot be applied with cluster services active on any node. Usage: clRGmove -g -n | -x -n | -r | -a [-m | -u | -d] [-i] [-s true | false ] Error: -g and any one out of -n or -x -n or -r or -a are mandatory flags ERROR: Resource groups %1$s and %2$s are defined in an "Online on Different Nodes Dependency" and so can not be handled within a single clRGmove command Usage: clRGmove -g -n | -x -n | -r | -a [-m | -u | -d] [-i] [-s true | false ] Note: "m" is the default operation %1$s: ERROR: Failed to register PowerHA SystemMirror script for AIX Live Update. %1$s: ERROR: Failed to unregister PowerHA SystemMirror script for AIX Live Update. %1$s: ERROR: Failed to register PowerHA SystemMirror kernel extensions for AIX Live Update. %1$s: ERROR: Failed to unregister PowerHA SystemMirror kernel extensions for AIX Live Update. %1$s: ERROR: Failed to find AIX Live Update commands. %1$s: AIX Live Update not enabled because PowerHA SystemMirror script '%2$s' not registered. %1$s: AIX Live Update not enabled because PowerHA SystemMirror kernel extension '%2$s' not registered as safe. %1$s: AIX Live Update not enabled because PowerHA SystemMirror kernel extension '%2$s' not registered to be preloaded. %1$s: AIX Live Update enabled. %1$s: AIX Live Update disabled. 
The PowerHA SystemMirror checking phase of a Live Update operation failed.The current state of PowerHA SystemMirror does not allow the execution of a Live Update.Fix the problems described in the Detail Data section.Retry the Live Update operation.PowerHA SystemMirror not correctly restarted after a Live Update operation.A problem exists with the CAA, RSCT or PowerHA SystemMirror Services subsystem.A problem occurred starting cluster services.PowerHA SystemMirror failed a Live Update checking operationbecause the current state did not allow such an operation:Another Live Update operation is in progress on another node of the cluster.The cluster was not in a stable state. Check the cluster state with the 'lssrc -ls clstrmgrES' command.The following Remote Physical Volumes are configured in asynchronous mode: - '%1$s' in Volume Group '%2$s' on node '%3$s' Move these Physical Volumes in synchronous mode, launch the Live Update operation, and then move them back in asynchronous mode. Move this Physical Volume in synchronous mode, launch the Live Update operation, and then move it back in asynchronous mode.A live Update operation has been performedPowerHA SystemMirror configuration has not been fully restored on the node.Check that the CAA, RSCT and PowerHA SystemMirror Services subsystems are started.Check that the cluster node is in NORMAL state with 'clmgr query node local'.Check that the GLVM remote physical volumes (RPV) are correctly configured.See the '%1$s' file for more information.Unable to connect to the following addresses to check RPV client configuration:The following Physical Volumes are configured with a 'single_path' reserve policy:ERROR: Python is not found in the system PATH. Python is required for GLVM support.The following resources were %1$s for application controllers %2$s. DLPAR memory: %3$d GB On/Off CoD memory: %4$d GB Enterprise Pool memory: %5$d GB. DLPAR processor: %6$d PU/%7$d VP On/Off CoD processor: %8$d CPU(s) Enterprise Pool processor: %9$d CPU(s). The following resources were %1$s for application controllers %2$s. DLPAR memory: %3$d GB On/Off CoD memory: %4$d GB Enterprise Pool memory: %5$d GB. DLPAR processor: %6$d CPU(s) On/Off CoD processor: %7$d CPU(s) Enterprise Pool processor: %8$d CPU(s). An error occurred while performing %1$s operation. %1$s: ERROR: %2$d GB of memory needed will exceed LPAR maximum of %3$d GB. %1$s: ERROR: %2$d CPU(s) needed will exceed LPAR maximum of %3$d CPU(s). %1$s: ERROR: %2$d Processing Unit(s) needed will exceed LPAR maximum of %3$d Processing Unit(s). %1$s: ERROR: %2$d Virtual Processor(s) needed will exceed LPAR maximum of %3$d Virtual Processor(s). %1$s: ERROR: Ratio of %2$d is not verified by needed %3$d Virtual Processor(s) and %4$d Processing Unit(s). %1$s: ERROR: %2$d Processing Unit(s) needed will exceed Shared Processor Pool size of %3$d CPU(s). %1$s: ERROR: %2$d GB of memory needed will exceed available resources. %3$d GB cannot be satisfied by On/Off CoD nor Enterprise Pool resources. %1$s: ERROR: %2$d CPU(s) needed will exceed available resources. %3$d CPU(s) cannot be satisfied by On/Off CoD nor Enterprise Pool resources. %1$s: ERROR: %2$d Processing Unit(s) needed will exceed available resources. %3$d Processing Unit(s) cannot be satisfied by On/Off CoD nor Enterprise Pool resources. %1$s: ERROR: Another %1$s script is currently running. Try again later on. %1$s: WARNING: Reducing duration to %2$d to still activate %3$d On/Off CoD resources. 
%1$s: WARNING: Reducing quantity of On/Off Cod resources to %2$d to activate the maximum of resources for 1 day. %1$s: Usage: clhmccmd -o query -a {ALL|GEN|PROC|MEM|HMC|CEC|LPAR|PROF|SPP|TRIAL|ONOFF|EPCOD|} [-l] [-H ] [-E ] [-M ] [-L ] [-P ] [-S ] clhmccmd -o change -a "attr_name1=value:attr_name2=value:..." [-H ] clhmccmd -o {acquire|release} [-f] -r {dlpar|onoff|epcod} [-m ] [-p ] [-q ] [-d ] [-t ] [-H ] clhmccmd -o halt [-L ] [-H ] %1$s: %2$d GB of DLPAR resources have been acquired. %1$s: %2$d VP(s) or CPU(s) and %3$d PU(s) of DLPAR resources have been acquired. %1$s: %2$d GB of DLPAR resources have been released. %1$s: %2$d VP(s) or CPU(s) and %3$d PU(s) of DLPAR resources have been released. %1$s: %2$d GB of On/Off CoD resources have been activated for %3$d days. %1$s: %2$d CPU(s) of On/Off CoD resources have been activated for %3$d days. %1$s: All %2$d GB of On/Off CoD resources have been released. %1$s: All %2$d CPU(s) of On/Off CoD resources have been released. %1$s: %2$d GB of Enterprise Pool CoD resources have been allocated. %1$s: %2$d CPU(s) of Enterprise Pool CoD resources have been allocated. %1$s: %2$d GB of Enterprise Pool CoD resources have been returned. %1$s: %2$d CPU(s) of Enterprise Pool CoD resources have been returned. %1$s: ERROR: unknown -%2$s parameter. %1$s: ERROR: -%2$s option requires an argument. %1$s: ERROR: missing operation. %1$s: ERROR: unsupported %2$s operation. Supported operations for -o flag are: %1$s. %1$s: ERROR: missing attribute for %2$s operation. %1$s: ERROR: unsupported %2$s attribute for %3$s operation. Supported attributes for -a flag are: %1$s %1$s: ERROR: missing resource type for %2$s operation. %1$s: ERROR: unsupported resource type for %2$s operation. Supported resource types for -r flag are: %1$s. %1$s: ERROR: missing resource quantity for %2$s operation. Provide -m, -q or -p option. %1$s: ERROR: missing request duration for On/Off CoD acquire operation. Provide -d option. %1$s: ERROR: no list of HMC defined for %2$s node. Provide -H option. %1$s: ERROR: unknown %2$s LPAR. Provide valid name through -L option. %1$s: ERROR: unknown %2$s Enterprise Pool CoD. Provide valid name through -E option. %1$s: ERROR: unknown %2$s Managed System. Provide valid name through -M option. %1$s: ERROR: unknown %2$s Profile for %3$s LPAR. Provide valid name through -P option. %1$s: ERROR: unknown %2$s Shared Processor Pool for %3$s LPAR. Provide valid name through -S option. %1$s: ERROR: %2$s LPAR does not belong to %3$s Managed System but %4$s. Provide valid name through -L or -M option. %1$s: ERROR: %2$s Managed System does not belong to %3$s Enterprise Pool CoD but %4$s. Provide valid name through -M or -E option. %1$s: ERROR: %2$s Shared Processor Pool does not belong to %3$s LPAR. Provide valid name through -S or -L option. %1$s: ERROR: HMC %2$s returns %3$s error code when running command "%4$s": %5$s %1$s: WARNING: unable to ping HMC at address %2$s. %1$s: WARNING: unable to ssh HMC at address %2$s. %1$s: WARNING: unable to query Master HMC at address %2$s from HMC at address %3$s for %4$s Enterprise Pool CoD. %1$s: WARNING: unable to change Master HMC from %2$s to %3$s for %4$s Enterprise Pool CoD. %5$s %1$s: WARNING: HMC %2$s returns %3$s error code when running command "%4$s" (attempt %5$d/%6$d): %7$s %1$s: WARNING: Master HMC has been changed from %2$s to %3$s for %4$s Enterprise Pool CoD. %1$s: WARNING: acquisition of %2$d GB of DLPAR resources has been undone. 
%1$s: WARNING: acquisition of %2$d VP(s) or CPU(s) and %3$d PU(s) of DLPAR resources has been undone. %1$s: WARNING: release of %2$d GB of DLPAR resources has been undone. %1$s: WARNING: release of %2$d VP(s) or CPU(s) and %3$d PU(s) of DLPAR resources has been undone. %1$s: WARNING: acquisition of %2$d GB of On/Off CoD resources has been undone. %1$s: WARNING: acquisition of %2$d CPU(s) of On/Off CoD resources has been undone. %1$s: WARNING: release of %2$d GB of On/Off CoD resources has been undone. %1$s: WARNING: release of %2$d CPU(s) of On/Off CoD resources has been undone. %1$s: WARNING: acquisition of %2$d GB of Enterprise Pool CoD resources has been undone. %1$s: WARNING: acquisition of %2$d CPU(s) of Enterprise Pool CoD resources has been undone. %1$s: WARNING: release of %2$d GB of Enterprise Pool CoD resources has been undone. %1$s: WARNING: release of %2$d CPU(s) of Enterprise Pool CoD resources has been undone. %1$s: ERROR: Failed to get the Managed System when running command "%2$s": %3$s %1$s: ERROR: The total number of virtual processors must be at least equal to the whole number of the total number of processing units, rounded up for any fraction. At least %2$d virtual processors must be present for %3$d processing units. %1$s: ERROR: Another %1$s script is currently running. Will wait a maximum of %2$d sec. %2$s: ERROR: Waited for %1$d sec, another %2$s script is still running. Try again later. %1$s: ERROR: no list of NovaLink defined for %2$s node. Provide -H option. %1$s: WARNING: unable to ping NovaLink at address %2$s. %1$s: %2$s GB of DLPAR resources have been acquired. %1$s: %2$d VP(s) or CPU(s) and %3$s PU(s) of DLPAR resources have been acquired. %1$s: WARNING: acquisition of %2$d VP(s) or CPU(s) and %3$s PU(s) of DLPAR resources has been undone. %1$s: WARNING: The operation completed only partially, so the acquisition of %2$s GB of DLPAR resources has been undone. %1$s: %2$s GB of DLPAR resources have been released. %1$s: %2$d VP(s) or CPU(s) and %3$s PU(s) of DLPAR resources have been released. %1$s: WARNING: release of %2$d VP(s) or CPU(s) and %3$s PU(s) of DLPAR resources has been undone. %1$s: WARNING: release of %2$s GB of DLPAR resources has been undone. %1$s: ERROR: Waited for %2$d sec on asynchronous release of resources, but still in progress. Could not proceed; try again later. %1$s: WARNING: unable to change Master HMC from %2$s to %3$s for %4$s Enterprise Pool CoD. %5$s.. Now trying with force option. %1$s: %2$d GB of DLPAR resources have been allocated. %1$s: %2$d VP(s) or CPU(s) and %3$d PU(s) of DLPAR resources have been allocated. %1$s: WARNING: allocation of %2$d VP(s) or CPU(s) and %3$d PU(s) of DLPAR resources has been undone. %1$s: WARNING: allocation of %2$d GB of DLPAR resources has been undone. %1$s: WARNING: activation of %2$d GB of On/Off CoD resources has been undone. %1$s: %2$d GB of Enterprise Pool CoD resources have been acquired. %1$s: %2$d CPU(s) of Enterprise Pool CoD resources have been acquired. %1$s: ERROR: NOVALINK %2$s returns %3$s error code when running command "%4$s": %5$s %1$s: -Information- Current Cluster Aware AIX version (bos.cluster.rte is %2$s) does not support Automatic Repository Replacement. %1$s: Unable to verify the version of Cluster Aware AIX (bos.cluster.rte) on this node. %1$s: Cluster topology on primary/backup repositories has changed. ERROR: Problems were encountered while adding backup repositories to the AIX cluster on node %1$s. See %2$s and %3$s for the output of the %4$s command. 
ERROR: Problems were encountered while synchronizing (remove operation) the backup repositories of the AIX cluster. See %1$s and %2$s for the output of the %3$s command. ERROR: Problems were encountered while synchronizing (add operation) the backup repositories of the AIX cluster. See %1$s and %2$s for the output of the %3$s command. ERROR: Problems were encountered while synchronizing the SystemMirror site information with the AIX cluster information. Please report this problem to IBM support. Node %1$s is part of the SystemMirror site %2$s which is known to CAA as site %3$s Changing the CAA site named %4$s to %5$s to match SystemMirror Not able to restart the clevmgrdES daemon on node %1$s. Try restarting the clevmgrdES daemon using the following commands %2$s If the problem persists, contact IBM support. Usage: clanalyze -a -s 'start_time' -e 'end_time' [-n ALL|node1|node2] clanalyze -a -s 'start_time' -e 'end_time' -p 'Error String' [-n ALL|node1|node2] clanalyze -a -p 'Error String' [-n ALL|node1|node2] clanalyze -a -o [all|recent] [-n ALL|node1|node2] clanalyze -a -o [all|recent] -d clanalyze -a -p 'Error String' -d clanalyze -a -s 'start_time' -e 'end_time' -p 'Error String' -d clanalyze -a -s 'start_time' -e 'end_time' -d clanalyze -a -u [-n ALL|node1|node2] clanalyze -s 'start_time' -e 'end_time' -f '/var/hacmp/log/hacmp.out' [-n ALL|node1|node2] clanalyze -s 'start_time' -e 'end_time' -x 'hacmp.out' -d clanalyze -c clanalyze -v [-n ALL|node1|node2] Example: clanalyze -a -s '2017-06-23T05:45:53' -e '2017-06-01T01:00:05' -n all clanalyze -a -p 'Diskfailure' clanalyze -a -p 'Interfacefailure' -d '/tmp/ibmsupt/hacmp/snap.Z' clanalyze -a -o all File %1$s does not exist or the file is empty. Check the path provided. %1$s is a directory. Please provide file name. Please provide integer only. No write permission in path %1$s. Path %1$s not found. Cluster is not configured. Following nodes given by user are not part of cluster : %1$s. Invalid analysis scope. Please provide 'all' or 'recent'. analysis, log daemon verification, log copy, log size modification cannot be done together. Start time and end time must be provided. You specified a new log file size of %1$d, but did not specify the log file name. Please specify the log file for which size needs to be modified.recent/all cannot be combined with start/end time. recent analysis and pattern analysis cannot be done together. Invalid arguments. Please use '-a' option for analysis. Invalid argument value %1$s for option %2$s. Unexpected flag or argument encountered. Missing argument. Please provide proper arguments. Invalid arguments or options: '%1$s'. Verification of log daemons failed. Verification of log daemons is successful. Copy of logs failed. Logs copied successfully to %1$s. Log extraction failed. Logs extraction is successful. Log file size modification failed. Logs file size modification successful. Analysis failed. Analysis completed successfully. Invalid arguments. Log extraction cannot go with log modification or daemon verification. Daemons state and configuration can not be verified when cluster is unavailable. Enough space is not available in the filesystem: %1$s. Amount of space required: %2$d kb. Amount of free space available in the filesystem: %3$d kb. Following nodes are not part of cluster or not active: %1$s. They will not be considered for analysis or extraction. Following nodes are invalid: %1$s . Analysis cannot be continued. Following nodes will be considered for analysis or extraction: %1$s. 
Provided %1$s month: %2$d is invalid. Provided %1$s day: %2$d is invalid. Provided %1$s time: %2$s is invalid. Provided %1$s time is not in the range %2$s - %3$s. File: %1$s does not exist in node: %2$s. File: %1$s does not contain provided timestamp: %2$s in node: %3$s. Removed seconds from end time. Searching with %1$s in file %2$s. Removed minutes from end time. Searching with %1$s in file %2$s. Removed seconds from start time. Searching with %1$s in file %2$s. Removed minutes from start time. Searching with %1$s in file %2$s. Uncompression failed for file: %1$s with error code: %2$d. Check the permissions, space availability and retry the operation. Unzipping failed for file: %1$s with error code: %2$d. Check the permissions, space availability and retry the operation. clanalyze does not support the provided file format: %1$s. Archive extraction failed for file: %1$s with error code: %2$d. Check the permissions, space availability and retry the operation. clanalyze does not support any format other than tar and pax for compressed files. clanalyze does not support any format other than tar, gz, pax, Z. Extraction failed for pax file: %1$s in path: %2$s with error code: %3$d. Check the permissions, space availability and retry the operation. Extraction failed for pax file: %1$s with error code: %2$d. Check the permissions, space availability and retry the operation. File: %1$s does not exist in clsnap: %2$s. File: %1$s in clsnap does not contain provided timestamp: %2$s. Provided start time: %1$s should be less than or equal to end time: %2$s. Log content extraction failed for nodes: %1$s. Log content extraction failed in clsnap for nodes: %1$s. Provided %1$s timestamp %2$s is invalid. Directory: %1$s creation failed with error code: %2$d. Check the permissions, space availability and retry the operation. No options were passed. Pass valid options to the clanalyze tool. Core dump analysis cannot be done without the analysis flag. Add '-a' along with '-u'. Core dump analysis failed. Core dump analysis completed successfully. Core dump analysis from errpt output for node %1$s is - errpt output for node %1$s does not have any CORE DUMP data. Empty node list. Either there is no cluster configured or cluster nodes are unreachable. File %1$s does not exist, or the file is empty, or the file belongs to clsnap. You provided a start time for log extraction but no end time. You must provide both start and end time. Use the '-e' flag to specify the end time. Provide the log file name to be extracted. Error pattern and analyze option 'all' cannot be accepted together. Node list and snapfile cannot be accepted together. Provided pattern: %1$s is invalid or not supported by Loganalyzer. Check IBM documentation regarding all supported error patterns. Failed to create file: %1$s. Check the permissions, space availability and retry the operation. Failed to copy file: %1$s. Check the permissions, space availability and retry the operation. Unable to find the starting or ending line for extraction. Extracted data from file: %1$s for node: %2$s is at: %3$s Log extraction was unsuccessful. Possible reasons are that timestamps are not available in the log files or the log files are missing. INFORMATION: The changes to log file %1$s will not take effect until cluster services are restarted. Log content extraction is successful for nodes: %1$s. Log copy, log daemon verification and snap file analysis or extraction cannot be accepted together. Node list and log copy options cannot be accepted together. 
Analysis option 'recent' or 'all' cannot be accepted with filename. %1$s: ERROR: Log file size %2$d is not valid. Please enter the log file size in Megabytes between 1 and 32767Provided %1$s timestamp: %2$s format is invalid. File: %1$s does not exist in path: %2$s. Command %1$s failed with exit code %2$d. stderr from the command is: %1$s WARNING: The following nodes specified in /etc/environment for the TUNABLE_ROHA_SKIP_NODE variable are not part of the cluster: %1$s Consistency Group %1$s not found in PowerHA Consistency Group List Relationship %1$s not found in PowerHA relationship list. Consistency Group name : %1$s Relationship name : %1$s Direction of mirroring : Site %1$s TO Site %2$s Direction of mirroring : Not Applicable Copy type : %1$s Mirror state : %1$s Mirror status : %1$s Relationships are not defined in PowerHA Consistency Groups not defined in PowerHA %1$s: Online Secondary node not found for %2$s. %1$s: Online Secondary node %2$s is reachable for %3$s. %1$s: Online Secondary node %2$s is unreachable for %3$s.%1$s: Usage: %2$s [-r] "RG1 RG2 ..." %1$s: Cluster is not in stable state. Exiting failover rehearsals %1$s: Following resource groups %2$s failed for online secondary node check. Resource Group : %1$s Volume Group : %1$s SVC PPRC Mirroring details: No relationship found on disk with uid %1$s. Volume Group not configured. Storage details: No SVC PPRC storages are configured in cluster %1$s: The command '%2$s' has exceeded the allotted execution time of %3$d seconds. No resource group has a mirrored volume group. No mirrored volume group configured for %1$s resource group. %1$s: Usage: %2$s [-c ] [-h] [-r] "RG1 RG2 ..." %1$s: Event completed successfully for network %2$s. Stability has been restored. WARNING: Network %1$s is experiencing continuous state changes for one or more network interfaces. Check the state of all interfaces on this network to ensure proper operation. Usage: %1$s -r "RG1 RG2 ..." [-c] [-h] ERROR: Resource group %1$s is in ERROR or ERROR_SECONDARY state on one or more nodes. You must first bring the group offline to ensure all resources are properly released before bringing it up on a different node. The move request for %1$s will be ignored. ERROR: The cluster must be in a stable state before using this utility. Wait for the cluster to stabilize and try again. ERROR: Resource group %1$s is not online or in error state. A primary or secondary instance of a resource group must be in online or in error state before you can move it offline. Check spelling and try again. Waiting for resource group state to propagate Network %1$s is stable. %1$s: Event completed successfully for network %2$s. Check network status. The administrator initiated the following operation at %1$s: Check smit.log and clutils.log for additional details. PowerHA: Cluster services started on %1$s, event serial number %2$d PowerHA: An error occurred while processing the request to start cluster services on %1$s If this problem persists, please report it to IBM support. Error: operation to list all the disk information in storage failed. Error: Starting of consistency group %1$s failed. Error: Flash copy consistency group %1$s creation failed. Error: Flash copy mapping creation for source disk %1$s and target disk %2$s failed. Error: Consistency group %1$s not found. Error: Consistency group %1$s is in empty state. Consistency group %1$s successfully stopped. Error: Consistency group %1$s already in idle or copied state. Cannot stopFlash Copy finished successfully. 
Error: No flash copy disk mapping exists in %1$s consistency group Consistency group state is %1$s. Error: Flash Copy did not seem to progress. Flash Copy in progress :%1$s percent completed. Error: Number of disks in source and target list are unequal. Error: Same disk defined in source and target disk list. Flashcopy started successfully. ERROR: Failed to propagate backup configuration file %1$s to remote nodes. ERROR: Failed to remove backup configuration file %1$s on %2$s. Try removing the file manually on failure nodes if file exists. Error: storage profile %1$s ip address %2$s unreachable. Error: %1$s storage %2$s required for %3$s backup method. Error: consistency group %1$s not configured for %2$s backup method. Error: No %1$s configured for consistency group %2$s. Error: Failed to get consistency group %1$s information. Error: Invalid %1$s %2$s in %3$s %4$s. Error: volume group "%1$s" on disk "%2$s" with UUID "%3$s" not found in any consistency group. Error: consistency group %1$s %2$s %3$s not found in volume group list. Usage: cl_cbm_aws_utils -o {upload} -b {bucket name} -f {local file path} -k {encryption} cl_cbm_aws_utils -o {query} -b {bucket name} -r {resource group} -s {start time} -e [end time] cl_cbm_aws_utils -h Options: -o Actions upload - To upload a file to cloud from local file system. query - To list the files in the given bucket. -f File name with full path which need to upload to cloud. -b AWS Simple Storage Service(S3) Bucket name to keep the backup files. -r To list the files which are specific to provided resource group. -s To list the files which are uploaded from start time. -e To list the files which are uploaded from start time to end time. -k To provide encryption algorithm for a file while uploading. -h Help Example: cl_cbm_aws_utils -h cl_cbm_aws_utils -o upload -b pha-bucket -f /home/myfile.txt -k aes cl_cbm_aws_utils -o query -b pha-bucket cl_cbm_aws_utils -o query -s "2017-01-23T10" -e "2018-01-30T11" ERROR: The given argument %1$s is not supported. Please provide valid arguments. ERROR: The provided bucket does not exist in AWS S3. File %1$s was uploaded successfully to AWS S3. ERROR: Failed to upload file %1$s to AWS S3. ERROR: Unable to access this bucket. Please check your access permissions. The given option %1$s is not supported with query. Provide start time with -s flag. Provided bucket %1$s does not have any objects. No files are uploaded to cloud with the given inputs. Available buckets are: Error: volume group should not exist on target disk "%1$s" for cloud backup method. Target disk "%1$s" is part of "%2$s" fcmap in consistency group "%3$s". ERROR: The specified volume group disks, "%1$s", do not exist in any replicated resource "%2$s". ERROR: The specified replicated resources disks, "%1$s", do not exist in any volume group "%2$s". Error: Failed to start flash copy for consistency group %1$s. Error: prestart failed for consistency group %1$s. Error: Consistency group %1$s is already in copying status. Error: Failed to get status of consistency group %1$s. Incorrect consistency group %1$s status. Error:Flash copy failed for consistency group %1$s and status is %2$s. Error: Invalid progress state for consistency group %1$s. Error: Failed to execute %1$s command. Error: Failed to get %1$s info. Error: Failure occurred while creating backup image for backup profile %1$s. Error: Failed to get replicated resource target disk names from local node. Error: No progress in flash copy in 3600 seconds for consistency group %1$s. 
Consistency group %1$s: Flash copy completed %2$s percent. Error: Failed to create backup image file from flash copy for disk %1$s. Usage: cl_cbm_backup -b {backup profile name} cl_cbm_backup -h Options: -b Backup profile name. -h Help Example: cl_cbm_backup -h cl_cbm_backup -b backup_profile_name Consistency group %1$s status is %2$s. Error: Backup is already in progress for backup profile %1$s, exiting. Backup profile :%1$s Associated storages :%1$s Associated nodes :%1$s Replicated resources :%1$s Replicated resources status: Replicated resources type :%1$s Warning: There are no Backup Profiles configured. Error: Failed to parse cloud backup profile configuration file %1$s Error: Could not open or write to file %1$s Error: %1$s does not appear to exist. Error: Invalid arguments %1$s entered; ignoring those arguments. Error: cloud backup profile configuration file %1$s not found. Error: No backup profiles exist. Error: No storage systems exist. Error: Backup Profile is expected as an input. Error: Storage name is expected as an input. Failed to execute the script %1$s. Exiting. %1$s: No action for remote backup of backup profile %2$s. %1$s: Manual intervention is required. %1$s: Warning: The consistency group %2$s is stopped and in an idling_disconnected state. The attempt to restart it has failed. So there is no remote storage copy for backup profile. %1$s: The consistency group %2$s was stopped and was successfully restarted. %1$s: Failed to get storage details for the consistency group %2$s. %1$s: Remote Copy is currently disabled for backup profile %2$s. %1$s: Failed to get backup profile details of %2$s. Cloud upload failed for disk %1$s. Flash copy finished. Image creation started. Error: Unable to upload file to AWS. Associated bucket :%1$s Storage profiles Storage name :%1$s Storage type :%1$s Reachable on IP :%1$s Cloud storage details Bucket name :%1$s Cloud service provider :%1$s Connectivity status :%1$s Flash_copy consistency group %1$s stopped for backup profile %2$s Failed to stop flash copy consistency group %1$s for backup profile %2$s Flash copy is in %1$s state. Backup profile %1$s does not have any replicated resources. The dd process %1$d was cancelled for backup profile %2$s. No dd process exists for backup profile %1$s. The upload to cloud process %1$d for backup profile %2$s was cancelled. Flash copy status : %1$s Image copy status : %1$s Upload to cloud status : %1$s Next backup schedule : %1$s Error: consistency group %1$s not found in any of the storages "%2$s". Error: No mapping configured for consistency group %1$s. Warning: Target disk "%1$s" should be shareable to all the cluster nodes. Error: Failed to get consistency group %1$s information from storage. Please make sure storage is reachable and storage role is master storage for this consistency group. No backup process to cancel. Operation to cancel backup was successful. Error: Checking bucket connection failed. Please provide a valid bucket name or check the network connectivity. Usage: cl_cbm_aws_utils -o {upload} -b {bucket name} -f {local file path} -k {aes|kms} cl_cbm_aws_utils -o {query} -b {bucket name} -r {resource group} -s {start time} -e [end time] cl_cbm_aws_utils -o {check} -b {bucket_name} cl_cbm_aws_utils -h Options: -o Actions upload - To upload a file to cloud from local file system. query - To list the files in the given bucket. check - To validate the bucket existence and bucket access in AWS cloud. -f File name with full path to upload to cloud.
-b AWS Simple Storage Service (S3) Bucket name to keep the backup files. -r To list the files which are specific to provided resource group. -s To list the files which are uploaded from start time. -e To list the files which are uploaded from start time to end time. -k To provide encryption algorithm for a file while uploading. -h Help Example: cl_cbm_aws_utils -h cl_cbm_aws_utils -o upload -b pha-bucket -f /home/myfile.txt -k aes cl_cbm_aws_utils -o query -b pha-bucket cl_cbm_aws_utils -o check -b pha-bucket cl_cbm_aws_utils -o query -s "2017-01-23T10" -e "2018-01-30T11" Image creation or cloud upload failed for one or more disks in Backup profile %1$s. Check clutils.log for more details. Copy of rootvg operation failed. Check clutils.log for more details. Flash copy operation failed for Backup profile %1$s. Check clutils.log for more details. ERROR: Failed to upload file %1$s to CLOUD storage. ERROR: The provided bucket does not exist in CLOUD storage. File %1$s was uploaded successfully to CLOUD storage. ERROR: Endpoint configuration is missing for connecting to IBM CLOUD. Please specify the endpoint with ibm_profile_endpoint= in the config file located at /.aws/config. ERROR: Unsupported CLOUD service option detected. Please check. Usage: cl_cbm_cloud_utils -o {upload} -b {bucket name} -f {local file path} -k {aes|kms|disable} -c {ibm|aws} cl_cbm_cloud_utils -o {query} -b {bucket name} -r [resource group] -s [start time] -e [end time] -c {ibm|aws} cl_cbm_cloud_utils -o {check} -b {bucket_name} -c {ibm|aws} cl_cbm_cloud_utils -h Options: -o Actions upload - To upload a file to cloud from local file system. query - To list the files in the given bucket. check - To validate the bucket existence and bucket access in AWS cloud. -f File name with full path to upload to cloud. -b Cloud storage Bucket name to keep the backup files. -r To list the files which are specific to provided resource group. -s To list the files which are uploaded from start time. -e To list the files which are uploaded from start time to end time. -k To provide encryption algorithm for a file while uploading. -c To provide the name of the cloud service. -h Help Example: cl_cbm_cloud_utils -h cl_cbm_cloud_utils -o upload -b pha-bucket -f /home/myfile.txt -k aes -c aws cl_cbm_cloud_utils -o query -b pha-bucket -c ibm cl_cbm_cloud_utils -o check -b pha-bucket -c ibm cl_cbm_cloud_utils -o query -s "2017-01-23T10" -e "2018-01-30T11" -b pha-bucket -c ibm Image creation or CLOUD upload failed for one or more disks in Backup profile %1$s. Check clutils.log for more details. ERROR: Unsupported CLOUD service option %1$s was detected. Valid CLOUD service options are IBM and AWS. Error: Failed to clean VGDA data on disk %1$s. Please perform the operation manually by referring to the AIX documentation. Contact IBM support if the problem persists. Error: Failed to create backup for backup profile %1$s. Error: Failure occurred while checking flash copy progress for consistency group %1$s. WARNING: Target disk with UUID "%1$s" is not found in the lspv output on the local node "%2$s". Please make sure the disk with the same UUID is available across all the nodes. Error: Cluster should be configured before executing the availability metrics utility. Error: No data available for report generation. Check %1$s for more details related to failure(s). Error: Failed to communicate with node %1$s in a cluster. Error: Failed to copy clavailability.log file(s) from node %1$s.
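The cl_cbm_cloud_utils usage text above describes a small backup-to-cloud workflow: validate the bucket, upload the image, then query what was stored. A minimal illustrative sketch of that sequence, using only the options listed above; the bucket name, file path, and time window are taken from the embedded examples and are placeholders, not results of a real run:
    # Illustrative only: bucket name, file path, and time window are placeholders.
    cl_cbm_cloud_utils -o check -b pha-bucket -c ibm
    cl_cbm_cloud_utils -o upload -b pha-bucket -f /home/myfile.txt -k aes -c ibm
    cl_cbm_cloud_utils -o query -b pha-bucket -s "2017-01-23T10" -e "2018-01-30T11" -c ibm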
Usage: cl_availability [-n ALL|[,]] cl_availability -d [-n ALL|[,]] cl_availability [-r ALL|[,]] cl_availability -d [-r ALL|[,]] cl_availability [-s ALL|[,]] cl_availability -d [-s ALL|[,]] cl_availability [-v [-n ALL|[,]]] cl_availability -d [-v [-n ALL|[,]]] cl_availability [-m [-n ALL|[,]]] cl_availability [-R :ALL|resource_name[,resource_name#2]] Options: -n display node centric report for the specified nodes. -d display detailed report capturing time taken for each sub event or operation. -r display resource group centric report for specified resource groups. -s display site centric report for specified sites. -v display the verification and synchronization report. -R display resource centric report. Resource type should be service_ip, volume_group, applications, filesystem, nfs or wpar_name. -m display the miscellaneous events report, such as the Network Events Report. Example: cl_availability -n ALL cl_availability -d -n node1,node2 cl_availability -R service_ip:all cl_availability -R volume_group:vg1,vg2 cl_availability -v cl_availability -m -n ALL Invalid argument(s). Check the usage and try again. Following nodes will be considered for analysis: %1$s Following nodes will not be considered for analysis, as they are not part of cluster or not active: %1$s None of the provided input nodes are part of the cluster or active. Hence analysis cannot be continued. Warning: Option -d is ignored, as it is not supported with option -R. Time taken to start cluster services on node "%1$s", triggered at [%2$s]: %3$s seconds Average time taken for recent %1$s occurrences to start cluster services on node "%2$s": %3$s seconds Data related to start cluster services is not available on node "%1$s". Time taken to stop cluster services on node "%1$s", triggered at [%2$s]: %3$s seconds Average time taken for recent %1$s occurrences to stop cluster services on node "%2$s": %3$s seconds Data related to stop cluster services is not available on node "%1$s". Failed to fetch resource groups in node: %1$s Failed to fetch %1$s in Resource Group %2$s Time taken to bring resources online in Resource Group "%1$s" on node "%2$s": %3$s seconds Time taken to bring resources offline in Resource Group "%1$s" on node "%2$s": %3$s seconds Average Time taken to bring resources online in Resource Group "%1$s" on node "%2$s": %3$s seconds Average Time taken to bring resources offline in Resource Group "%1$s" on node "%2$s": %3$s seconds %1$s %2$s at %3$s : %4$s seconds Average %1$s %2$s : %3$s seconds ResourceGroup "%1$s" is not configured in cluster or is not synchronized, hence not considered for analysis. Warning: Partial data is copied from node(s) "%1$s" and the same is considered for analysis. Possible reasons could be either a space issue or a network failure. Check %2$s for more details related to failure(s).
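The usage text above lists the cl_availability report types that can be combined. A minimal illustrative sketch using only the documented flags; the resource group names are placeholders:
    # Illustrative only: rg1 and rg2 are placeholder resource group names.
    cl_availability -d -r rg1,rg2     # detailed resource-group-centric report for two resource groups
    cl_availability -s ALL            # site-centric report for all sites (multi-site clusters)
    cl_availability -v                # cluster verification and synchronization report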
Node Centric Report: Node : %1$s Event or Operation performed : %1$s Time at which latest event occurred : %1$s Time taken for the latest event : %1$d seconds Average time taken for recent %1$d occurrences : %2$d seconds Detailed Report: Resource Group : %1$s Time at which latest event occurred : NULL Time taken for the latest event : NULL Average time taken for recent occurrences : NULL Average time taken for recent occurrences : %1$d seconds Resource Type : %1$s Resource name : %1$s Operation performed : %1$s Warning: No Resource Groups are configured in the cluster; analysis is not done Sub Operation performed : %1$s Time at which sub operation occurred : %1$s Time taken for the sub operation : %1$d seconds Average time taken for the sub operation : %1$d seconds ERROR: %1$s: Unable to find the PID for command %2$s. ERROR: %1$s: An error occurred while collecting CPU usage, please report it to IBM service. Cluster Verification Report: Memory : %1$s CPU : %1$s Processing Units : %1$s Virtual Processors : %1$s Node(s) %1$s is not in the participating nodes of Resource Group %2$s. No report is generated for these nodes Time at which event occurred : %1$s Network Name : %1$s Time taken for the event : %1$s seconds Network Events Report: Swap Adapter Events Report: Interface name : %1$s Service IP name : %1$s Time taken for the event : %1$s Network Time : %1$s seconds Application Time : %1$s seconds Storage Time : %1$s seconds Warning: This operation is supported only on a multi-site cluster. Failed to fetch sites of a cluster. Following sites are not defined in a cluster, hence analysis cannot be continued: %1$s Following sites will not be considered for analysis, as they are not part of cluster: %1$s Failed to fetch nodes in site: %1$s Site : %1$s Site Centric Report: Cluster Synchronization Report: Invalid resource type "%1$s". Valid resource types are: %2$s. Resource %1$s is either not configured or does not belong to any Resource Group. Python must be installed on the node to execute the "%1$s" command. Detailed report of DARE operations: No resource configured in the DARE. No resource unconfigured in the DARE. The combination of arguments provided is not supported. Please provide proper arguments. Argument "%1$s" is not in the expected format. Please provide proper arguments. %1$s resource is not configured in the cluster. Data related to DARE operations is not available on node: %1$s As ALL is specified in input, the report will be generated for all the %1$s in the cluster. Following inputs will be ignored: %1$s Resource name is missing. Please provide a valid resource name.
Network Time : %1$s Application Time : %1$s Storage Time : %1$s Time taken for the latest event : %1$s Average time taken for recent occurrences : %1$s Network Down events are not found. Network Up events are not found. Swap adapter events are not found. ERROR: Cluster is in the middle of migration, hence analysis cannot be performed. Time taken for the latest event (HH:MM:SS) : %1$s Average time taken for recent occurrences (HH:MM:SS) : %1$s Average time taken for recent %1$d occurrences (HH:MM:SS) : %2$s Network Time (HH:MM:SS) : %1$s Application Time (HH:MM:SS) : %1$s Storage Time (HH:MM:SS) : %1$s Time taken for the sub operation (HH:MM:SS) : %1$s Average time taken for the sub operation (HH:MM:SS) : %1$s Time taken for the event (HH:MM:SS) : %1$s Usage: cl_availability [-n ALL|[,]][-a Number][-b YYYY-MM-DDThh:mm [-e YYYY-MM-DDThh:mm]] cl_availability -d [-n ALL|[,]][-a Number][-b YYYY-MM-DDThh:mm [-e YYYY-MM-DDThh:mm]] cl_availability [-r ALL|[,]][-a Number][-b YYYY-MM-DDThh:mm [-e YYYY-MM-DDThh:mm]] cl_availability -d [-r ALL|[,]][-a Number][-b YYYY-MM-DDThh:mm [-e YYYY-MM-DDThh:mm]] cl_availability [-s ALL|[,]][-a Number][-b YYYY-MM-DDThh:mm [-e YYYY-MM-DDThh:mm]] cl_availability -d [-s ALL|[,]][-a Number][-b YYYY-MM-DDThh:mm [-e YYYY-MM-DDThh:mm]] cl_availability [-v [-n ALL|[,]]][-a Number][-b YYYY-MM-DDThh:mm [-e YYYY-MM-DDThh:mm]] cl_availability -d [-v [-n ALL|[,]]][-a Number][-b YYYY-MM-DDThh:mm [-e YYYY-MM-DDThh:mm]] cl_availability [-m [-n ALL|[,]]][-a Number][-b YYYY-MM-DDThh:mm [-e YYYY-MM-DDThh:mm]] cl_availability [-R :ALL|resource_name[,]][-a Number][-b YYYY-MM-DDThh:mm [-e YYYY-MM-DDThh:mm]] cl_availability [-p ]][-n ALL|[,]] [-b YYYY-MM-DDThh:mm [-e YYYY-MM-DDThh:mm]] Options: -n display node centric report for the specified nodes. -d display detailed report capturing time taken for each sub event or operation. -r display resource group centric report for specified resource groups. -s display site centric report for specified sites. -v display the verification and synchronization report. -R display resource centric report. Resource type should be service_ip, volume_group, applications, filesystem, nfs or wpar_name. -m display the miscellaneous events report, such as the Network Events Report. -a display average time taken for the recent number of event occurrences (default=5). The report averages over the input number of events available in the availability log. If the input number of events is larger than the number of logged events, the report is created using the logged events. -b display the event report from the given date {YYYY-MM-DDThh:mm} (if an end date is not provided, the current date is used as the end date). -e Display the event report up to the given date. Make sure the end date is greater than the start date; this option is valid only when a begin date is passed. -p Display a JSON-formatted file of CPU and memory usage for the specified application servers. If this option is used, then -n, -b, and -e are required and no other options are allowed. Example: cl_availability -n ALL cl_availability -n ALL -a 10 cl_availability -n ALL -b 2019-11-01T10:26 -e 2019-11-30T02:29 cl_availability -d -n node1,node2 cl_availability -R service_ip:all cl_availability -R volume_group:vg1,vg2 cl_availability -m -n ALL cl_availability -p ALL -n ALL -b 2019-04-01T03:14 -e 2019-04-15T09:24 cl_availability -v Start date "%1$s" is greater than end date "%2$s", which is invalid. Check input and try again. Date format "%1$s" is invalid. Check input date and try again.
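The extended usage above adds averaging (-a), a time window (-b/-e), and a JSON CPU/memory report (-p). A minimal illustrative sketch combining those flags as documented; the dates come from the embedded examples and are placeholders:
    # Illustrative only: dates and node selections are placeholders.
    cl_availability -n ALL -a 10                                            # average over the 10 most recent occurrences
    cl_availability -r ALL -b 2019-11-01T10:26 -e 2019-11-30T02:29          # restrict the resource group report to a time window
    cl_availability -p ALL -n ALL -b 2019-04-01T03:14 -e 2019-04-15T09:24   # JSON CPU and memory usage for all application servers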
Input date "%1$s" is greater than current date "%2$s" which is invalid,check input and try again. Config_too_long event name : %1$s Config_too_long Events Report:Application monitor %1$s is not in the configuration. Configuration has no application servers defined. Application server is not defined: %1$s None of the specified Application servers are defined. Internal cl_availability error, please contact IBM Software Support. Config_too_long events are not found AIX down events report:AIX down events are not foundERROR: Python must be installed for using Cross Cluster Vertification(CCV) feature. ERROR: Failed to generate commands file "%1$s". Refer "%2$s" file for more details. ERROR: Failed to generate commands file "%1$s" used to collect cluster data. ERROR: Failed to generate cluster information file "%1$s". Refer "%2$s" file for more details. ERROR: Cluster should be configured before using Cross Cluster Verification(CCV) feature. Usage: cl_ccv_comparator --f1 {filename_1} --f2 {filename_2} [-d [cluster|rg|ifix|version|resource|network|bp|gui|all]] [-o [output_file]] cl_ccv_comparator -h Options: -d Data to display cluster - Compare the cluster data and display it. rg - Compare the Resource Group data and display it. ifix - Compare ifix information and display it. version - Compare filesets information and display it. resource - Compare number of resources configured and display it. network - Compare network statistics and display it. bp - Compare backup profile parameters configured in a cluster. gui - Compare GUI server,agent and log configuration in a cluster. all - It is the default value for -d which will compare all fields between two clusters. -f1 Filename 1 with full path in json format for comparison. -f2 Filename 2 with full path in json format for comparison. -o Output filename where comparison data will be stored. -h Help Example: cl_ccv_comparator -h cl_ccv_comparator --f1 /home/cluster1.json --f2 /home/cluster2.json -d cluster -o /var/hacmp/log/ccv_comparison_data.json ERROR: The given argument %1$s is not supported. Please provide valid arguments. ERROR: Please provide different filenames. ERROR: File or path %1$s does not exist. Please check the file name or path. ERROR: Supported options for %1$s are %2$s. ERROR: Failed to update comparison data in file %1$s. ERROR: Invalid json file %1$s provided. ERROR: Provided file format %1$s is not supported by Cross Cluster Verification(CCV) utility. Provide a valid file to generate a report. Cluster configuration from: %1$s Cluster configuration not found in: %1$s ERROR: A problem occurred using cltopinfo to generate the cluster configuration for preview. Inputs specified for preview are identical. Nothing to compare. Check your inputs and try again. Warning: Command "%1$s %2$s" failed to fetch the fileset information of "%3$s" on node %4$s ERROR: File %1$s is empty. Usage: cl_ccv_collector [-f] [-s snapshot_name] [-h] Options: -f Force flag to enable cluster data collection for unsynchronized cluster -s Snapshot name from which we need to collect data -h Help Example: cl_ccv_collector -f cl_ccv_collector -s working_snapshot cl_ccv_collector -h ERROR: Cluster is not synchronized. Use force option "-f" to collect cluster data. Warning: Cluster is not synchronized but proceeding further as force option is enabled. Full path specified: Using file %1$s as a cluster snapshot. No path specified: looking for a snapshot named %1$s in %2$s File %1$s does not appear to be a cluster snapshot. Check the path provided. 
ERROR: A single snapshot must be specified to retrieve a specific section. ERROR: Requested section named %1$s is not a known section name. Known sections are: %2$s Section %1$s not found in snapshot %2$s ERROR: Section %1$s is not properly formed in snapshot %2$s Report this problem to IBM support. ERROR: Force flag is not supported with snapshot collection. Warning: Failed to get the ifix information from command: ERROR: Snapshot named %1$s is not found. When specifying a full path to a snapshot, the file name must include the .odm suffix. clsnapshot is no longer supported for direct use from the command line. Please refer to the equivalent "clmgr" command. For example: "clmgr add snapshot" is the equivalent of "clsnapshot -c". To see all available clmgr commands for snapshot, use "clmgr help snapshot". ERROR: Following cluster nodes are not reachable: %1$s Provide force option "-f" to collect cluster data related to active nodes. Warning: Following cluster nodes are not reachable: %1$s Proceeding with data collection on active nodes as force option is provided. Following are active cluster nodes: %2$s Ifixes are not installed on cluster %1$s. Ifixes are different in both the clusters. Ifixes installed on cluster %1$s are similar with that of other cluster %2$s. The %1$s filesets are not installed in cluster %2$s. The %1$s filesets are different in both the clusters. The %1$s filesets installed on cluster %2$s are similar with that of other cluster %3$s. %1$s are not configured on cluster %2$s. Number of %1$s are different in both the clusters. Number of %1$s are similar in both the clusters. The resource groups are not configured on cluster %1$s. The resource groups configured on cluster %1$s are similar with that of other cluster %2$s. The resource groups are different in both the clusters. The PowerHA SystemMirror networks are not configured in cluster %1$s. The PowerHA SystemMirror networks are different in both the clusters. The PowerHA SystemMirror networks installed on %1$s are similar with that of other cluster %2$s. The resource group dependencies are not configured on cluster %1$s. The resource group dependencies configured on cluster %1$s are similar with that of other cluster %2$s. The resource group dependencies are different in both the clusters. The backup profiles are not configured on cluster %1$s. The backup profiles configured on cluster %1$s are similar with that of other cluster %2$s. The backup profiles are different in both the clusters. %1$s are the same in both the clusters. %1$s are different in both the clusters. Ifixes are similar in both the clusters. The %1$s filesets are similar in both the clusters. The resource groups are similar in both the clusters. The PowerHA SystemMirror networks are similar in both the clusters. The resource group dependencies are similar in both the clusters. The backup profiles are similar in both the clusters. %1$s is the same in both the clusters. %1$s is different in both the clusters. GUI server configuration information is not found on cluster %1$s. GUI agent configuration information is not found on cluster %1$s. GUI server log configuration is not found on cluster %1$s. GUI agent log configuration is not found on cluster %1$s. GUI server configuration is different in both the clusters. GUI agent configuration is different in both the clusters. GUI server log configuration is different in both the clusters. GUI agent log configuration is different in both the clusters. GUI server configuration is similar in both the clusters. All of the GUI server configuration parameters in cluster %1$s are similar with that of
other cluster %2$s. GUI agent configuration is similar in both the clusters. All of the GUI agent configuration parameters in cluster %1$s are similar with that of other cluster %2$s. GUI server log configuration is similar in both the clusters. All of the GUI server log configuration parameters in cluster %1$s are similar with that of other cluster %2$s. GUI agent log configuration is similar in both the clusters. All of the GUI agent log configuration parameters in cluster %1$s are similar with that of other cluster %2$s. Warning: Failed to execute command: The CAA tunable data does not exist on cluster %1$s. Available CAA tunable values on cluster %1$s are the same in both the clusters. %1$s not found on this cluster. Warning: No password-free method of accessing %1$s from %2$s could be identified. The ssh command will be used and will prompt you to enter the password for %1$s 5 times. To avoid this in the future, consider installing Expect, or establishing an SSH key between those hosts that eliminates the need for a password. Press Enter to continue, or "Ctrl+c" to quit. ERROR: Unable to create directory: ERROR: The local node has been specified for use but the configuration is not synchronized. You must first synchronize the cluster before using this utility. The specified host %1$s appears to be part of the local cluster %2$s ERROR: Failed retrieving cluster information from host: ERROR: One or two hosts must be specified, but %1$d were given. ERROR: One or two hosts must be specified. If you specify only one host, the local cluster will be used for comparison. Otherwise, specify the hostnames of 2 nodes from 2 remote clusters for comparison. All remote hosts must be accessible using ssh and scp. Number of problems found validating inputs: %1$d Launching the data collection on %1$s Command "%1$s" failed with return code %2$d The comparison information has been stored in: ERROR: Something went wrong while generating comparison information. ERROR: Cluster %1$s failed while running event [%2$s], exit status was %3$d Manual intervention will be required to resume normal PowerHA processing. Until that time, no further cluster events will be processed. Follow local troubleshooting procedures to analyze any errors found in hacmp.out on the failed nodes. WARNING: Node %1$s has encountered a fatal event script error. Manual intervention is required. ERROR: To compare specific sections from a snapshot, two snapshots must be specified. %1$s: Saving current snapshot %2$s to %3$s The resulting snapshot may not be usable. %1$s: Successfully ran varyonvg -a command for volume group %2$s.
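The warning above about password-free access can usually be addressed with a standard OpenSSH key exchange between the two hosts. A minimal sketch, not a PowerHA command; the user and remotehost names are placeholders:
    # Standard OpenSSH key setup; user and remotehost are placeholders.
    ssh-keygen -t rsa                                  # generate a key pair if one does not already exist
    cat ~/.ssh/id_rsa.pub | ssh user@remotehost 'mkdir -p ~/.ssh && cat >> ~/.ssh/authorized_keys'
    ssh user@remotehost hostname                       # verify that no password prompt appears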