#!/bin/ksh93
# ALTRAN_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# Copyright (C) Altran ACT S.A.S. 2017,2018,2019,2021,2022. All rights reserved.
#
# ALTRAN_PROLOG_END_TAG
#
# @(#) 209bbb9 43haes/usr/sbin/cluster/events/utils/cl_activate_fs.sh, 61aha_r726, 2205D_aha726, May 30 2022 04:24 PM
#
# COMPONENT_NAME: EVENTUTILS
#
# FUNCTIONS: none
#
# ORIGINS: 27
#
#
# (C) COPYRIGHT International Business Machines Corp. 1990,1994
# All Rights Reserved
# Licensed Materials - Property of IBM
# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
#
###############################################################################
#
# Name:        cl_activate_fs
#
#              This script will activate the filesystems passed in as arguments.
#
# Returns:
#              0 - All filesystems passed were either already mounted or were
#                  successfully mounted
#              1 - One or more filesystems failed to fsck or mount
#              2 - Zero arguments were passed
#
# Argument:    list of filesystems to activate
#
# Environment: VERBOSE_LOGGING, PATH, GROUPNAME, FSCHECK_TOOL, RECOVERY_METHOD
#
###############################################################################

###############################################################################
#
# Name:        fs_mount
#
#              This function will first try to mount the filesystem. If that
#              fails, and fsck has not already been run, the function will run
#              'fsck -p -o nologredo' and then try the mount again. If fsck has
#              been run previously, the mount cannot be made to work, so the
#              event is logged as an error.
#
#              Status will be appended to a status file passed as a parameter.
#
# Returns:
#              0 - filesystem passed was successfully mounted
#              1 - filesystem failed to fsck or mount
#
# Argument:
#              $1 = File system to be checked.
#              $2 = Tool to be used (fsck/logredo)
#              $3 = Status file name
#
# Environment: VERBOSE_LOGGING, PATH
#
###############################################################################
function fs_mount
{
    typeset PS4_TIMER="true"
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x

    typeset FS="$1"
    typeset TOOL="$2"
    typeset TMP_FILENAME="$3"
    typeset WPAR_ROOT=""
    typeset MOUNT_ARGS=""
    typeset -i STATUS=0
    typeset LVCB_info
    typeset FS_info
    typeset LV_name
    typeset -i RC=0

    #
    : Here, check to see if the information in /etc/filesystems for $FS
    : is consistent with what is in the CuAt ODM for the logical volume -
    : the "label" field for the logical volume should match the mount
    : point in /etc/filesystems.
    #
    FS_info=$(LC_ALL=C lsfs -c $FS 2>&1)
    RC=$?
    if (( $RC != 0 ))
    then
        cl_log 999999 "${PROGNAME}: lsfs $FS returns\n $FS_info" $PROGNAME $FS "$FS_info"
        STATUS=1

        #
        : Append a failure indication to the status file
        #
        echo $STATUS $FS >> /tmp/$TMP_FILENAME
        cl_RMupdate resource_error $FS $PROGNAME
        return $STATUS
    fi

    print -- "$FS_info" | tail -1 | IFS=: read skip LV_dev_name vfs_type rest
    LV_name=${LV_dev_name##*/}

    LVCB_info=$(LC_ALL=C getlvcb -T -A $LV_name 2>&1)
    RC=$?
    if (( $RC != 0 ))
    then
        cl_log 999999 "${PROGNAME}: getlvcb -T -A $LV_name returns\n $LVCB_info" $PROGNAME $LV_name "$LVCB_info"
        STATUS=1

        #
        : Append a failure indication to the status file
        #
        echo $STATUS $FS >> /tmp/$TMP_FILENAME
        cl_RMupdate resource_error $FS $PROGNAME
        return $STATUS
    fi

    print -- "$LVCB_info" | grep -w 'label =' | read skip skip LVCB_label
    CuAt_label=$(clodmget -q "name = $LV_name and attribute = label" -f value -n CuAt)
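    # Illustrative example (values assumed) of the three labels compared below,
    # for a hypothetical mount point /app/data on logical volume datalv:
    #   $FS          (/etc/filesystems stanza)      : /app/data
    #   $CuAt_label  (CuAt ODM "label" attribute)   : /app/data
    #   $LVCB_label  (label recorded in the LVCB)   : /app/data
    # All three should be identical when the volume group metadata is consistent.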
    #
    : At this point, if things are working correctly, $FS from /etc/filesystems
    : should match $CuAt_label from the CuAt ODM and $LVCB_label from the LVCB
    : on disk. No recovery is done at this point, because best efforts at
    : recovery were done in clvaryonvg.
    #
    if [[ $CuAt_label != $FS ]]
    then
        STATUS=1
        cl_log 999999 "${PROGNAME}: The mount point $FS does not match the logical volume label $CuAt_label for $LV_name in CuAt" $PROGNAME $FS $CuAt_label $LV_name
    fi
    if [[ $LVCB_label != $FS ]]
    then
        STATUS=1
        cl_log 999999 "${PROGNAME}: The mount point $FS does not match the logical volume label $LVCB_label for $LV_name in the LVCB" $PROGNAME $FS $LVCB_label
    fi
    if (( $STATUS == 1 ))
    then
        #
        : Append a failure indication to the status file
        #
        echo $STATUS $FS >> /tmp/$TMP_FILENAME
        cl_RMupdate resource_error $FS $PROGNAME
        return $STATUS
    fi

    [[ -n $GROUPNAME ]] && WPAR_ROOT=$(clwparroot $GROUPNAME)
    if [[ -n $WPAR_ROOT && -n $FS ]]
    then
        #
        : For filesystems in a WPAR, be sure to include the file system mount options
        #
        typeset FSINFO
        lsfs -c $FS | tail -1 | IFS=: read skip DEVICE skip skip skip skip OPTIONS skip
        FS=${WPAR_ROOT}${FS}
        [[ -n $OPTIONS ]] && MOUNT_ARGS="$MOUNT_ARGS -o $OPTIONS"
        [[ -n $DEVICE ]] && MOUNT_ARGS="$MOUNT_ARGS $DEVICE"
        [[ ! -d $FS ]] && mkdir -p $FS
    fi

    # Format for consumption by the cl_am utility
    amlog_trace $AM_FS_MOUNT_BEGIN "Activating Filesystem|$FS"

    : Try to mount filesystem $FS at $(date "+%h %d %H:%M:%S.000")

    if ! mount $MOUNT_ARGS $FS
    then
        #
        : The straightforward mount failed
        #
        if [[ $TOOL == "logredo" ]]
        then
            #
            : If we previously did a 'logredo', do an fsck, but skip the
            : logredo.
            #
            : Filesystem check at $(date "+%h %d %H:%M:%S.000")

            if fsck -p -o nologredo $FS
            then
                #
                : On successful fsck, retry the mount
                #
                : Try again to mount filesystem at $(date "+%h %d %H:%M:%S.000")

                if ! mount $MOUNT_ARGS $FS
                then
                    #
                    : If the mount still fails, give up
                    #
                    cl_RMupdate resource_error $FS $PROGNAME
                    cl_echo 10 "$PROGNAME: Failed mount of $FS." $PROGNAME $FS
                    STATUS=1    # note error and keep going
                fi
            else
                #
                : If fsck fails, give up
                #
                cl_RMupdate resource_error $FS $PROGNAME
                cl_echo 13 "$PROGNAME: Failed fsck -p of $FS." $PROGNAME $FS
                STATUS=1    # note error and keep going
            fi
        else
            #
            : If we already did an fsck, there is no point in trying much of
            : anything else
            #
            cl_RMupdate resource_error $FS $PROGNAME
            cl_echo 10 "$PROGNAME: Failed mount of $FS." $PROGNAME $FS
            STATUS=1    # note error and keep going
        fi
    fi

    if (( $STATUS == 1 ))
    then
        #
        : Append a failure indication to the status file
        #
        echo $STATUS $FS >> /tmp/$TMP_FILENAME

        # Format for consumption by the cl_am utility
        amlog_err $AM_FS_MOUNT_FAILURE "Activating Filesystems completed|$FS"
    else
        #
        : On successful mount of a JFS2 file system, engage mountguard,
        : if we are running on an AIX level that supports it
        #

        # Format for consumption by the cl_am utility
        amlog_trace $AM_FS_MOUNT_END "Activating Filesystems completed|$FS"

        if [[ $vfs_type == "jfs2" ]]
        then
            #
            : Each of the V, R, M and F fields is padded to a fixed length,
            : to allow reliable comparisons. E.g., the maximum VRMF is
            : 99.99.999.999
            #
            integer V R M F
            typeset -Z2 R    # two digit release
            typeset -Z3 M    # three digit modification
            typeset -Z3 F    # three digit fix
            integer VRMF=0
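            # Illustrative example (level value assumed): a bos.rte.filesystem
            # level of 7.1.3.45 reads in as V=7 R=01 M=003 F=045 because of the
            # zero-fill attributes above, giving VRMF=701003045 for the
            # comparisons below.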
            if lslpp -lcqOr bos.rte.filesystem | cut -f3 -d':' | IFS=. read V R M F
            then
                VRMF=$V$R$M$F    # fileset level as a fixed-width integer
            fi

            if (( $V == 6 && $VRMF >= 601007000 )) || (( $V == 7 && $VRMF >= 701001000 )) || (( $V > 7 ))
            then
                #
                : Tell JFS2 to try to protect against double mounts via fs mountguard.
                : The setting would cause a VG timestamp change, so run it only once.
                #
                if [[ $LVCB_info != *mountguard=yes* ]]
                then
                    CLUSTER_OVERRIDE=yes chfs -a mountguard=yes $FS
                fi
            fi
        fi
    fi

    return $STATUS
}

###############################################################################
#
# Name:        mounts_to_do
#
#              Return the list of file systems that still need to be mounted.
#              The function itself queries the mount command to determine which
#              file systems are already mounted.
#
# Calling:     mounts_to_do "$fslist_to_mount"
#
# Returns:
#              List of filesystems or ""
#
# Argument:    "$*" - blank delimited list of filesystems to mount
#
# Environment: None
#
###############################################################################
function mounts_to_do
{
    [[ $VERBOSE_LOGGING == "high" ]] && set -x

    # localize data
    typeset tomount=$*

    #
    : Get the most current list of mounted filesystems
    #
    typeset mounted=$(mount 2>/dev/null | awk '$3 ~ /jfs2*$/ {print $2}' | paste -s -)

    # release calling parameter memory for large numbers of filesystems
    shift

    # local temp variables
    typeset -A mountedArray tomountArray
    typeset fs

    #
    : Create an associative array for each list, which
    : has the side effect of dropping any duplicates
    #
    for fs in $mounted ; do
        mountedArray[$fs]=1
    done
    for fs in $tomount ; do
        tomountArray[$fs]=1
    done

    # release memory for large numbers of filesystems
    mounted=''
    tomount=''

    #
    : Walk the subscript names of tomountArray
    #
    set +u
    for fs in ${!tomountArray[@]}
    do
        if [[ ${mountedArray[$fs]} == 1 ]]
        then
            #
            : Remove $fs from the array because it is already mounted
            #
            unset tomountArray[$fs]
        fi
    done

    #
    : Print all remaining subscript names - these are the mount
    : points which still have to be mounted
    #
    print ${!tomountArray[@]} | tr ' ' '\n' | sort -u
    set -u
}

###############################################################################
#
# Name:        activate_fs_process_group
#
#              This will recover (if needed) and mount a list of file
#              systems associated with a resource group
#
# Returns:
#              0 - all filesystems passed were successfully mounted
#              1 - at least one filesystem failed to fsck or mount
#
# Argument:
#              Recovery method - sequential or parallel
#              Recovery mechanism - fsck or logredo
#              Space separated list of file systems
#
# Environment:
#
#              GROUPNAME set up as the owning resource group
#
###############################################################################
activate_fs_process_group()
{
    typeset PS4_LOOP=""
    [[ $VERBOSE_LOGGING == "high" ]] && set -x

    typeset RECOVERY_METHOD FSCHECK_TOOL FILESYSTEMS
    typeset -i STATUS=0

    RECOVERY_METHOD=$1
    FSCHECK_TOOL=$2
    shift 2
    FILESYSTEMS=$*
    typeset comm_failure=""
    typeset rc_mount=""

    #
    : Filter out duplicates, and file systems which are already mounted
    #
    FILESYSTEMS=$(mounts_to_do "$FILESYSTEMS")
    if [[ -z $FILESYSTEMS ]]
    then
        return 0
    fi

    #
    : Get unique temporary file names by using the resource group and the
    : current process ID
    #
    if [[ -z $GROUPNAME ]]
    then
        #
        : If no resource group name is set in the environment, find the one that
        : contains the first of the passed file systems
        #
        echo $FILESYSTEMS | read firstfs leftovers
        GROUPNAME=$(clodmget -q "name=FILESYSTEM AND value=$firstfs" -f group -n HACMPresource)
    fi
    TMP_FILENAME="$GROUPNAME""_activate_fs.tmp$$"
    rm -f /tmp/$TMP_FILENAME
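    # Illustrative example (names assumed): for resource group rg_app and shell
    # PID 123456, each failed mount appends a line of the form "1 <filesystem>"
    # to /tmp/rg_app_activate_fs.tmp123456, which is harvested after the mounts.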
    #
    : If FSCHECK_TOOL is null, get it from the ODM
    #
    if [[ -z $FSCHECK_TOOL && -n $GROUPNAME ]]
    then
        FSCHECK_TOOL=$(clodmget -q "name=FSCHECK_TOOL AND group=$GROUPNAME" -f value -n HACMPresource)
    fi
    FSCHECK_TOOL=$(print $FSCHECK_TOOL)    # trim leading and trailing blanks
    if [[ $FSCHECK_TOOL != "fsck" && $FSCHECK_TOOL != "logredo" ]]
    then
        cl_echo 95 "ERROR: Could not get the value of FSCHECK_TOOL" "FSCHECK_TOOL"
        FSCHECK_TOOL="fsck"
    fi

    #
    : If RECOVERY_METHOD is null, get it from the ODM
    #
    if [[ -z $RECOVERY_METHOD && -n $GROUPNAME ]]
    then
        RECOVERY_METHOD=$(clodmget -q "name=RECOVERY_METHOD AND group=$GROUPNAME" -f value -n HACMPresource)
    fi
    RECOVERY_METHOD=$(print $RECOVERY_METHOD)    # trim leading and trailing blanks
    if [[ $RECOVERY_METHOD != "sequential" && $RECOVERY_METHOD != "parallel" ]]
    then
        cl_echo 95 "ERROR: Could not get the value of RECOVERY_METHOD" "RECOVERY_METHOD"
        RECOVERY_METHOD="sequential"
    fi

    # set -u will report an error if any variable used in the script is not set
    set -u

    #
    : If FSCHECK_TOOL is set to logredo, the logredo for each jfslog has
    : already been done in get_disk_vg_fs, so we only need to do the fsck check
    : and recovery here before going on to do the mounts
    #
    if [[ $FSCHECK_TOOL == "fsck" ]]
    then
        # If TOOL is set to 'fsck', interpret this as 'fsck -f -p -o nologredo'
        TOOL="/usr/sbin/fsck -f -p -o nologredo"

        for fs in $FILESYSTEMS
        do
            PS4_LOOP="$fs"
            lsfs $fs | grep -w $fs | read DEV rest

            #
            : Verify whether the file system $fs is already mounted anywhere
            : else in the cluster. If it is already mounted somewhere else,
            : we do not want to continue here, to avoid data corruption.
            #
            #
            : When a filesystem is protected against concurrent mounting, the
            : MountGuard flag is set, and the lsfs command displays it among the
            : characteristics of the file system.
            #
            MOUNTGUARD=$(LC_ALL=C lsfs -qc $fs | tr ":" "\n" | grep -w MountGuard | cut -d" " -f2)

            #
            : fsdb and its subcommands allow us to view the information in a file system.
            : The FM_MOUNT flag is set if the file system is mounted cleanly on any node.
            #
            FMMOUNT_OUT=$(fsdb $fs << EOF
su
q
EOF
)
            FMMOUNT=$(echo "$FMMOUNT_OUT" | grep -w FM_MOUNT | awk '{ print $1 }')
            fsck_check=""
            if [[ $MOUNTGUARD == "yes" ]] && [[ $FMMOUNT == "FM_MOUNT" ]]
            then
                fsck_check="File system has the mountguard option enabled and could be mounted on another node"
            fi

            if [[ -n $fsck_check ]]
            then
                cluster_nodes=$(clodmget -n -f name -q object=COMMUNICATION_PATH HACMPnode)    # Get the list of cluster nodes

                #
                : Check whether the file system is really mounted on any of the nodes.
                #
                for node in $cluster_nodes
                do
                    clrsh $node mount | grep -w $fs
                    rc=$?
                    if (( $rc == 0 ))
                    then
                        #
                        : The file system is mounted on node $node, hence setting the mount flag to 1
                        #
                        rc_mount=1
                        break
                    fi
                done
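                # Illustrative example (node name assumed): if "clrsh nodeB mount"
                # lists $fs, the grep above returns 0, rc_mount is set, and the
                # mount attempt is abandoned below rather than risk a double mount.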
                #
                : Exit if the file system is already mounted on any of the nodes.
                #
                if [[ -n $rc_mount ]]
                then
                    cl_echo 10762 "$PROGNAME: File system $fs is mounted on one of the nodes in the cluster, hence skipping mounting the file system.\n" $PROGNAME $fs
                    exit 1
                fi
            fi

            if [[ $RECOVERY_METHOD == "parallel" ]]
            then
                # Perform fsck in the background
                $TOOL $DEV &
            else
                $TOOL $DEV
            fi
        done

        #
        : Allow any backgrounded fsck operations to finish
        #
        wait
    fi

    #
    : Now attempt to mount all the file systems
    #
    ALLFS="All_filesystems"
    cl_RMupdate resource_acquiring $ALLFS $PROGNAME

    typeset PS4_TIMER="true"
    for fs in $FILESYSTEMS
    do
        PS4_LOOP="$fs"
        if [[ $RECOVERY_METHOD == "parallel" ]]
        then
            #
            : Call the fs_mount function in the background if the recovery method
            : is set to parallel
            #
            fs_mount $fs $FSCHECK_TOOL $TMP_FILENAME &
        else
            #
            : Call the fs_mount function in the foreground for serial recovery
            #
            fs_mount $fs $FSCHECK_TOOL $TMP_FILENAME
        fi
    done
    unset PS4_LOOP PS4_TIMER

    #
    : Allow any background mount operations to finish
    #
    wait

    #
    : Read the cluster level preferred read option
    #
    cluster_pref_read=$(clodmget -n -f lvm_preferred_read HACMPcluster)

    #
    : Loop over all file systems to update the preferred read option of each LV,
    : based on the VG level preferred read option or the cluster level preferred
    : read option
    #
    for fs in $FILESYSTEMS
    do
        FS_info=$(LC_ALL=C lsfs -c $fs 2>&1)
        RC=$?
        if (( $RC != 0 ))
        then
            cl_echo 10749 "${PROGNAME}: lsfs $fs returns\n $FS_info" $PROGNAME $fs "$FS_info"
            STATUS=1

            #
            : Append a failure indication to the status file
            #
            echo $STATUS $fs >> /tmp/$TMP_FILENAME
            cl_RMupdate resource_error $fs $PROGNAME
            return $STATUS
        fi

        print -- "$FS_info" | tail -1 | IFS=: read skip LV_dev_name vfs_type rest
        LV_name=${LV_dev_name##*/}
        volume_group=$(LC_ALL=C lslv -L $LV_name | grep -w "VOLUME GROUP")
        volume_group=${volume_group##*VOLUME GROUP:+([[:space:]])}
        volume_group=${volume_group%%+([[:space:]])*}
        RGName=$(clodmget -n -f group -q "name=VOLUME_GROUP and value=$volume_group" HACMPresource)

        #
        : Get the preferred storage read option for this VG and perform the chlv command
        #
        PreferredReadOption=$(clodmget -n -f value -q "name=LVM_PREFERRED_READ and volume_group=$volume_group" HACMPvolumegroup 2>/dev/null)
        if [[ -z $PreferredReadOption ]] || [[ "$PreferredReadOption" == "roundrobin" ]]
        then
            PreferredReadOption=$cluster_pref_read
            if [[ -z $PreferredReadOption ]] || [[ "$PreferredReadOption" == "roundrobin" ]]
            then
                #
                : Both the VG level and cluster level LVM preferred read options are roundrobin.
                #
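                # A minimal sketch of the intent here (chlv -R semantics assumed):
                # "chlv -R 0" clears any preferred read copy on the logical volume,
                # letting AIX choose which mirror copy services reads.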
                chlv -R 0 $LV_name
                if (( $? != 0 ))
                then
                    cl_echo 10748 "Unable to set preferred read copy\n"
                fi
                break
            fi
        fi

        typeset StorageLoc=""
        if [[ "$PreferredReadOption" == "favorcopy" ]]
        then
            #
            : Get the mirror pool name of the flash storage
            #
            mirrorPoolName=$(clodmget -n -f object -q "volume_group=$volume_group and value=flashstorage and name=STORAGE_LOCATION" HACMPvolumegroup 2>/dev/null)
            StorageLoc="flashstorage"
        else
            #
            : Get the name of the node acquiring the RG
            #
            nodename=$(clRGinfo -s $RGName | grep -v SECONDARY | grep -w "ACQUIRING" | cut -f3 -d":")

            #
            : Get the site name
            #
            siteName=$(LC_ALL=C cllssite -c -n | grep -v "#nodename" | grep -w $nodename | awk -F: '{print $2}')

            #
            : Get the mirror pool name of the online site storage location
            #
            mirrorPoolName=$(clodmget -n -f object -q "volume_group=$volume_group and value=$siteName and name=STORAGE_LOCATION" HACMPvolumegroup 2>/dev/null)
            StorageLoc=$siteName
        fi

        if [[ -z $mirrorPoolName ]]
        then
            #
            : No mirror pool exists for the selected preferred read option
            #
            cl_echo 10753 "WARNING: LVM Preferred Read for volume group $volume_group is set to $PreferredReadOption, but the associated storage location, $StorageLoc, is not \
configured for any mirror pool copy.\nHence the LVM Preferred Read setting will be overridden as roundrobin so that AIX will decide which copy needs to be used \
while reading the data." $volume_group $PreferredReadOption $StorageLoc

            chlv -R 0 $LV_name
            if (( $? != 0 ))
            then
                cl_echo 10748 "Unable to set preferred read copy\n"
            fi
            break
        fi

        #
        : Get the copy number of the selected mirror pool
        #
        copyNum=$(lslv -L $LV_name | grep -w "$mirrorPoolName" | awk '{print $2}')
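        # Illustrative example (lslv output format assumed): if "lslv -L datalv"
        # reports a line such as "COPY 1 MIRROR POOL: poolA" and $mirrorPoolName
        # is poolA, the awk above extracts copyNum=1 for the chlv call below.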
        if [[ -n $copyNum ]]
        then
            #
            : Pass the preferred read copy number to chlv
            #
            chlv -R $copyNum $LV_name
            if (( $? != 0 ))
            then
                cl_echo 10748 "Unable to set preferred read copy\n"
            fi
        else
            #
            : No mirror pool copy exists for the selected preferred read option
            #
            cl_echo 10753 "WARNING: LVM Preferred Read for volume group $volume_group is set to $PreferredReadOption, but the associated storage location, $StorageLoc, is not \
configured for any mirror pool copy.\nHence the LVM Preferred Read setting will be overridden as roundrobin so that AIX will decide which copy needs to be used \
while reading the data." $volume_group $PreferredReadOption $StorageLoc

            chlv -R 0 $LV_name
            if (( $? != 0 ))
            then
                cl_echo 10748 "Unable to set preferred read copy\n"
            fi
        fi
    done

    #
    : Update the resource manager with the state of the operation
    #
    ALLNOERROR="All_non_error_filesystems"
    cl_RMupdate resource_up $ALLNOERROR $PROGNAME

    #
    : And harvest any status from the background mount operations
    #
    if [[ -f /tmp/$TMP_FILENAME ]]
    then
        if grep '^1' /tmp/$TMP_FILENAME
        then
            STATUS=1
        else
            rm -f /tmp/$TMP_FILENAME
        fi
    fi

    return $STATUS
}

###############################################################################
#
# Name:        activate_fs_process_resources
#
#              This will recover (if needed) and mount a list of file
#              systems, as directed by process_resources
#
# Returns:
#              0  - all filesystems passed were successfully mounted
#              11 - at least one filesystem failed to fsck or mount
#
# Argument:
#              None
#
# Environment: Set by process_resources:
#
#              RESOURCE_GROUPS
#              FILE_SYSTEMS
#              FSCHECK_TOOLS
#              RECOVERY_METHODS
#
###############################################################################
activate_fs_process_resources()
{
    [[ "$VERBOSE_LOGGING" == "high" ]] && set -x

    typeset -i ERRSTATUS=0
    integer RC=0

    #
    # Process each resource group, according to its specified procedures
    #
    # RESOURCE_GROUPS  - space separated list of resource groups
    #
    # FILE_SYSTEMS     - list of file systems for the resource groups
    #                    - colon separated lists of file systems for each
    #                      resource group
    #                    - comma separated list of file systems within a
    #                      resource group
    #
    # FSCHECK_TOOLS    - list of recovery tools for the resource groups. Same
    #                    pattern of comma and colon separated lists as for
    #                    file systems, though all file systems in a
    #                    resource group use the same value
    #
    # RECOVERY_METHODS - list of recovery procedures for the resource
    #                    groups. Same pattern of comma and colon separated
    #                    lists as for file systems, though all file systems
    #                    in a resource group use the same value
    #
    for GROUPNAME in $RESOURCE_GROUPS
    do
        export GROUPNAME

        #
        : Get the file systems, recovery tool and procedure for this
        : resource group
        #
        print $FILE_SYSTEMS | IFS=':' read _RG_FILE_SYSTEMS FILE_SYSTEMS
        print $FSCHECK_TOOLS | IFS=':' read _RG_FSCHECK_TOOLS FSCHECK_TOOLS
        print $RECOVERY_METHODS | IFS=':' read _RG_RECOVERY_METHODS RECOVERY_METHODS

        #
        : Since all file systems in a resource group use the same recovery
        : method and recovery means, just pick up the first one in the list
        #
        print $_RG_FSCHECK_TOOLS | IFS=, read FSCHECK_TOOL rest
        print $_RG_RECOVERY_METHODS | IFS=, read RECOVERY_METHOD rest

        #
        : If there are any unmounted file systems for this resource group, go
        : recover and mount them.
        #
        if [[ -n $_RG_FILE_SYSTEMS ]]
        then
            RG_FILE_SYSTEMS=$(IFS=, set -- $_RG_FILE_SYSTEMS ; print $*)    # space separated list

            activate_fs_process_group "$RECOVERY_METHOD" "$FSCHECK_TOOL" "$RG_FILE_SYSTEMS"
            RC=$?
            if (( $RC != 0 && $ERRSTATUS == 0 ))    # recovery or mount failure
            then
                ERRSTATUS=11    # flag failure in a resource group
            fi
            RG_FILE_SYSTEMS=""    # done with these
        fi
    done

    return $ERRSTATUS
}

# This function executes the user configured OEM method
# to activate the file system. The default is "mount".
activate_fs_for_oem ()
{
    OEM_FILE_SYSTEMS=$1
    integer RET=0
    for fs in $OEM_FILE_SYSTEMS
    do
        #
        : Get the OEM type
        #
        OEM_TYPE=$(cl_get_oem_type -f $fs)

        #
        : Get the user configured OEM method to activate this OEM file system
        #
        OEM_METHOD_TO_ACTIVATE_FS=$(cl_get_oem_method -m "ONLINE" -t $OEM_TYPE)
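        # Illustrative example (method path assumed): if the user configured
        # ONLINE method for this OEM type is /usr/local/bin/oem_fs_online, the
        # call below becomes: /usr/local/bin/oem_fs_online "<filesystem>"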
        if ! $OEM_METHOD_TO_ACTIVATE_FS "$fs"
        then
            RET=1
        fi
    done
    return $RET
}

###############################################################################
#
# Start of main
#
###############################################################################

export PROGNAME=${0##*/}
export PATH="$(/usr/es/sbin/cluster/utilities/cl_get_path all)"

# Include the availability metrics library file
. /usr/es/lib/ksh93/availability/cl_amlib

if [[ $VERBOSE_LOGGING == "high" ]]
then
    set -x
    version='1.1.8.5'
fi

#
: Check for mounting OEM file systems
#
OEM_FS=${OEM_FS:-"false"}
if (( $# != 0 )) && [[ "$1" == "-c" ]]
then
    OEM_FS="true"
    shift
fi

integer STATUS=0
EMULATE="REAL"

#
: The environment variable MOUNT_WLMCNTRL_SELFMANAGE is referred to inside "mount".
: If this variable is set, a few calls to wlmcntrl are skipped inside mount, which
: offers performance benefits. Hence we export this variable if it is set
: in /etc/environment.
#
export eval $(grep -w '^MOUNT_WLMCNTRL_SELFMANAGE' /etc/environment)

if [[ -n $JOB_TYPE && $JOB_TYPE != "GROUP" ]]
then
    #
    : If JOB_TYPE is set, and it does not equal "GROUP", then
    : we are processing for process_resources, which passes requests
    : associated with multiple resource groups through environment variables
    #
    activate_fs_process_resources
    STATUS=$?
else
    # Processing a single resource group
    #
    : If processing a single resource group, the list of file systems is
    : explicitly passed
    #
    if (( $# == 0 ))
    then
        cl_echo 12 "usage: $PROGNAME filesystems_to_mount" $PROGNAME
        exit 2
    fi

    FILE_SYSTEMS=$*
    if [[ $OEM_FS == "false" ]]
    then
        activate_fs_process_group "$RECOVERY_METHOD" "$FSCHECK_TOOL" "$FILE_SYSTEMS"
    else
        activate_fs_for_oem "$FILE_SYSTEMS"
    fi
    STATUS=$?
fi

return $STATUS    # status from processing group(s)
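
#
# Illustrative invocation examples (file system names assumed):
#   cl_activate_fs /app/data /app/logs    # mount the listed file systems
#   cl_activate_fs -c /oemfs1             # activate OEM-managed file systems
# Exit status: 0 if all file systems mounted, 1 on fsck/mount failure,
# 2 if no file systems were passed.
#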