
#----------------------------------------------------------------------------
#  ALTRAN_PROLOG_BEGIN_TAG                                                    
#  This is an automatically generated prolog.                                  
#                                                                              
#  Copyright (C) Altran ACT S.A.S. 2018,2019,2020,2021.  All rights reserved.  
#                                                                              
#  ALTRAN_PROLOG_END_TAG                                                      
#                                                                              
# %Z%  %ci%  %fn%, %R%, %t%, %G% %U%
#----------------------------------------------------------------------------
# Import Python Modules
#----------------------------------------------------------------------------
import os
import sys
import re
from datetime import datetime
import linecache
# Retrieve the interpreter version as MAJOR + MINOR/10 (e.g. 2.7, 3.9).
# Kept for any later code in this file that reads 'version'.
version=sys.version_info[0]+(sys.version_info[1]*.1)
# The 'imp' module is deprecated from Python 3.1 onwards (removed in 3.12),
# so use importlib on newer interpreters. Branch on the version_info tuple
# rather than the derived float: tuple comparison is exact, while the float
# form is fragile (e.g. Python 3.10 yields 3 + 10*.1 == 4.0).
if sys.version_info[:2] < (3, 1):
    import imp
    cl_utilities=imp.load_source('cl_utilities','/usr/es/lib/python/cl_utilities')
else:
    import importlib.util
    # importlib.machinery is referenced below; import it explicitly instead
    # of relying on 'import importlib.util' loading it as a side effect.
    import importlib.machinery
    def import_path(path):
        """Load the Python source file at 'path' as a module, register it
        in sys.modules under its basename, and return the module object."""
        module_name = os.path.basename(path)
        spec = importlib.util.spec_from_loader(module_name,importlib.machinery.SourceFileLoader(module_name, path))
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        sys.modules[module_name] = module
        return module
    cl_utilities=import_path('/usr/es/lib/python/cl_utilities')
from collections import OrderedDict
from cl_utilities import *
import json

#----------------------------------------------------------------------------
# Global Definitions
#----------------------------------------------------------------------------
HA_UTILS = "/usr/es/sbin/cluster/utilities"
CLAM_LOG_DIR = "/var/hacmp/availability"
UTILS_LOG = "/var/hacmp/log/clutils.log"
TMP_DIR = "/var/hacmp/tmp/amLogDir"
ACD = "/usr/es/sbin/cluster/etc/objrepos/active"
# Placeholder used in reports when no timeline data is available.
# "No\\ Data" is the same backslash-space string as before; the earlier
# "No\ Data" spelling relied on an invalid escape sequence, which raises
# a DeprecationWarning/SyntaxWarning on modern Python versions.
EMPTY_DATA = "No\\ Data"

# Global method to convert seconds in to datetime
def convertSecondsTo(stime):
    """
    Function    : convertSecondsTo
    Description : This function converts a duration in seconds into an
                  'hour:minute:seconds' string. Durations of one day or
                  more are folded into the hour field (e.g. 90000 seconds
                  becomes '25:00:0.00'); previously the day component was
                  computed but silently dropped from the output.

    Arguments   : stime - duration in seconds (number or numeric string)

    Return      : formatted duration 'hour:minute:seconds', or None when
                  stime is empty or cannot be converted to a number
    """
    try:
        # str(stime) is falsy only for an empty string, in which case
        # nothing is returned (implicit None).
        if str(stime):
            t=float(stime)
            # Fold whole days into hours so long durations are not truncated.
            hour= t//3600
            minute= (t - (hour*3600))//60
            seconds= t - ((hour*3600) + (minute*60))
            return '{:02d}:{:02d}:{:.2f}'.format(int(hour),int(minute),seconds)
    except (ValueError,TypeError):
        # float() raises ValueError for non-numeric strings and TypeError
        # for non-convertible objects; the original 'except NameError'
        # could never fire here.
        print("warning: String is empty to convert seconds in to time format")

#----------------------------------------------------------------------------
# Classes
#    SiteCentricClass
#    NodeCentricClass
#    ResourceGroup
#    MiscEventsClass
# Functions
#    findEventOccurences
#    parseTimestamp
#    getTimestampDifference
#    getPattern
#    lookUPEvents
#    displayNodeCentricReport
#    displayRGCentricReport
#    nodeCentricReport
#    generateRGCentricReport
#    generateVerificationReport
#    displayMiscellaneousReport
#    displayNetworkUpDown
#    displayswapAdapterEvent
#    siteCentricReport
#    displaySiteCentricReport
#    displayGuiReport 
#----------------------------------------------------------------------------
class MiscEventsClass:
    """
    Class: MiscEventsClass
    Description : Object of this class will contain all
                  the data for miscellaneous events to
                  be displayed in report. Each node will
                  have its own MiscEventsClass object.
    Attributes :
                  networkUpEvents - Dictionary keyed by occurrence number,
                                    where 0 is the latest occurrence;
                                    value is the list of start time,
                                    time taken, end time and network name
                  networkDownEvents - same layout, for network_down events
                  swapAdapterDict - same layout, for swap_adapter events
                  ConfigTooLongDict - same layout, for config_too_long events
                  AixDownDict - same layout, for aix_down events
    Functions :  getNetworkUpEvents()
                 getNetworkDownEvents()
                 getSwapAdapterEvents()
                 getConfigTooLongEvents()
                 getAixDownEvents()
    """
    def __init__(self,nodeName):
        """
        Function    : __init__
        Description : This function initializes the data members
                      of the class
        Arguments   : nodeName - node whose events this object tracks
        Return      : None
        """
        self.nodeName=nodeName
        self.networkDownEvents={}
        self.networkUpEvents={}
        self.swapAdapterDict={}
        self.ConfigTooLongDict={}
        # Initialize here so readers never hit AttributeError when
        # getAixDownEvents() has not been called yet (previously this
        # attribute was created only inside getAixDownEvents).
        self.AixDownDict={}

    def getNetworkUpEvents(self,average,dateList):
        """
        Function    : getNetworkUpEvents
        Description : This function calls lookUPEvents for "network_up"
                      and populates networkUpEvents dictionary with data
                      for network_up events
        Arguments   : average - Report to generate average number of event occurrences
                      dateList - [startdate,enddate]
        Return      : None
        """
        data = lookUPEvents("network_up","" , self.nodeName,"",average,dateList)
        data=data[self.nodeName]
        self.networkUpEvents=data
        return None

    def getNetworkDownEvents(self,average,dateList):
        """
        Function    : getNetworkDownEvents
        Description : This function calls lookUPEvents for "network_down"
                      and populates networkDownEvents dictionary with data
                      for network_down events
        Arguments   : average - Report to generate average number of event occurrences
                      dateList - [startdate,enddate]
        Return      : None
        """
        data = lookUPEvents("network_down","" , self.nodeName,"",average,dateList)
        data=data[self.nodeName]
        self.networkDownEvents=data
        return None

    def getSwapAdapterEvents(self,average,dateList):
        """
        Function    : getSwapAdapterEvents
        Description : This function calls lookUPEvents for "swap_adapter"
                      and populates swapAdapterDict dictionary with data
                      for swap_adapter events
        Arguments   : average - Report to generate average number of event occurrences
                      dateList - [startdate,enddate]
        Return      : None
        """
        data = lookUPEvents("swap_adapter","" , self.nodeName,"",average,dateList)
        data=data[self.nodeName]
        self.swapAdapterDict=data
        return None

    def getConfigTooLongEvents(self,average,dateList):
        """
        Function    : getConfigTooLongEvents
        Description : This function calls lookUPEvents for "config_too_long"
                      and populates ConfigTooLongDict dictionary with data
                      for config_too_long events
        Arguments   : average - Report to generate average number of event occurrences
                      dateList - [startdate,enddate]
        Return      : None
        """
        data = lookUPEvents("config_too_long","" , self.nodeName,"",average,dateList)
        data=data[self.nodeName]
        self.ConfigTooLongDict=data
        return None

    def getAixDownEvents(self,average,dateList):
        """
        Function    : getAixDownEvents
        Description : This function calls lookUPEvents for "aix_down"
                      and populates AixDownDict dictionary with data
                      for aix_down events
        Arguments   : average - Report to generate average number of event occurrences
                      dateList - [startdate,enddate]
        Return      : None
        """
        data = lookUPEvents("aix_down","" , self.nodeName,"",average,dateList)
        data=data[self.nodeName]
        self.AixDownDict=data
        return None

 
class SiteCentricClass:
    """
    Class: SiteCentricClass
    Description : This class initializes a dictionary for
       each site and stores the timeline of site_up and site_down
       events.
    Attributes  : nodesInsite - Dictionary to store nodes configured
                     in a site
                  eachSiteData - Dictionary to store timelines of
                     site_up and site_down events, keyed by site name
    Functions :   getNodesInSite
                  getSiteDownTimeStamps
                  prepareSiteCentricReport
    """
    # NOTE(review): these are class-level mutable attributes, shared by all
    # instances. __init__ rebinds nodesInsite to a fresh dict per instance,
    # but eachSiteData is mutated in place on the class, so every site's
    # timelines accumulate in the one shared dict (keyed by site name).
    # Confirm whether this cross-instance sharing is intentional before
    # converting them to instance attributes.
    siteName = ""
    eachSiteData = {}
    nodesInsite = {}   
    def __init__(self,siteName):
        """
        Function    : __init__
        Description : This function initializes the data members of the
                      class and fetches the nodes configured in the site.
        Arguments   : siteName - name of the site
        Return      : None 
        """
        self.siteName = siteName
        self.eachSiteData[siteName]={}
        self.nodesInsite = {}
        self.getNodesInSite()

    def getNodesInSite(self):
        """
        Function    : getNodesInSite
        Description : This function gets the list of nodes in a site and
                      fills in nodesInsite dictionary
        Arguments   : None
        Return      : None
        """
        # Fetch node list configured in each site from the HACMPsite ODM class.
        self.nodesInsite[self.siteName]=[]
        command="clodmget -n -q name=%s -f nodelist HACMPsite"%(self.siteName)
        status,output=executeCommand("",command)
        # executeCommand returns (status, output); a zero status with
        # non-empty output means the node list was retrieved successfully.
        if not status and output != "":
            self.nodesInsite[self.siteName]=output.split(' ')
        else:
            defMsg="Failed to fetch nodes in site: %1$s\n"
            displayMsg("",44,"scripts.cat",70,defMsg,self.siteName)

    def getSiteDownTimeStamps(self,tsList):
        """
        Function    : getSiteDownTimeStamps 
        Description : This function identifies the time stamps of unique occurrences
                      of site_down events and removes the duplicates
        Arguments   : tsList - Sorted list of time stamps of site_down events on all
                               nodes in a site
        Return      : Time stamps of unique occurrences of site_down events,
                      latest first
        """
        # Unlike site_up event, site_down event is triggered on each node in site.
        # So, fetch the unique time stamps of site_down events from the sorted list.
        # If the difference in time stamps is less than or equal to 5 seconds then
        # consider only first occurrence as valid event and remove the other from the list.
        count=0
        while count<(len(tsList)-1):
            tsDiff=getTimestampDifference(tsList[count],tsList[count+1])
            if (tsDiff <= 5):
                # Drop the near-duplicate. Note that 'count' still advances
                # after a removal, so the surviving anchor element is not
                # re-compared against the element that follows the removed one.
                tsList.remove(tsList[count+1])
            count += 1
        # Reverse the unique list such that first entry in list is latest event time stamp.
        tsList.reverse()
        return tsList

    def prepareSiteCentricReport(self,average,dateList):
        """
        Function    : prepareSiteCentricReport
        Description : This function fetches the time line information for the
                      recent 5 occurrences of site_up and site_down events and
                      stores the data in a multi dimensional dictionary.
        Arguments   : average - Report to generate average number of event occurrences
                      dateList - [startdate,enddate]
        Return      : returns a multi dimensional dictionary with start time and
                      time taken for latest 5 occurrences of site_up and site_down
                      events of a node
        """
        # Fetch the start time and time taken for latest 5 occurrences
        # of site_up and site_down events and store it in eachSiteData dictionary.
        siteName=self.siteName
        nodeList=self.nodesInsite[siteName]
        downTime = {}
        upTime = {}
        data = {}
        data[siteName]={}
        data[siteName]["START"]={}
        data[siteName]["STOP"]={}
        self.eachSiteData[siteName]["START"]={}
        self.eachSiteData[siteName]["STOP"]={}
        for node in nodeList:
            # Fetch the time lines of site_up event
            siteUpData=lookUPEvents("site_up",siteName,node,"",average,dateList)
            data[siteName]["START"][node]=siteUpData[node]
            # Store the time of occurrence of site_up events along
            # with node information in upTime dictionary.
            if len(siteUpData[node])>0:
                for occurrence in siteUpData[node].keys():
                    ts = siteUpData[node][occurrence][0]
                    upTime[ts] = node

            # Fetch the time lines of site_down event
            siteData=lookUPEvents("site_down",siteName,node,"",average,dateList)
            data[siteName]["STOP"][node]=siteData[node]

            # Store the time of occurrence of site_down event along
            # with node information in downTime dictionary.
            if len(siteData[node])>0:
                for occurrence in siteData[node].keys():
                    ts = siteData[node][occurrence][0]
                    downTime[ts] = node

        # Sort the time stamps of all elements in upTime dictionary,
        # then reverse so the latest occurrence comes first.
        siteUpTs=sorted(upTime)
        siteUpTs.reverse()

        # Sort the time stamps of all elements in downTime dictionary
        # and get time stamp list of unique site down events
        sortedTsList=sorted(downTime)
        siteDownTs=self.getSiteDownTimeStamps(sortedTsList)

        # Store the processed site_up events data in eachSiteData:
        # at most 5 occurrences, with key 0 holding the latest one.
        count = 0
        for ts in siteUpTs:
            if count >= 5:
                break
            node=upTime[ts]
            # Locate the occurrence record whose start time matches this
            # time stamp and copy it into the consolidated site data.
            for occurrence in data[siteName]["START"][node].keys():
                value = data[siteName]["START"][node][occurrence]
                if ts == value[0]:
                    self.eachSiteData[siteName]["START"][count] = value
                    count += 1
        # Store the processed site_down events data in eachSiteData
        count = 0
        for ts in siteDownTs:
            if count >= 5:
                break
            node=downTime[ts]
            for occurrence in data[siteName]["STOP"][node].keys():
                value = data[siteName]["STOP"][node][occurrence]
                if ts == value[0]:
                    self.eachSiteData[siteName]["STOP"][count] = value
                    count += 1
        return self.eachSiteData[siteName]
        
class NodeCentricClass:
    """
    Class: NodeCentricClass
    Description : This class initializes a dictionary for
       each node and stores the timeline of node_up, node_down
       events.
    Attributes  : resGroupInNode - Dictionary to store resource
                     groups configured in a node
                  eachNodeData - Dictionary to store timelines of
                     node_up and node_down events, keyed by node name
    Functions :   prepareNodeCentricReport
                  prepareROHAReport
                  getRohaAssessmentDetails
    """

    # NOTE(review): class-level mutable attributes, shared by all instances.
    # Each instance writes only its own nodeName key, so all nodes' data
    # accumulates in these shared dicts. Confirm whether this sharing is
    # intentional before converting them to instance attributes.
    resGroupInNode = {}
    eachNodeData= {}
    nodeName = ""
    def __init__(self,nodeName):
        """
        Function    : __init__
        Description : This function initializes the data members of the
                      class and fetches the resource groups configured
                      on the node.
        Arguments   : nodeName - name of the node
        Return      : None 
        """

        self.eachNodeData[nodeName]={}
        self.nodeName=nodeName
        # Timeline data ([start timestamp, time taken, ...]) for the latest
        # ROHA acquire/release events; empty until prepareROHAReport runs.
        self.latestRohaAcquireData=[]
        self.latestRohaReleaseData=[]
        # [start timestamp, time taken] for the latest ROHA assessment
        # during acquire/release; "NULL" marks "no data yet".
        self.latestRohaAcquireAssessmentEvent=["NULL",0]
        self.latestRohaReleaseAssessmentEvent=["NULL",0]

        # Fetch the resource groups configured in a node and
        # initialize the dictionary resGroupInNode.
        self.resGroupInNode[nodeName]=[]
        rgs = ""
        cmd = "clodmget -n -f group,nodes HACMPgroup|grep -w %s|cut -d':' -f1"%(nodeName)
        status,rgs=executeCommand("",cmd)
        if status:
            defMsg="Failed to fetch resource groups in node: %1$s\n"
            displayMsg("",44,"scripts.cat",17,defMsg,nodeName)
        elif rgs:
            self.resGroupInNode[nodeName]=rgs.split('\n')

    def prepareNodeCentricReport(self,average,dateList):
        """
        Function    : prepareNodeCentricReport
        Description : This function fetches the time line information for the
                      recent 5 occurrences of node_up and node_down events and
                      stores the data in a multi dimensional dictionary.
        Arguments   : average - Report to generate average number of event occurrences
                      dateList - [startdate,enddate]
        Return      : returns a multi dimensional dictionary with start time and
                      time taken for latest 5 occurrences of node_up and node_down
                      events of a node
        """
        # Fetch the start time and time taken for latest 5 occurrences
        # of node_up event and store it in eachNodeData dictionary.
        # For node centric information, resName and nodeName arguments of
        # 'lookUPEvents' function will be same.
        nodeName=self.nodeName
        nodeData=lookUPEvents("node_up",nodeName,nodeName,"",average,dateList)
        self.eachNodeData[nodeName]["START"]=nodeData[nodeName]

        # Fetch the start time and time taken for latest 5 occurrences
        # of node_down event and store it in eachNodeData dictionary
        nodeData=lookUPEvents("node_down",nodeName,nodeName,"",average,dateList)
        self.eachNodeData[nodeName]["STOP"]=nodeData[nodeName]

        # Fetch the start time and time taken for latest 5 occurrences
        # of ffdc collection in eachNodeData dictionary
        nodeData=lookUPEvents("ffdc",nodeName,nodeName,"",average,dateList)
        self.eachNodeData[nodeName]["FFDC"]=nodeData[nodeName]

        return self.eachNodeData[nodeName]
        
    def prepareROHAReport(self,average,dateList):
        """
        Function    : prepareROHAReport
        Description : This function fetches the time line information for latest
                      and recent occurrences of ROHA acquire and release events
                      and stores the data in the instance attributes.
        Arguments   : average - Report to generate average number of event occurrences
                      dateList - [startdate,enddate]
        Return      : NA
        """
        nodeName=self.nodeName
        
        # Occurrence 0 is the latest event, so keep only that record.
        rohaAcquireEventData=lookUPEvents("roha_acquire","",nodeName,"",average,dateList)
        if len(rohaAcquireEventData[nodeName]) > 0:
            self.latestRohaAcquireData=rohaAcquireEventData[nodeName][0]
        
        rohaReleaseEventData=lookUPEvents("roha_release","",nodeName,"",average,dateList)
        if len(rohaReleaseEventData[nodeName]) > 0:
            self.latestRohaReleaseData=rohaReleaseEventData[nodeName][0]
        
        rohaAcquireAssessmentData=self.getRohaAssessmentDetails("online",average,dateList)
        if len(rohaAcquireAssessmentData) > 0:
            self.latestRohaAcquireAssessmentEvent=rohaAcquireAssessmentData
        
        rohaReleaseAssessmentData=self.getRohaAssessmentDetails("offline",average,dateList)
        if len(rohaReleaseAssessmentData) > 0:
            self.latestRohaReleaseAssessmentEvent=rohaReleaseAssessmentData
            
    def getRohaAssessmentDetails(self,operation,average,dateList):
        """
        Function    : getRohaAssessmentDetails
        Description : This function fetches the time line information for latest ROHA
                      assessment during acquire or release.
        Arguments   : operation - "online" or "offline"
                      average - Report to generate average number of event occurrences
                      dateList - [startdate,enddate]
        Return      : list [start Timestamp,time taken]; ["NULL",0] when no
                      assessment data is found
        """
        rohaEventData=['NULL',0]
        found=False
        node=self.nodeName
        # NOTE(review): the two rohaOperationEvents calls below pass only 3
        # arguments while every other lookUPEvents call site in this file
        # passes 6 (average and dateList omitted here) — confirm that
        # lookUPEvents supplies defaults for the trailing parameters.
        if operation == "online":
            nodeEvents=lookUPEvents("node_up",node,node,"",average,dateList)
            rohaOperationEvents=lookUPEvents("roha_acquire","",node)
        else:
            nodeEvents=lookUPEvents("node_down",node,node,"",average,dateList)
            rohaOperationEvents=lookUPEvents("roha_release","",node)
        
        
        latestRohaAssessmentEvent=lookUPEvents("roha_assessment","",node,"",average,dateList)
        if len(latestRohaAssessmentEvent[node].keys()) == 0:
            return rohaEventData
        else:
            # The last word of the latest assessment's description field
            # carries the total number of assessments to fetch.
            maxRohaAssessments=int(latestRohaAssessmentEvent[node][0][2].strip("\n").split()[-1])
            # fetching 'maxRohaAssessments' number of occurrences
            rohaAssessmentEvents=lookUPEvents("roha_assessment","",node,"",maxRohaAssessments,"")
            for nodeOccurrence in nodeEvents[node].keys():
                nodeBeginTS=nodeEvents[node][nodeOccurrence][0]
                nodeEndTS=nodeEvents[node][nodeOccurrence][2]
                rohaAssessmentTimeTaken=0
                rohaAssessmentTS=""
                for rohaOccurrence in rohaAssessmentEvents[node].keys():
                    rohaBeginTS=rohaAssessmentEvents[node][rohaOccurrence][0]
                    # Checking whether this occurrence of ROHA assessment happened between 
                    # node_up/node_down and node_up_complete/node_down_complete
                    if checkTimeStamp(nodeBeginTS,nodeEndTS,rohaBeginTS):
                        rohaAssessmentTimeTaken+=rohaAssessmentEvents[node][rohaOccurrence][1]
                        rohaAssessmentTS=rohaBeginTS
                        # setting found to True if latest occurrence of ROHA assessment details are found
                        found=True
                        
                
                rohaOperationTime=0
                for rohaOccurrence in rohaOperationEvents[node].keys():
                    rohaOperationBeginTS=rohaOperationEvents[node][rohaOccurrence][0]
                    # Checking whether acquire or release happened during this occurrence of node_up/node_down
                    if checkTimeStamp(nodeBeginTS,nodeEndTS,rohaOperationBeginTS):
                        rohaOperationTime=rohaOperationEvents[node][rohaOccurrence][1]
                        # NOTE(review): rohaOperationDes is assigned but never
                        # read anywhere in this function.
                        rohaOperationDes=rohaOperationEvents[node][rohaOccurrence][2]
                
                # if acquire or release happened, then ROHA:ASSESSMENT:END event will be after ACQUIRE:END
                # Therefore, subtracting acquire or release time from total ROHA assessment time
                rohaAssessmentTimeTaken=rohaAssessmentTimeTaken-rohaOperationTime
                rohaEventData=[rohaAssessmentTS,rohaAssessmentTimeTaken]
                if found:
                    break
                
        return rohaEventData
        

def mappingData(rgData,resData):
    """
    Function    : mappingData
    Description : This function maps the timeline information of
                  resource group events with timeline information of
                  resources configured to resource group. When the
                  resource event at an index does not fall inside the
                  start/stop window of the RG event at the same index,
                  a placeholder entry is inserted at that index and the
                  remaining resource timelines are shifted to the next
                  indexes.
    Arguments   : rgData - Dictionary (int index -> [start, taken, stop])
                           with timeline information of RG events
                  resData - Dictionary (int index -> [start, taken, stop])
                            with timeline information of resource in RG
    Return      : Dictionary with timeline event information of resource mapped to RG events
    """
    # For each RG event, map the timelines of resource events
    for index in rgData.keys():
        rgStartTime=datetime.strptime(rgData[index][0], '%Y-%m-%dT%H:%M:%S.%f')
        rgStopTime=datetime.strptime(rgData[index][2], '%Y-%m-%dT%H:%M:%S.%f')

        if index<(len(resData)):
            resStartTime=datetime.strptime(resData[index][0], '%Y-%m-%dT%H:%M:%S.%f')
            resStopTime=datetime.strptime(resData[index][2], '%Y-%m-%dT%H:%M:%S.%f')
            # If the start/stop time of resource is not in between start and stop time
            # of RG, mark the index as "No Data" and move the timeline information of other
            # events to the next indexes.
            if not (rgStartTime<resStartTime<rgStopTime and rgStartTime<resStopTime<rgStopTime):
                # Shift every entry from 'index' onwards up by one slot,
                # then drop the placeholder into the freed slot.
                size=len(resData)
                while index<size:
                    resData[size]=resData[size-1]
                    size -= 1
                # "No\\ Data" is the same backslash-space string as before;
                # the previous 'No\ Data' spelling relied on an invalid
                # escape sequence (SyntaxWarning on modern Python).
                resData[size]=['No\\ Data',0,'No\\ Data']
    return resData

class ResourceGroup:
    """
    Class        : ResourceGroup
    Description  : This class has following data members and functions.
                   An object of this class will be created for each Resource Group
    Data members : resourcesInRG["SERVICE_LABEL"] - Has list of service IPs configured in the RG
                   resourcesInRG["VOLUME_GROUP"] - Has list of Volume Groups configured in the RG
                   resourcesInRG["FILESYSTEM"] - Has list of Filesystems configured in the RG
                   resourcesInRG["APPLICATION"] - Has list of Application controllers configured in the RG
                   resourcesInRG["NFS"] - Has list of NFS configured in the RG
                   resourcesInRG["WPAR_NAME"] - Has WPAR configured in the RG
                   
                   resourceOnlineTime,resourceOfflineTime - Dictionary with 
                                        Key - nodename, resourceType and resource name
                                        Value - list with timestamp and time taken
                                        
                   averageOnlineTime, averageOfflineTime - Dictionary with 
                                        Key - nodename, resourceType and resource name
                                        Value - Average time taken
                                        
                   totalOnlineTime,totalOfflineTime - Dictionary with
                                            Key - nodename
                                            Value - time taken
                   resourceAppMonTime[node]["APPLICATION_MONITOR"][monitor] - Dictionary with
                                            Key - nodename
                                            Value - Dictionary with
                                                    Key - APPLICATION_MONITOR
                                                    Value - Dictionary with
                                                            Key - Application monitor name
                                                            Value - list of times
    Functions    : getResourcesInRG
                   prepareRGReport
                   calculateTotalTime
    """
    def __init__(self,rgName,average,dateList):
        """
        Function    : __init__
        Description : This function initializes the data members
                      of the class
        Arguments   : RG name,average
                      dateList - [startdate,enddate]
        Return      : None 
        """
        self.rgName=rgName
        self.resourcesInRG={}
        self.resourcesInRG["SERVICE_LABEL"]=[]
        self.resourcesInRG["VOLUME_GROUP"]=[]
        self.resourcesInRG["FILESYSTEM"]=[]
        self.resourcesInRG["APPLICATIONS"]=[]
        self.resourcesInRG["APPLICATION_MONITOR"]=[]
        self.resourcesInRG["NFS"]=[]
        self.resourcesInRG["WPAR_NAME"]=[]
        self.participatingNodes=[]
        self.nodesToAnalyze=[]
        # Fetching the list of participating nodes of this RG
        command="clodmget -q group=%s -n -f nodes HACMPgroup" % self.rgName
        status,output=executeCommand("",command)
        self.participatingNodes=output.split()
        
        # Nodes that are common in participating nodes and input nodes are considered for analysis
        self.nodesToAnalyze=list(set(self.participatingNodes).intersection(nodeList))
        
        # getResourcesInRG function will fetch resources in RG from ODMs and fill resourcesInRG dictionary
        status=self.getResourcesInRG()
        
        # totalOnlineTime, totalOfflineTime is a dictionary with nodename as key and value is time taken for RG to come online or offline
        self.totalOnlineTime={}
        self.totalOfflineTime={}
        
        
        # networkOnlineTime,networkOfflineTime - sum of time taken to bring all serviceIPs in this RG online/offline
        # storageOnlineTime,storageOfflineTime - sum of time taken to bring all VolumeGroups and Filesystems in this RG online/offline
        # applicationOnlineTime,applicationOfflineTime - sum of time taken to bring all applications in this RG online/offline
        self.networkOnlineTime={}
        self.storageOnlineTime={}
        self.applicationOnlineTime={}
        
        self.networkOfflineTime={}
        self.storageOfflineTime={}
        self.applicationOfflineTime={}
        
        # averageOnlineTime, averageOfflineTime is a dictionary with nodename,Resource type, Resource name as key and value is average time taken for RG to come online or offline
        self.averageOnlineTime={}
        self.averageOfflineTime={}
        
        # resourceOnlineTime, resourceOfflineTime is a dictionary with nodename, ResourceType and resource name as key and value is the list with timestamp and time taken
        self.resourceOnlineTime={}
        self.resourceOfflineTime={}
        # rgOnlineTime,rgOfflineTime is a dictionary with the list of individual node and individual RG online/offline timestamp and time taken
        self.rgOnlineTime={} 
        self.rgOfflineTime={} 
        # rgtotalOnlineTime,rgtotalOfflineTime is a dictionary with the list of total time taken for RG online/offline
        self.rgtotalOnlineTime={} 
        self.rgtotalOfflineTime={} 
        # latestOnlineTime,latestOfflineTime is a dictionary with the list of latest start event occured for RG online/offline
        self.latestOnlineTime={}
        self.latestOfflineTime={}
        # rgaverageOnlineTime,rgaverageOfflineTime is a dictionary with the list of average time taken for RG online/offline
        self.rgaverageOnlineTime={}
        self.rgaverageOfflineTime={}
        self.resourceAppMonTime={}
        
        for node in self.nodesToAnalyze:
            self.resourceOnlineTime[node]={}
            self.resourceOnlineTime[node]["SERVICE_LABEL"]={}
            self.resourceOnlineTime[node]["VOLUME_GROUP"]={}
            self.resourceOnlineTime[node]["FILESYSTEM"]={}
            self.resourceOnlineTime[node]["APPLICATIONS"]={}
            self.resourceOnlineTime[node]["NFS"]={}
            self.resourceOnlineTime[node]["WPAR_NAME"]={}
            
            self.resourceOfflineTime[node]={}
            self.resourceOfflineTime[node]["SERVICE_LABEL"]={}
            self.resourceOfflineTime[node]["VOLUME_GROUP"]={}
            self.resourceOfflineTime[node]["FILESYSTEM"]={}
            self.resourceOfflineTime[node]["APPLICATIONS"]={}
            self.resourceOfflineTime[node]["NFS"]={}
            self.resourceOfflineTime[node]["WPAR_NAME"]={}
            
            self.totalOnlineTime[node]=0
            self.totalOfflineTime[node]=0
            
            self.networkOnlineTime[node]=0
            self.storageOnlineTime[node]=0
            self.applicationOnlineTime[node]=0
            
            self.networkOfflineTime[node]=0
            self.storageOfflineTime[node]=0
            self.applicationOfflineTime[node]=0
            
            self.averageOnlineTime[node]={}
            self.averageOnlineTime[node][self.rgName]=0
            self.averageOnlineTime[node]["SERVICE_LABEL"]={}
            self.averageOnlineTime[node]["VOLUME_GROUP"]={}
            self.averageOnlineTime[node]["FILESYSTEM"]={}
            self.averageOnlineTime[node]["APPLICATIONS"]={}
            self.averageOnlineTime[node]["NFS"]={}
            self.averageOnlineTime[node]["WPAR_NAME"]={}
            
            self.averageOfflineTime[node]={}
            self.averageOfflineTime[node][self.rgName]=0
            self.averageOfflineTime[node]["SERVICE_LABEL"]={}
            self.averageOfflineTime[node]["VOLUME_GROUP"]={}
            self.averageOfflineTime[node]["FILESYSTEM"]={}
            self.averageOfflineTime[node]["APPLICATIONS"]={}
            self.averageOfflineTime[node]["NFS"]={}
            self.averageOfflineTime[node]["WPAR_NAME"]={}
    
            self.rgOnlineTime[node]={}
            self.rgOfflineTime[node]={}
            self.rgtotalOnlineTime[node]={} 
            self.rgtotalOfflineTime[node]={} 
            self.latestOnlineTime[node]={} 
            self.latestOfflineTime[node]={} 
            self.rgaverageOnlineTime[node]={}
            self.rgaverageOfflineTime[node]={}
            self.resourceAppMonTime[node]={}
            self.resourceAppMonTime[node]["APPLICATION_MONITOR"]={}
            
        # Initialize the dictionaries to store time line information of
        # operations performed by smart assists applications
        self.isSARG=False
        SA_app=self.checkIfSARG()
        if SA_app != None:
            self.isSARG=True
            self.latestSAStartOperations = OrderedDict()
            self.latestSAStopOperations = OrderedDict()
            self.averageSAStartOperations = OrderedDict()
            self.averageSAStopOperations = OrderedDict()
            SAStartOperations = OrderedDict() 
            SAStopOperations = OrderedDict()
            # Initialize the operations performed by SAP, smart assist application
            if SA_app=="SAP":
                # Skip the sub operations for application instance in SAP NFS RG as
                # they are not performed.
                if 'SAP' in self.rgName and 'NFS' not in self.rgName:
                    SAStartOperations["SAP"]=["NFS Availability check","Prerequisites and Start Service","Start SAP instance"]
                    SAStopOperations["SAP"]=["NFS Availability check","Stop SAP instance"]
                    if 'SCS' not in self.rgName:
                        SAStartOperations["SAP"].append("Cleanup [A]SCS")
                    if 'SCS' in self.rgName or 'ERS' in self.rgName:
                        SAStartOperations["SAP"].append("Ensure enqueue table is build")
                else:
                    SAStartOperations["SAP"]=[]
                    SAStopOperations["SAP"]=[] 
 
            # Initialize the operations performed by ORACLE, smart assist application
            if SA_app=="ORACLE":
                SAStartOperations["ORACLE"]=["Start Oracle Database","Start Oracle Listeners"]
                SAStopOperations["ORACLE"]=["Stop Oracle Database","Stop Oracle Listeners"]
            
            # Initialize the operations performed by DB2 smart assist application
            if SA_app=="DB2":
                SAStartOperations["DB2"]=["Check if instance already running","Start DB2 Database"]
                SAStopOperations["DB2"]=["Check if instance already running","Stop DB2 Database"]
        
            for node in self.nodesToAnalyze:
                self.latestSAStartOperations[node]={}
                self.averageSAStartOperations[node]={}
                for operation in SAStartOperations[SA_app]:
                    self.latestSAStartOperations[node][operation]=['NULL',0]
                    self.averageSAStartOperations[node][operation]=0
                    
                self.latestSAStopOperations[node]={}
                self.averageSAStopOperations[node]={}
                for operation in SAStopOperations[SA_app]:    
                    self.latestSAStopOperations[node][operation]=['NULL',0]
                    self.averageSAStopOperations[node][operation]=0
                    
                self.getSADetails(SA_app,node,average,dateList)
        
    def getResourcesInRG(self):
        """
        Function    : getResourcesInRG
        Description : This function gets the list of resources in an RG and
                      fills in resourcesInRG dictionary
        Arguments   : None
        Return      : status of the last executed command (non-zero on failure)
        """
        # Fetching list of Resources in the RG from the HACMPresource ODM
        for resourceType in ["SERVICE_LABEL","APPLICATIONS","VOLUME_GROUP","FILESYSTEM","WPAR_NAME","APPLICATION_MONITOR"]:
            command='clodmget -n -q \"group=%s and name=%s\" -f value HACMPresource' % (self.rgName,resourceType)
            status,resources = executeCommand("",command)
            if status:
                defMsg="Failed to fetch %1$s in Resource Group %2$s\n"
                displayMsg("",44,"scripts.cat",18,defMsg,resourceType,self.rgName)

            # Only record the resource type when the query returned data
            if len(resources) > 0:
                self.resourcesInRG[resourceType]=resources.split("\n")

        # Fetching list of filesystems inside VG, if ALL is mentioned.
        # Use .get() so a FILESYSTEM/VOLUME_GROUP key that was never filled
        # in above cannot raise KeyError.
        if self.resourcesInRG.get("FILESYSTEM") == ["ALL"]:
            fsList=[]
            for eachVG in self.resourcesInRG.get("VOLUME_GROUP",[]):
                command="lsvgfs %s " % (eachVG)
                status,filesystems = executeCommand("",command)
                if len(filesystems):
                    fsList=fsList+filesystems.split("\n")
            self.resourcesInRG["FILESYSTEM"]=fsList

        # Fetching list of NFS (both v2v3 and v4) in the RG
        command='clodmget -n -q \"group=%s and name=MOUNT_FILESYSTEM \" -f value HACMPresource' % self.rgName
        status,resources = executeCommand("",command)

        if len(resources) > 0:
            self.resourcesInRG["NFS"]=resources.split("\n")

        return status
    
    def checkIfSARG(self):
        """
        Function    : checkIfSARG
        Description : This function checks if RG is Smart Assist RG
        Arguments   : None
        Return      : SA_Type if RG is Smart assist RG
                      None if RG is not Smart assist RG
        """
        # Fetch every RG that the smart-assist metadata knows about
        command='clodmget -n -q \"name=RESOURCE_GROUP\" -f value HACMPsa_metadata'
        status,SA_RGs=executeCommand("",command)
        if self.rgName not in SA_RGs:
            return None

        # This RG is managed by a smart assist: resolve its application id,
        # then the smart assist id behind that application
        command='clodmget -n -q \"name=RESOURCE_GROUP and value=%s\" -f application_id HACMPsa_metadata'%(self.rgName)
        status,appId=executeCommand("",command)
        command='clodmget -n -q \"name=SMARTASSIST_ID and application_id=%s\" -f value HACMPsa_metadata'%(appId)
        status,SA_ID=executeCommand("",command)

        # Map the smart assist id onto the application type it manages;
        # the first matching marker wins (same order as before: SAP,
        # Oracle, DB2)
        for marker,appType in (("SAP","SAP"),("Oracle","ORACLE"),("DB2","DB2")):
            if marker in SA_ID:
                return appType
        return None
    
    def getSADetails(self,SA,node,average,dateList):
        """
        Function    : getSADetails
        Description : This function fetches the Smart assists details by looking up into
        clavailability logs
        Arguments   : SA - Smart assist type ("SAP", "ORACLE" or "DB2")
                      node - node name
                      average - number of event occurrences to average over
                      dateList - [startdate,enddate]
        Return      : None
        """
        # Map the smart assist type to its start/stop event names in the log
        eventNames={"SAP":("sap_start","sap_stop"),
                    "ORACLE":("oracle_start","oracle_stop"),
                    "DB2":("db2_start","db2_stop")}
        # Unknown smart assist type: nothing to look up.  (Previously an
        # unrecognized SA left the lookup result variable unbound and the
        # code below raised NameError.)
        if SA not in eventNames:
            return
        startEvent,stopEvent=eventNames[SA]

        # Calculating details for Start operations
        for operation in self.latestSAStartOperations[node].keys():
            eachOperationData=lookUPEvents(startEvent,self.rgName,node,operation,average,dateList)
            if len(eachOperationData[node].keys()) > 0:
                # Occurrence 0 holds the latest recorded run of the operation
                self.latestSAStartOperations[node][operation]=eachOperationData[node][0]
                total=0
                for occurrence in eachOperationData[node].keys():
                    total=total+eachOperationData[node][occurrence][1]
                occurrences=len(eachOperationData[node].keys())
                self.averageSAStartOperations[node][operation]=total/occurrences

        # Calculating details for Stop operations
        for operation in self.latestSAStopOperations[node].keys():
            eachOperationData=lookUPEvents(stopEvent,self.rgName,node,operation,average,dateList)
            if len(eachOperationData[node].keys()) > 0:
                # Occurrence 0 holds the latest recorded run of the operation
                self.latestSAStopOperations[node][operation]=eachOperationData[node][0]
                total=0
                for occurrence in eachOperationData[node].keys():
                    total=total+eachOperationData[node][occurrence][1]
                occurrences=len(eachOperationData[node].keys())
                self.averageSAStopOperations[node][operation]=total/occurrences
        
    def prepareRGReport(self,average,dateList):
        """
        Function    : prepareRGReport
        Description : This function looks for events in clavailability.log 
                      and stores the time taken to bring resource online or offline
                      for every resource type (service IP, application, volume
                      group, filesystem, NFS, WPAR) on every analyzed node.
        Arguments   : average - Report to generate average number of event occurences
                      dateList - [startdate,enddate]
        Return      : None
        """
        # Calculating time taken for RG to come online
        for node in self.nodesToAnalyze:
            # Process and fetch timeline information for RG online information
            self.rgOnlineTime[node]=lookUPEvents("rgonline",self.rgName,node,"",average,dateList)[node]
            # Process and fetch timeline information for RG offline information
            self.rgOfflineTime[node]=lookUPEvents("rgoffline",self.rgName,node,"",average,dateList)[node]

            # Check whether service IPs are configured in the RG
            if len(self.resourcesInRG["SERVICE_LABEL"]) > 0:
                self.resourceOnlineTime[node]["SERVICE_LABEL"]={}
                # Calculate time taken by each Service IP to alias and remove on each node
                for serviceIP in self.resourcesInRG["SERVICE_LABEL"]:
                    eachResourceTimeStamps={}
                    # Resolve the service label to an address: the third
                    # whitespace-separated field of 'host' output is taken
                    # as the IP address
                    command="host %s" % serviceIP
                    status,IP = executeCommand("",command)
                    serviceIPaddr=IP.split(" ")[2]
                    #Handle output of 'host' command in case serviceIP has an alias
                    serviceIPaddr=serviceIPaddr.replace(",","")
                    eachResourceTimeStamps=lookUPEvents("serviceIP_acquire", serviceIPaddr, node,"",average,dateList)
                    if node in eachResourceTimeStamps.keys():
                        # Map the service IP events with that of RG events
                        resData=mappingData(self.rgOnlineTime[node],eachResourceTimeStamps[node]) 
                        self.resourceOnlineTime[node]["SERVICE_LABEL"][serviceIP]=resData
                        
                    eachResourceTimeStamps=lookUPEvents("serviceIP_release", serviceIPaddr, node,"",average,dateList)
                    if node in eachResourceTimeStamps.keys():
                        # Map the service IP events with that of RG events
                        resData=mappingData(self.rgOfflineTime[node],eachResourceTimeStamps[node])
                        self.resourceOfflineTime[node]["SERVICE_LABEL"][serviceIP]=resData
                        
            # Check whether application controllers are configured in the RG        
            if len(self.resourcesInRG["APPLICATIONS"]) > 0:
                # Calculate time taken by each application to start and stop on each node
                for app in self.resourcesInRG["APPLICATIONS"]:
                    eachResourceTimeStamps={}
                    eachResourceTimeStamps=lookUPEvents("app_start", app, node,"",average,dateList)
                    if node in eachResourceTimeStamps.keys():
                        # Map the application events with that of RG events
                        resData=mappingData(self.rgOnlineTime[node],eachResourceTimeStamps[node]) 
                        self.resourceOnlineTime[node]["APPLICATIONS"][app]=resData
                        
                    eachResourceTimeStamps=lookUPEvents("app_stop", app, node,"",average,dateList)
                    if node in eachResourceTimeStamps.keys():
                        # Map the application events with that of RG events
                        resData=mappingData(self.rgOfflineTime[node],eachResourceTimeStamps[node])
                        self.resourceOfflineTime[node]["APPLICATIONS"][app]=resData
            
            # Check whether volume groups are configured in the RG        
            if len(self.resourcesInRG["VOLUME_GROUP"]) > 0:

                # Calculate time taken by volume group to varyon, varyoff and SyncVG on each node.
                # Volume Group has operations like varyon and SyncVG which are called as part of VG activation,
                # hence for resource type VOLUME_GROUP for each resource we have two dictionaries for
                # VARY_ON and SYNC_VG each
 
                for eachVG in self.resourcesInRG["VOLUME_GROUP"]:
                    # Calculate time taken by volume group to varyon on each node
                    eachResourceTimeStamps={}
                    eachResourceTimeStamps=lookUPEvents("vg_acquire", eachVG, node,"",average,dateList)
                    self.resourceOnlineTime[node]["VOLUME_GROUP"][eachVG]={}
                    if node in eachResourceTimeStamps.keys():
                        # Create VARY_ON dictionary only if eachResourceTimeStamps has data for it
                        if (eachResourceTimeStamps[node]):
                            # Map the volume group events with that of RG events
                            resData=mappingData(self.rgOnlineTime[node],eachResourceTimeStamps[node]) 
                            self.resourceOnlineTime[node]["VOLUME_GROUP"][eachVG]["VARY_ON"]=resData

                    # Calculate time taken by volume group to vg_sync after varyon on each node
                    #tmp1 dictionary stores the occurrences for event vg_sync_rg_move
                    tmp1={}
                    tmp1=lookUPEvents("vg_sync_rg_move", eachVG, node,"",average,dateList)
                    #tmp2 dictionary stores the occurrences for event vg_sync_rg_move_complete
                    tmp2={}
                    tmp2=lookUPEvents("vg_sync_rg_move_complete", eachVG, node,"",average,dateList)
                    index=0
                    eachResourceTimeStamps[node]={}
 
                    #combining tmp1 and tmp2 to form eachResourceTimeStamps
                    #for the node. For each vg syncvg is called twice as part
                    #of rg_move and rg_move_complete,hence each occurrence of syncvg
                    #called as part of rg_move is combined with corresponding 
                    #occurrence of syncvg called as part of rg_move_complete 
                    #such that if 0th occurrence in eachResourceTimeStamps[node]
                    # is for syncvg called as part of rg_move, 1st occurrence
                    #will be syncvg called as part of rg_move_complete
  
                    # NOTE(review): tmp1[node]/tmp2[node] are indexed directly
                    # here without an "in" check, unlike the lookups above —
                    # presumably lookUPEvents always returns an entry for the
                    # requested node; confirm against its implementation.
                    # Occurrences are paired by matching field [3] (apparently
                    # a correlation key shared by the rg_move/rg_move_complete
                    # pair — TODO confirm).
                    for tmp1_occurrence in range(len(tmp1[node].keys())):
                        for  tmp2_occurrence in range(len(tmp2[node].keys())):
                            if tmp1[node][tmp1_occurrence][3] == tmp2[node][tmp2_occurrence][3]:
                                eachResourceTimeStamps[node].update({index:tmp1[node][tmp1_occurrence]})
                                index+=1
                                eachResourceTimeStamps[node].update({index:tmp2[node][tmp2_occurrence]})
                                index+=1
                                break
                    if node in eachResourceTimeStamps.keys():
                        # Create SYNC_VG dictionary only if eachResourceTimeStamps has data for it
                        if (eachResourceTimeStamps[node]):
                            self.resourceOnlineTime[node]["VOLUME_GROUP"][eachVG]["SYNC_VG"]=eachResourceTimeStamps[node]
                    eachResourceTimeStamps=lookUPEvents("vg_release", eachVG, node,"",average,dateList)
                    if node in eachResourceTimeStamps.keys():
                        # Map the volume group events with that of RG events
                        resData=mappingData(self.rgOfflineTime[node],eachResourceTimeStamps[node])
                        self.resourceOfflineTime[node]["VOLUME_GROUP"][eachVG]=resData
                        
                    # Fetching list of filesystems inside VG
                    filesystems=[]
                    command="lsvgfs %s " % (eachVG)
                    status,filesystems = executeCommand("",command)
                    if len(filesystems):
                        filesystems=filesystems.split("\n")

                    for eachFS in filesystems:
                        eachResourceTimeStamps={}
                        eachResourceTimeStamps=lookUPEvents("fs_acquire", eachFS, node,"",average,dateList)
                        if node in eachResourceTimeStamps.keys():
                            # Map the file system events with that of RG events
                            resData=mappingData(self.rgOnlineTime[node],eachResourceTimeStamps[node]) 
                            self.resourceOnlineTime[node]["FILESYSTEM"][eachFS]=resData 
                        
                        eachResourceTimeStamps=lookUPEvents("fs_release", eachFS, node,"",average,dateList)
                        if node in eachResourceTimeStamps.keys():
                            # Map the file system events with that of RG events
                            resData=mappingData(self.rgOfflineTime[node],eachResourceTimeStamps[node])
                            self.resourceOfflineTime[node]["FILESYSTEM"][eachFS]=resData
                            
            if len(self.resourcesInRG["NFS"]) > 0:
                for eachNFS in self.resourcesInRG["NFS"]:
                    # Each NFS entry is "<localMount>;<nfsExport>"; activation
                    # is tracked by the export, deactivation by the local mount
                    localMount,nfs=eachNFS.split(";")
                    eachResourceTimeStamps={}
                    eachResourceTimeStamps=lookUPEvents("nfs_activate", nfs, node,"",average,dateList)
                    if node in eachResourceTimeStamps.keys():
                        # Map the NFS events with that of RG events
                        resData=mappingData(self.rgOnlineTime[node],eachResourceTimeStamps[node]) 
                        self.resourceOnlineTime[node]["NFS"][eachNFS]=resData
                        
                    eachResourceTimeStamps=lookUPEvents("nfs_deactivate", localMount, node,"",average,dateList)
                    if node in eachResourceTimeStamps.keys():
                        # Map the NFS events with that of RG events
                        resData=mappingData(self.rgOfflineTime[node],eachResourceTimeStamps[node])
                        self.resourceOfflineTime[node]["NFS"][eachNFS]=resData
                        
            if len(self.resourcesInRG["WPAR_NAME"]) > 0:
                for eachWPAR in self.resourcesInRG["WPAR_NAME"]:
                    eachResourceTimeStamps={}
                    eachResourceTimeStamps=lookUPEvents("wpar_start", eachWPAR, node,"",average,dateList)
                    if node in eachResourceTimeStamps.keys():
                        # Map the WPAR events with that of RG events
                        resData=mappingData(self.rgOnlineTime[node],eachResourceTimeStamps[node]) 
                        self.resourceOnlineTime[node]["WPAR_NAME"][eachWPAR]=resData
                        
                    eachResourceTimeStamps=lookUPEvents("wpar_stop", eachWPAR, node,"",average,dateList)
                    if node in eachResourceTimeStamps.keys():
                        # Map the WPAR events with that of RG events
                        resData=mappingData(self.rgOfflineTime[node],eachResourceTimeStamps[node])
                        self.resourceOfflineTime[node]["WPAR_NAME"][eachWPAR]=resData

    def calculateTotalTime(self):
        """
        Function    : calculateTotalTime
        Description : This function calculates the total time taken and average time of each resource
                      and Resource Group, aggregating per-resource online/offline
                      durations into the total/network/storage/application and
                      average dictionaries built in __init__.
        Arguments   : None
        Return      : None
        """
        for node in self.resourceOnlineTime.keys():

            if(len(self.rgOnlineTime[node].keys())>0):
                # Fetch the total time taken for RG online and its latest event start time 
                self.rgtotalOnlineTime[node]=self.rgOnlineTime[node][0][1]
                self.latestOnlineTime[node]=self.rgOnlineTime[node][0][0]
            if(len(self.rgOfflineTime[node].keys())>0):
                # Fetch the total time taken for RG offline and its latest event start time
                self.rgtotalOfflineTime[node]=self.rgOfflineTime[node][0][1]
                self.latestOfflineTime[node]=self.rgOfflineTime[node][0][0]
            # Average RG online time over all recorded occurrences
            rgOnlinesum=0
            occurrence = len(self.rgOnlineTime[node].keys()) 
            for eachoccurrence in self.rgOnlineTime[node]:
                rgOnlinesum=rgOnlinesum+self.rgOnlineTime[node][eachoccurrence][1]
            if(occurrence>0):
                # Fetch the average time taken for RG online
                self.rgaverageOnlineTime[node]=rgOnlinesum/occurrence
            
            # Average RG offline time over all recorded occurrences
            rgOfflinesum=0 
            occurrence = len(self.rgOfflineTime[node].keys()) 
            for eachoccurrence in self.rgOfflineTime[node]:
                rgOfflinesum=rgOfflinesum+self.rgOfflineTime[node][eachoccurrence][1]
            if(occurrence>0):
                # Fetch the average time taken for RG offline
                self.rgaverageOfflineTime[node]=rgOfflinesum/occurrence
             

            for resType in ["WPAR_NAME","SERVICE_LABEL","APPLICATIONS","VOLUME_GROUP","FILESYSTEM","NFS"]:
                for resource in self.resourceOnlineTime[node][resType].keys():
                    totalResourceOnlineTime=0
                    totalResourceOfflineTime=0

                    #in case of resource type as VOLUME_GROUP we have dictionaries for each resource
                    #for SYNC_VG and VARY_ON so initialize averageOnlineTime[node][resType][resource]
                    #with an empty dictionary
                    if resType in ["VOLUME_GROUP"]:
                        self.averageOnlineTime[node][resType][resource]={}
                    # For VOLUME_GROUP, 'occurrence' iterates operation names
                    # ("VARY_ON"/"SYNC_VG"); for every other type it iterates
                    # integer occurrence indices (0 = latest)
                    for occurrence in self.resourceOnlineTime[node][resType][resource].keys():
                        if resType in ["VOLUME_GROUP"]:
                            totalResourceOnlineTime=0
                            count=0
                            for operationOccurrence in self.resourceOnlineTime[node][resType][resource][occurrence].keys():
                                # condition to check the "No Data" string in dictionary and then increment the count
                                # Exclude this count from the operationOccurrence while calculating the average online time.  
                                if "No\\ Data" in self.resourceOnlineTime[node][resType][resource][occurrence][operationOccurrence]:
                                    count=count+1
                                    continue
                                # NOTE(review): ("SYNC_VG") is a plain string,
                                # not a tuple, so this is a substring test; it
                                # behaves correctly only because occurrence is
                                # exactly "SYNC_VG" or "VARY_ON" here
                                if occurrence in ("SYNC_VG"):
                                    # SYNC_VG entries come in rg_move /
                                    # rg_move_complete pairs, so the latest
                                    # run spans indices 0 and 1
                                    if operationOccurrence in (0,1):
                                        self.totalOnlineTime[node]+=self.resourceOnlineTime[node][resType][resource][occurrence][operationOccurrence][1]
                                        self.storageOnlineTime[node] +=self.resourceOnlineTime[node][resType][resource][occurrence][operationOccurrence][1]
                                else:
                                    if operationOccurrence == 0:
                                        self.totalOnlineTime[node]+=self.resourceOnlineTime[node][resType][resource][occurrence][operationOccurrence][1]    
                                        self.storageOnlineTime[node] +=self.resourceOnlineTime[node][resType][resource][occurrence][operationOccurrence][1]
                                totalResourceOnlineTime+=self.resourceOnlineTime[node][resType][resource][occurrence][operationOccurrence][1]
                            if totalResourceOnlineTime > 0:
                                # NOTE(review): relies on operationOccurrence
                                # keeping its final value after the loop above;
                                # totalOccurrence is last-index-minus-skipped,
                                # and +1 below converts it back to a count
                                totalOccurrence=operationOccurrence-count 
                                if occurrence == "SYNC_VG":
                                    # Each SYNC_VG run contributed two entries,
                                    # hence the division by pairs
                                    self.averageOnlineTime[node][resType][resource][occurrence]=totalResourceOnlineTime/((totalOccurrence+1)/2)
                                elif occurrence == "VARY_ON":
                                    self.averageOnlineTime[node][resType][resource][occurrence]=totalResourceOnlineTime/(totalOccurrence+1)
                                self.averageOnlineTime[node][self.rgName]+=self.averageOnlineTime[node][resType][resource][occurrence]    
                        else:
                            count=0
                            if occurrence == 0:
                                # Occurrence 0 is the latest run: add it into the
                                # node totals and the per-category breakdown
                                self.totalOnlineTime[node]+=self.resourceOnlineTime[node][resType][resource][0][1]
                                if resType == "SERVICE_LABEL":
                                    self.networkOnlineTime[node] += self.resourceOnlineTime[node][resType][resource][0][1]
                                elif resType == "FILESYSTEM":
                                    self.storageOnlineTime[node] += self.resourceOnlineTime[node][resType][resource][0][1]
                                elif resType == "APPLICATIONS":
                                    self.applicationOnlineTime[node] += self.resourceOnlineTime[node][resType][resource][0][1]
                            totalResourceOnlineTime+=self.resourceOnlineTime[node][resType][resource][occurrence][1]
                            # condition to check the "No Data" string in dictionary and then increment the count
                            # Exclude this count from the occurrence while calculating the average online time.
                            # NOTE(review): count is reset to 0 on every
                            # iteration above, so at most one skipped entry is
                            # ever reflected in totalOccurrence below — confirm
                            # whether that is intended
                            if "No\\ Data" in self.resourceOnlineTime[node][resType][resource][occurrence]:
                                count=count+1
                                continue
                    if resType not in ["VOLUME_GROUP"]:
                        if totalResourceOnlineTime > 0:
                            # 'occurrence' retains the last key from the loop
                            # above; +1 turns the last index into a count
                            totalOccurrence=occurrence-count
                            self.averageOnlineTime[node][resType][resource]=totalResourceOnlineTime/(totalOccurrence+1)
                            self.averageOnlineTime[node][self.rgName]+=self.averageOnlineTime[node][resType][resource]
                        else:
                            self.averageOnlineTime[node][resType][resource]=0
                    
                    # Offline side: same aggregation, but VOLUME_GROUP offline
                    # data is flat (no VARY_ON/SYNC_VG sub-dictionaries)
                    for occurrence in self.resourceOfflineTime[node][resType][resource].keys():
                        count=0
                        if occurrence == 0:
                            self.totalOfflineTime[node] += self.resourceOfflineTime[node][resType][resource][0][1]
                            if resType == "SERVICE_LABEL":
                                self.networkOfflineTime[node] += self.resourceOfflineTime[node][resType][resource][0][1]
                            elif resType == "VOLUME_GROUP" or resType == "FILESYSTEM":
                                self.storageOfflineTime[node] += self.resourceOfflineTime[node][resType][resource][0][1]
                            elif resType == "APPLICATIONS":
                                self.applicationOfflineTime[node] += self.resourceOfflineTime[node][resType][resource][0][1]
                        totalResourceOfflineTime+=self.resourceOfflineTime[node][resType][resource][occurrence][1]
                        # condition to check the "No Data" string in dictionary and then increment the count
                        # Exclude this count from the occurrence while calculating the average online time.
                        if "No\\ Data" in self.resourceOfflineTime[node][resType][resource][occurrence]:
                            count=count+1
                            continue
                    if totalResourceOfflineTime > 0:
                        totalOccurrence=occurrence-count
                        self.averageOfflineTime[node][resType][resource]=totalResourceOfflineTime/(totalOccurrence+1)
                        self.averageOfflineTime[node][self.rgName]+=self.averageOfflineTime[node][resType][resource]
                    else:
                        self.averageOfflineTime[node][resType][resource]=0

    def appMonTime(self,average,dateList):
        """
        Function    : appMonTime
        Description : This function builds the resourceAppMonTime dictionary, filling it with
                    : the event duration times.
        Arguments   : average - number of occurrences of the event to use for the report
                    : dateList - [startdate,enddate]
        Return      : None
        """
        for node in self.nodesToAnalyze:
            # Check whether application controllers are configured in the RG
            if len(self.resourcesInRG["APPLICATIONS"]) > 0:
                # Calculate time taken by each application to start and stop on each node
                for app in self.resourcesInRG["APPLICATIONS"]:
                    command="clmgr query application_controller %s | grep MONITORS"%app
                    status,output=executeCommand("",command)
                    # Guard against a failed command or output with no '='
                    # (e.g. no MONITORS line), which previously raised
                    # IndexError on the split below
                    if status or "=" not in output:
                        continue
                    appmon2=output.split("=")[1].replace('"','')
                    # Check if there are appmons for this application server
                    if len(appmon2) > 0:
                        for monitor in appmon2.split():
                            eachResourceTimeStamps=lookUPEvents("clappmond",monitor, node,"",average,dateList)
                            self.resourceAppMonTime[node]["APPLICATION_MONITOR"][monitor]=eachResourceTimeStamps
 
class Resource:
    """
    Class        : Resource
    Description  : This class has following data members and functions.
                   An object of this class will be created for each Resource Group
    Data members : resourceType - Type of resource i.e. SERVICE_LABEL or VOLUME_GROUP or APPLICATIONS etc
                   resourceName - Name of the resource
                   rgName       - RG in which this resource belongs to
                   participatingNodes - Participating nodes of the RG
                   nodesToAnalyze - Nodes in which analysis should be done
                   resourceOnlineTime - Timestamp and time taken for resource to come online
                   resourceOfflineTime - Timestamp and time taken for resource to come offline
                   averageOnlineTime - Average time taken for resource to come online
                   averageOfflineTime - Average time taken for resource to come offline
                   
    Functions    : prepareResourceReport
    """
    def __init__(self,resType,resName,average,dateList):
        """
        Function    : __init__
        Description : This function initializes the data members
                      of the class, resolves FILESYSTEM/NFS resources to their
                      owning volume group, looks up the owning RG and its
                      participating nodes, and then triggers the report build.
        Arguments   : Resource Type, resource Name, average
                      dateList - [startdate,enddate]
        Return      : None 
        """
        self.resourceType=resType
        self.resourceName=resName
        # To know whether resource is configured in RG or not, 
        # and FS or NFS can be empty in HACMPresource ODM,
        # corresponding VG for FS or NFS is fetched and
        # checked whether that is configured in RG or not.
        if resType == "FILESYSTEM" or resType == "NFS":
            # CuAt maps the mount point (value) back to its logical volume name
            command="clodmget -n -q value=%s -f name CuAt" % resName
            status,lvName=executeCommand("",command)
            # lslv prints a "VOLUME GROUP:  <vg>" line; last whitespace token is the VG name
            command="lslv %s | grep -w 'VOLUME GROUP'" % lvName
            status,output=executeCommand("",command)
            vgName=output.split()[-1]
            # From here on the resource is treated as its owning volume group
            resType="VOLUME_GROUP"
            resName=vgName
            
        # Resolve which resource group this (type, name) pair belongs to
        command='clodmget -n -q \"name=%s and value=%s\" -f group HACMPresource' % (resType,resName)
        status,self.rgName=executeCommand("",command)
        
        # Fetching the list of participating nodes of this RG
        command="clodmget -q group=%s -n -f nodes HACMPgroup" % self.rgName
        status,output=executeCommand("",command)
        self.participatingNodes=output.split()
        
        # Nodes that are common in participating nodes and input nodes are considered for analysis
        # NOTE(review): nodeList is a module-level name defined outside this chunk;
        # presumably the list of nodes the user asked to analyze -- confirm at caller.
        self.nodesToAnalyze=list(set(self.participatingNodes).intersection(nodeList))
        
        # averageOnlineTime, averageOfflineTime is a dictionary with nodename,Resource type, Resource name as key and value is average time taken for RG to come online or offline
        self.averageOnlineTime={}
        self.averageOfflineTime={}
        
        # resourceOnlineTime, resourceOfflineTime is a dictionary with nodename, ResourceType and resource name as key and value is the list with timestamp and time taken
        self.resourceOnlineTime={}
        self.resourceOfflineTime={}
        
        # Seed per-node defaults so the report always has an entry even when
        # no matching events are found in the logs
        for node in self.nodesToAnalyze:
            self.resourceOnlineTime[node]=['NULL',0]
            self.resourceOfflineTime[node]=['NULL',0]
            self.averageOnlineTime[node]=0
            self.averageOfflineTime[node]=0
        
        self.prepareResourceReport(average,dateList)
            
    def prepareResourceReport(self,average,dateList):
        """
        Function    : prepareResourceReport
        Description : This function looks for events in clavailability.log 
                      and stores the time taken to bring resource online or offline
        Arguments   : average - Report to generate average number of event occurences
                      dateList - [startdate,enddate]
        Return      : None
        """
        # Map the resource type to the begin/end event keywords understood by
        # lookUPEvents/getPattern
        if self.resourceType == "SERVICE_LABEL":
            onlineEvent="serviceIP_acquire"
            offlineEvent="serviceIP_release"
            # Service IP name is given as input but logs in cl_availability 
            # has IP address in it. Converting Service IP name to IP address
            # NOTE(review): assumes `host` output ends with "... is <ip-address>" -- verify on target AIX level
            command="host %s" % self.resourceName
            status,IP = executeCommand("",command)
            self.resourceName=IP.split("is ")[1]
        elif self.resourceType == "WPAR_NAME":
            onlineEvent="wpar_start"
            offlineEvent="wpar_stop"
        elif self.resourceType == "VOLUME_GROUP":
            onlineEvent="vg_acquire"
            offlineEvent="vg_release"
        elif self.resourceType == "FILESYSTEM":
            onlineEvent="fs_acquire"
            offlineEvent="fs_release"
        elif self.resourceType == "NFS":
            onlineEvent="nfs_activate"
            offlineEvent="nfs_deactivate"
        elif self.resourceType == "APPLICATIONS":
            onlineEvent="app_start"
            offlineEvent="app_stop"
        else:
            # Unknown resource type: empty event names make lookUPEvents find nothing
            onlineEvent=""
            offlineEvent=""
        # Keep the original (non-WPAR-prefixed) name; used below to build the
        # /wpars/<wpar> mount path when the RG runs inside a WPAR
        fs_name=self.resourceName 
        for node in self.nodesToAnalyze:
            #Calculating acquire and release time for resource
            totalOnlineTime=0
            totalOfflineTime=0

            eachResourceTimeStamps={}
            # Command to check RG configured with wpar, if so then return the rgname or else empty
            command="clodmget -n -q  'name=WPAR_NAME  and group=%s' -f value HACMPresource" % self.rgName
            status,rg_wpar=executeCommand("",command)
            # Check the RG configured with wpar
            if len(rg_wpar) > 0:
                # Command to get the configured filesystem list from the RG with wpar
                command="clodmget -n -q 'name=FILESYSTEM and group=%s' -f value HACMPresource" % self.rgName
                status,fs_wpar=executeCommand("",command)
                # Wpar RG configured with volume group and without any specific filesystem
                # Then fs_wpar returns as "ALL", that means to display all filesystem associated with volume group
                if fs_wpar == "ALL":
                    self.resourceName='/wpars/' + rg_wpar + fs_name
                else:
                    if len(fs_wpar) > 0:
                        fslist=fs_wpar.split("\n")
                        # Check the filesystem configured in RG with wpar
                        if self.resourceName in fslist:
                            self.resourceName='/wpars/' + rg_wpar + fs_name
            eachResourceTimeStamps=lookUPEvents(onlineEvent, self.resourceName, node,"",average,dateList)
            # NOTE(review): lookUPEvents seeds an (possibly empty) dict per queried
            # node, so indexing [node] before the membership test on the next line
            # does not KeyError in practice; the second len() check is redundant.
            if len(eachResourceTimeStamps[node].keys()) > 0:
                if node in eachResourceTimeStamps.keys() and len(eachResourceTimeStamps[node].keys()) >0:
                    for occurence in eachResourceTimeStamps[node].keys():
                        if occurence == 0:
                            # when occurence is 0, which means this is the latest time taken
                            self.resourceOnlineTime[node]=eachResourceTimeStamps[node][0]
                        # entry is [beginTS, timeTaken, endTS]; index 1 is seconds taken
                        totalOnlineTime+=eachResourceTimeStamps[node][occurence][1]
                    self.averageOnlineTime[node]=totalOnlineTime/len(eachResourceTimeStamps[node].keys())
            
            eachResourceTimeStamps={}
            # If resource Type is NFS, filesystem is given as input, but offline events are logged 
            # with local mount point. Fetching localmount from HACMPresource ODM.
            # For rest of the resource types, resource name is passed to lookUPEvents
            if self.resourceType == "NFS":
                command="odmget HACMPresource | grep -p MOUNT_FILESYSTEM | grep -w %s" % self.resourceName
                status,output=executeCommand("",command)
                # Parse odmget stanza line 'value = "<local>;<remote>"':
                # take text after '=', before ';', and drop the leading ' "'
                localMount=output.split("=")[1].split(";")[0][2:]
                eachResourceTimeStamps=lookUPEvents(offlineEvent, localMount, node,"",average,dateList)
            else:
                eachResourceTimeStamps=lookUPEvents(offlineEvent, self.resourceName, node,"",average,dateList)
            if len(eachResourceTimeStamps[node].keys()) > 0:
                if node in eachResourceTimeStamps.keys():
                    for occurence in eachResourceTimeStamps[node].keys():
                        # when occurence is 0, which means this is the latest time taken
                        if occurence == 0:
                            self.resourceOfflineTime[node]=eachResourceTimeStamps[node][0]
                        totalOfflineTime+=eachResourceTimeStamps[node][occurence][1]
                    self.averageOfflineTime[node]=totalOfflineTime/len(eachResourceTimeStamps[node].keys())            
            
def findEventOccurences(logFile,pattern,resourceName,description="",eDate="",sDate=""):
    """
    Function    : findEventOccurences
    Description : This function finds the event occurrences in the logFile
                  and returns the list of line numbers (1-based)
    Arguments   : logFile - full path of log file
                  pattern - event marker searched for (used as a regular expression)
                  resourceName - resource to match against the 4th "|" field of
                                 the line; "" matches any resource
                  description - description of the event (optional argument)
                  eDate - report end date; lines stamped after it are skipped
                  sDate - report start date; lines stamped before it are skipped
    Return      : List of line numbers of occurrences
    """
    eventLines=[]
    flag=False
    
    if not pattern:
        return eventLines
    #Checking if our pattern is for events for which no res name or description is passed as arguments
    # NOTE(review): the last entry looks like three events fused into one string
    # ('AIX:METRICS,AIX_DOWN:BEGIN,AIX_DOWN:END'); as written it can never be a
    # substring of a pattern, so AIX_DOWN events fall through to the resource-name
    # check below. Kept unchanged to preserve behavior -- confirm intent before splitting.
    eventList=[':RG:MOVE','NETWORK:UP','NETWORK:DOWN','CONFIGTL:BEGIN','CONFIGTL:END','AIX:METRICS','AIX:METRICS,AIX_DOWN:BEGIN,AIX_DOWN:END']
    for event in eventList:
        if event in pattern:
            flag=True
            break 
    # Normalize the date bounds once (they are loop invariant): strip '|', ':',
    # '-' and letters so dates compare as plain digit strings,
    # e.g. "2019-05-18T00:00:05" -> "20190518000005".
    sDate=re.sub(r'(\||:|-|[a-zA-Z]*)',"",sDate)
    eDate=re.sub(r'(\||:|-|[a-zA-Z]*)',"",eDate)
    # Use a context manager so the log file handle is closed deterministically
    # (the previous open() call leaked the handle).
    with open(logFile) as logHandle:
        for index, line in enumerate(logHandle):
            # Check the line is empty,if not then validate begin and end date
            if len(line.strip())>0:
                data=(line.split(">"))
                #Converting the availability log event date format <2019-05-18T00:00:05.548906>
                #to a digit-only string with the same regular expression as above
                logdate=re.sub(r'(\||-|:|[a-zA-Z]*)',"",data[1].split('.')[0])
                #check the condition end date is lessthan specific event date in availability log
                #Exclude the line if the condition matches and avoid to process event in the report
                if len(eDate)>0 and eDate < logdate:
                    continue
                #check the condition start date is greaterthan specific event date in availability log
                #Exclude the line if the condition matches and avoid to process event in the report
                if len(sDate)>0 and sDate > logdate:
                    continue
            for match in re.finditer(pattern, line):
                if flag or resourceName == "":
                    # Flagged events carry no resource/description qualifier
                    eventLines.append(index+1)
                else:
                    resname=""
                    if resourceName:
                        # Resource name is the 4th "|" separated field, when present
                        if len(line.split("|")) > 3:
                            resname=line.split("|")[3]
                            resname=resname.strip('\n')
                        
                    if resourceName and description:
                        if (resname == resourceName and description in line):
                            eventLines.append(index+1)
                    elif resourceName:
                        if (resname == resourceName):
                            eventLines.append(index+1)
                    elif description:
                        if (description in line):
                            eventLines.append(index+1)
    return eventLines

def parseTimestamp(line):
    """
    Function    : parseTimestamp
    Description : Extract the timestamp field (second "|" separated column)
                  from a clavailability.log line
    Arguments   : line - a "|" delimited log line
    Return      : the timestamp substring, e.g. "2019-05-18T00:00:05.548906"
    """
    fields=line.split("|")
    return fields[1]
    
def getTimestampDifference(beginTimestamp, endTimestamp):
    """
    Function    : getTimestampDifference
    Description : This function returns the time difference of begin and end Timestamps
    Arguments   : Begin time stamp and end time stamp in string format
    Return      : returns time difference of the timestamps in seconds
                  (negative when endTimestamp precedes beginTimestamp)
    """
    # Both stamps follow the clavailability.log format, e.g. 2019-05-18T00:00:05.548906
    tsFormat='%Y-%m-%dT%H:%M:%S.%f'
    begin=datetime.strptime(beginTimestamp, tsFormat)
    end=datetime.strptime(endTimestamp, tsFormat)
    # total_seconds() keeps microsecond precision
    return (end - begin).total_seconds()

def checkTimeStamp(beginTS,endTS,eventTS):
    """
    Function    : checkTimeStamp
    Description : This function checks whether eventTimestamp is in between 
                  beginTimestamp and endTimestamp (inclusive on both ends)
    Arguments   : Begin time stamp, end time stamp, event time stamp in string format
    Return      : returns True if eventTS is in between beginTS and endTS
                          False if eventTS is not in between beginTS and endTS
    """
    # All three stamps follow the clavailability.log format,
    # e.g. 2019-05-18T00:00:05.548906
    tsFormat='%Y-%m-%dT%H:%M:%S.%f'
    beginTSObject=datetime.strptime(beginTS, tsFormat)
    endTSObject=datetime.strptime(endTS, tsFormat)
    eventTSObject=datetime.strptime(eventTS, tsFormat)
    
    # The chained comparison already yields the boolean; the original
    # if/else returning True/False was redundant.
    return beginTSObject <= eventTSObject <= endTSObject
        
    
# Most events use a single family of markers "<EVENT:<prefix>:BEGIN|END|FAILURE>".
# Map event name -> marker prefix; the (BEGIN, END, FAILURE) triples are derived
# below so the chain of ~60 elif branches is replaced by table lookups.
_SIMPLE_EVENT_PREFIX = {
    "serviceIP_acquire": "SERVICEIP:ALIAS",
    "serviceIP_release": "SERVICEIP:DEALIAS",
    "app_start": "APP:START",
    "app_stop": "APP:STOP",
    "fs_acquire": "FS:MOUNT",
    "fs_release": "FS:UMOUNT",
    "vg_acquire": "VG:VARYON",
    "vg_release": "VG:VARYOFF",
    "rg_move_release": "RG:MOVE_RELEASE",
    "rg_move_acquire": "RG:MOVE_ACQUIRE",
    "rg_move_fence": "RG:MOVE_FENCE",
    "rg_move": "RG:MOVE",
    "nfs_activate": "NFS:ACTIVATE",
    "nfs_deactivate": "NFS:DEACTIVATE",
    "wpar_start": "WPAR:START",
    "wpar_stop": "WPAR:STOP",
    "swap_adapter": "NETWORK:SWAP_ADAPTER",
    "sap_start": "SA:SAP_START",
    "sap_stop": "SA:SAP_STOP",
    "oracle_start": "SA:ORACLE_START",
    "oracle_stop": "SA:ORACLE_STOP",
    "db2_start": "SA:DB2_START",
    "db2_stop": "SA:DB2_STOP",
    "ffdc": "FFDC",
    "verification": "VERIFICATION",
    "roha_assessment": "ROHA:ASSESSMENT",
    "synchronization": "SYNCHRONIZATION",
    "vg_sync_rg_move": "VG:SYNC",
    "vg_sync_rg_move_complete": "VG:SYNC_COMPLETE",
    "rgonline": "RG:ACQUIRE",
    "rgoffline": "RG:RELEASE",
    "clappmond": "APPMON",
    "config_too_long": "CONFIGTL",
    "aix_down": "AIX_DOWN",
}
# event name -> (BEGIN_PATTERN, END_PATTERN, FAILURE_PATTERN)
_EVENT_TRIPLES = dict(
    (name, ("<EVENT:%s:BEGIN>" % prefix,
            "<EVENT:%s:END>" % prefix,
            "<EVENT:%s:FAILURE>" % prefix))
    for name, prefix in _SIMPLE_EVENT_PREFIX.items()
)
# Irregular events whose markers do not follow the simple prefix scheme:
# ROHA acquire/release have no failure marker, network events use UP_COMPLETE/
# DOWN_COMPLETE for END, dare_reconfig spans RESOURCE_ACQUIRE..CONFIG_COMPLETE.
_EVENT_TRIPLES.update({
    "roha_acquire": ("<EVENT:ROHA:ACQUIRE:BEGIN>", "<EVENT:ROHA:ACQUIRE:END>", ""),
    "roha_release": ("<EVENT:ROHA:RELEASE:BEGIN>", "<EVENT:ROHA:RELEASE:END>", ""),
    "network_up": ("<EVENT:NETWORK:UP:BEGIN>", "<EVENT:NETWORK:UP_COMPLETE:END>", "<EVENT:NETWORK:UP:FAILURE>"),
    "network_down": ("<EVENT:NETWORK:DOWN:BEGIN>", "<EVENT:NETWORK:DOWN_COMPLETE:END>", "<EVENT:NETWORK:DOWN:FAILURE>"),
    "dare_reconfig": ("<EVENT:RECONFIG:RESOURCE_ACQUIRE:BEGIN>", "<EVENT:RECONFIG:CONFIG_COMPLETE:END>", "<EVENT:RECONFIG:CONFIG_COMPLETE:FAILURE>"),
})
# node/site events distinguish a failure of the begin phase from a failure of
# the completion phase:
# event name -> (BEGIN, END, BEGIN_FAILURE, END_FAILURE)
_EVENT_QUADS = {
    "node_up": ("<EVENT:NODE:UP:BEGIN>", "<EVENT:NODE:UP_COMPLETE:END>",
                "<EVENT:NODE:UP:FAILURE>", "<EVENT:NODE:UP_COMPLETE:FAILURE>"),
    "node_down": ("<EVENT:NODE:DOWN:BEGIN>", "<EVENT:NODE:DOWN_COMPLETE:END>",
                  "<EVENT:NODE:DOWN:FAILURE>", "<EVENT:NODE:DOWN_COMPLETE:FAILURE>"),
    "site_up": ("<EVENT:SITE:UP:BEGIN>", "<EVENT:SITE:UP_COMPLETE:END>",
                "<EVENT:SITE:UP:FAILURE>", "<EVENT:SITE:UP_COMPLETE:FAILURE>"),
    "site_down": ("<EVENT:SITE:DOWN:BEGIN>", "<EVENT:SITE:DOWN_COMPLETE:END>",
                  "<EVENT:SITE:DOWN:FAILURE>", "<EVENT:SITE:DOWN_COMPLETE:FAILURE>"),
}

def getPattern(eventName):
    """
    Function    : getPattern
    Description : This function initializes the global search patterns based on
                  the event name. node_*/site_* events set distinct begin/end
                  failure patterns; every other known event sets a single
                  failure pattern; an unknown event resets all patterns to "".
    Arguments   : eventName - event keyword (node_up, vg_acquire, rg_move, ...)
    Return      : None
    """
    global BEGIN_PATTERN, END_PATTERN, BEGIN_FAILURE_PATTERN, END_FAILURE_PATTERN
    global FAILURE_PATTERN
 
    if eventName in _EVENT_QUADS:
        (BEGIN_PATTERN, END_PATTERN,
         BEGIN_FAILURE_PATTERN, END_FAILURE_PATTERN) = _EVENT_QUADS[eventName]
    elif eventName in _EVENT_TRIPLES:
        BEGIN_PATTERN, END_PATTERN, FAILURE_PATTERN = _EVENT_TRIPLES[eventName]
    else:
        BEGIN_PATTERN = ""
        END_PATTERN = ""
        BEGIN_FAILURE_PATTERN = ""
        END_FAILURE_PATTERN = ""
        # Bug fix: the original elif chain never reset FAILURE_PATTERN here, so
        # an unknown event reused a stale value from a previous call (or hit a
        # NameError on the very first use in lookUPEvents).
        FAILURE_PATTERN = ""
        
    return None
    
def lookUPEvents(eventName, resName, nodeName,description="",occurenceCount=5,dateList=""):
    """ 
    Function    : lookUPEvents
    Description : This function looks up the clavailability.log files on all nodes 
                  and returns the time taken of 5 occurrences for event that is sent as argument
    Arguments   : eventName - Event name(node_up, node_down, serviceIP_acquire, serviceIPrelease etc)
                              for which data in clavailability.log files is processed
                  resName - Resource name on which event operation is performed
                  nodeName - Node name on which the events has to be looked up.
                  description - Description of the event (optional argument)
                  dateList - [startdate,enddate]
    Return      : Dictionary with first key as resource name and second key 
                  as integer from 0-4 with value as time taken(in seconds) for that occurence
                  Ex : To access time taken for node_up by node "node1" for first occurence
                  allResourcesData["node1"][0]
    """
    if (nodeName):
        nodeList=nodeName
    else:
        status,nodeList = executeCommand("","clnodename")
        if status:
            msg = "echo 'Failed to get the node list from node %s' >>%s"%(node,UTILS_LOG)
            status,output = executeCommand("",msg)
    #setting isNetworkEvent, which indicates the function is called by network_up or network_down
  
    if(eventName in ("network_up","network_down")):
        isNetworkEvent=True
    else:
        isNetworkEvent=False

    # allResourcesData is dictionary, with first key as node name and second key as occurence number
    # with value as time taken in seconds
    allResourcesData={}
    for node in nodeList.split("\n"):
        allResourcesData[node]={}
        # eachResourceData is a dictionary maintained for each node
        # key is occurence number and value is time taken in seconds
        eachResourceData={}
        count=0
        # Iterate through all cycled cl_availbility log files on a particular node
        for log in range(0,8):
            if log == 0:
                logFile=TMP_DIR+"/%s/clavailability.log" % (node)
            else:
                logFile=TMP_DIR+"/%s/clavailability.log.%s" % (node,str(log))
            
            # break if file not exists
            if not os.path.isfile(logFile):
                break
            else:
                flag = 0
                # Initialize the patters based on the event name
                getPattern(eventName)
                sDate="" 
                eDate=""
                #Condition check to verify both starting date and end date in list
                if len(dateList)>0:
                    sDate=dateList[0]
                    if len(dateList)==2:
                        eDate=dateList[1]
                if(eventName == "swap_adapter" or eventName == "config_too_long"):
                    # Consider respective node swap adapter events available in log file  
                    # Find the occurrences(line numbers) for begin, end, Failure events
                    beginEvents=findEventOccurences(logFile,BEGIN_PATTERN,node,description,eDate,sDate)
                    endEvents=findEventOccurences(logFile,END_PATTERN,node,description,eDate,sDate)
                    failureEvents=findEventOccurences(logFile,FAILURE_PATTERN,node,description,eDate,sDate)
                else:
                    # Find the occurrences(line numbers) for begin, end, beginFailure, endFailure events
                    beginEvents=findEventOccurences(logFile,BEGIN_PATTERN,resName,description,eDate,sDate)
                    endEvents=findEventOccurences(logFile,END_PATTERN,resName,description,eDate,sDate)
                    #Consider only the proper end events from the endEvents list
                    #So that begin and end event combination alone consider to generate a report  
                    if(eventName == "aix_down"):
                        endval=[]
                        for val in range(len(beginEvents)):
                            for value in range(len(endEvents)):
                                if endEvents[value] > beginEvents[val]:
                                    endval.append(endEvents[value])
                                    break
                        endEvents=endval
                    if (eventName in ("node_up","node_down","site_up","site_down")):
                        beginFailureEvents=findEventOccurences(logFile,BEGIN_FAILURE_PATTERN,resName,description,eDate,sDate)
                        endFailureEvents=findEventOccurences(logFile,END_FAILURE_PATTERN,resName,description,eDate,sDate)
                    else:
                        failureEvents=findEventOccurences(logFile,FAILURE_PATTERN,resName,description,eDate,sDate)
                #if dateList is not empty, then occurenceCount is the number of begin events. 
                if len(dateList)>0:
                    occurenceCount=len(beginEvents)
                # Iterate through the begin events
                index=0
                config=0
                newline=""
                nexttoendLine="" 
                for index in reversed(range(len(beginEvents))):
                    currBeginEvent=beginEvents[index]
                    # Find number of end events for current begin event
                    currEndEvents=[eachEvent for eachEvent in endEvents if (eachEvent > currBeginEvent)]
                    #    Consider only if one end event is present for current begin event
                    #    in case of events other than network_up or network_down events,more
                    #    than one end events in case of network_up or network_down events,
                    #    as in network up/down cases we may have End event dumped in logs for
                    #    different networks, where we need to identify which end event is for 
                    #    the network of begin event
                    if (len(currEndEvents) == 1 or (len(currEndEvents)>1 and isNetworkEvent)):
                        # Find any failure events between begin and end events
                        if (eventName in ("node_up","node_down","site_up","site_down")):
                            beginFailureCount=[eachEvent for eachEvent in beginFailureEvents \
                                                  if ((eachEvent > currBeginEvent) and (eachEvent < currEndEvents[0]))]
                            endFailureCount=[eachEvent for eachEvent in endFailureEvents \
                                                if ((eachEvent > currBeginEvent) and (eachEvent < currEndEvents[0]))]
                            # set flag if any failure events are present between begin and end event
                            if len(beginFailureCount) > 0 or len(endFailureCount) > 0:
                                flag = 1
                        else:
                            failureCount=[eachEvent for eachEvent in failureEvents \
                                                  if ((eachEvent > currBeginEvent) and (eachEvent < currEndEvents[0]))]
                            # set flag if any failure events are present between begin and end event
                            if len(failureCount) > 0:
                                flag = 1

                        
                        # skip assertion if any failure is encountered 
                        if flag == 1:
                            endEvents.remove(currEndEvents[0])
                            flag=0
                        else:
                            # Python module linecache is used to fetch log content at line number from log file
                            beginLine=linecache.getline(logFile, currBeginEvent)
                            endLine=""
                            # Finding out correct end event on basis of network name
                            if (isNetworkEvent):
                                for item in currEndEvents:
                                    beginNetwork=beginLine.split('|')[4].strip('\n ')
                                    line=linecache.getline(logFile,item)
                                    endNetwork=line.split('|')[4].strip('\n ')
                                    if(beginNetwork==endNetwork):
                                        endLine=line
                                        break
                            else:
                                endLine=linecache.getline(logFile, currEndEvents[0])
                            # If no End event for the begin event found, then continue
                            if(isNetworkEvent and endLine=="" ):
                                continue 
                            beginTS=parseTimestamp(beginLine)
                            endTS=parseTimestamp(endLine)
                            timeTaken=getTimestampDifference(beginTS,endTS)
                            eachResourceData[count]=[beginTS,timeTaken,endTS]
                            if resName == "":
                                if (eventName in ("network_up","network_down","swap_adapter")):
                                    network_name=beginLine.split('|')[4].strip('\n ')
                                    if (eventName == "swap_adapter"):
                                        interface=beginLine.split('|')[5].strip('\n ')
                                        service_ip=beginLine.split('|')[6].strip('\n ')
                                        command="host %s" % service_ip
                                        status,serviceip_name = executeCommand("",command)
                                        servicename=serviceip_name.split("is ")[0]
                                        cmd='clodmget -f interfacename -n -q identifier=%s HACMPadapter' % interface
                                        status,interfacename=executeCommand("",cmd)
                                        eachResourceData[count]=[beginTS,timeTaken,endTS,network_name,servicename,interfacename]
                                    else:
                                        eachResourceData[count]=[beginTS,timeTaken,endTS,network_name]
                                elif (eventName == "config_too_long"):
                                    event_name=beginLine.split('|')[3].strip('\n ')
                                    eachResourceData[count]=[beginTS,timeTaken,endTS,event_name]
                                elif (eventName == "aix_down"):
                                    eachResourceData[count]=[beginTS,timeTaken,endTS]
                                elif (eventName == "dare_reconfig"):
                                    #fetch the last occurrence of resource configuration or unconfiguration 
                                    if(config == 0): 
                                        #Dictionary created for both resource configuration and unconfiguration
                                        newline=currEndEvents[0]+1
                                        resourcesConfigured = {}
                                        resourcesUnconfigured = {}
                                        resourcesConfigured["FILE_SYSTEM"] = {}
                                        resourcesConfigured["APPLICATION"] = {}
                                        resourcesConfigured["VOLUME_GROUP"] = {}
                                        resourcesConfigured["RESOURCE_GROUP"] = {}
                                        resourcesConfigured["SERVICE_IP"] = {}
                                        resourcesUnconfigured["FILE_SYSTEM"] = {}
                                        resourcesUnconfigured["APPLICATION"] = {}
                                        resourcesUnconfigured["VOLUME_GROUP"] = {}
                                        resourcesUnconfigured["RESOURCE_GROUP"] = {}
                                        resourcesUnconfigured["SERVICE_IP"] = {}
                                        #loop continues till next lines until the reconfiguration statistics matches  
                                        while 1:
                                            #Fetch the next line data of resource reconfig completed in availability log
                                            nexttoendLine=linecache.getline(logFile, newline)
                                            if "STATISTICS:CONFIG" in nexttoendLine: 
                                                #fetch the old configuration from the resource reconfigured line
                                                oldconfig_endLine=nexttoendLine.split('|')[2].split('OLD=')[1]
                                                #fetch the new configuration from the resource reconfigured line
                                                newconfig_endLine=nexttoendLine.split('|')[3].split('NEW=')[1]
                                                isResAdded=False
                                                isResDeleted=False
                                                #condition to check the resource configuration or unconfiguration
                                                if (len(oldconfig_endLine) < len(newconfig_endLine)):
                                                    reconfig_data=newconfig_endLine[len(oldconfig_endLine):].strip(',')
                                                    isResAdded=True
                                                else:
                                                    reconfig_data=oldconfig_endLine[len(newconfig_endLine):].strip(',')
                                                    isResDeleted=True
                                            #check the reconfig statistics exist in line or not
                                            if "STATISTICS:CONFIG:APP" in nexttoendLine:
                                               if isResAdded:
                                                   resourcesConfigured["APPLICATION"]=reconfig_data
                                               elif isResDeleted:
                                                   resourcesUnconfigured["APPLICATION"]=reconfig_data
                                            elif "STATISTICS:CONFIG:RG" in nexttoendLine:
                                               if isResAdded:
                                                   resourcesConfigured["RESOURCE_GROUP"]=reconfig_data
                                               elif isResDeleted:
                                                   resourcesUnconfigured["RESOURCE_GROUP"]=reconfig_data
                                            elif "STATISTICS:CONFIG:FS" in nexttoendLine:
                                               if isResAdded:
                                                   resourcesConfigured["FILE_SYSTEM"]=reconfig_data
                                               elif isResDeleted:
                                                   resourcesUnconfigured["FILE_SYSTEM"]=reconfig_data
                                            elif "STATISTICS:CONFIG:VG" in nexttoendLine:
                                               if isResAdded:
                                                   resourcesConfigured["VOLUME_GROUP"]=reconfig_data
                                               elif isResDeleted:
                                                   resourcesUnconfigured["VOLUME_GROUP"]=reconfig_data
                                            elif "STATISTICS:CONFIG:SERVICEIP" in nexttoendLine:
                                               if isResAdded:
                                                   resourcesConfigured["SERVICE_IP"]=reconfig_data
                                               elif isResDeleted:
                                                   resourcesUnconfigured["SERVICE_IP"]=reconfig_data
                                            else:
                                               break
                                            newline=newline+1
                                            config=config+1
                                    eachResourceData[count]=[beginTS,timeTaken,resourcesConfigured,resourcesUnconfigured]
                                else:
                                    eachResourceData[count]=[beginTS,timeTaken,beginLine.split("|")[2]]
                            else:
                                #in case of vg_sync* events we need to dump event serial number
                                if (eventName in ("vg_sync_rg_move","vg_sync_rg_move_complete")):
                                    eachResourceData[count]=[beginTS,timeTaken,beginLine.split("|")[2],beginLine.split("|")[4].strip(' \n')]
                                if eventName == "clappmond":
                                    eachResourceData[count]=[beginTS,timeTaken,beginLine.split("|")[3]] 
                            
                            # In case of netowk up/down events network_name and end time also dumped
                            endEvents.remove(currEndEvents[0])
                            beginEvents.remove(currBeginEvent)
                            
                            count=count+1
                            # Break loop if count is occurenceCount or if we reach end of beginEvents list
                            if count >= int(occurenceCount) or index == 0:
                                break
                    else:
                        # skip assertion if more than one end event is found for current begin event
                        continue
                       
            # No need to verify next cycled log files once we get average occurrences
            allResourcesData[node]=eachResourceData
            if count >= int(occurenceCount):
                break
    return allResourcesData


def generateAixMonJson(nodeList, appservList, dateList):
    """
    Function    : generateAixMonJson
    Description : Build a nested dictionary of CPU and memory usage samples
                  for the requested application servers and print it as a
                  JSON document. Layout of the result:
                  {appserver: {node: {counter: [timestamp, memory, uCPU, sCPU]}}}
    Arguments   : nodeList    - comma separated string of nodes to collect data on.
                  appservList - list of application server names to collect
                                data for.
                  dateList    - two element list [start date, end date].
    Return      : None (the JSON document is written to stdout).
    """
    appservData = {}  # {app server name: {node: samples}}
    nodeData = {}     # {node name: {counter: metrics list}}
    dataSample = {}   # {counter: metrics list}

    if len(dateList)==2:
        # Strip separators and alphabetic characters so that both dates
        # become plain digit strings comparable lexicographically.
        sDate=dateList[0]
        sDate=re.sub(r'(\||:|-|[a-zA-Z]*)',"",sDate)
        eDate=dateList[1]
        eDate=re.sub(r'(\||:|-|[a-zA-Z]*)',"",eDate)
    else:
        # Should not happen: the caller (main) validates dateList.
        defMsg="Internal cl_availability error, please contact IBM Software Support.\n"
        displayMsg2("",44,"scripts.cat",115,defMsg)
        sys.exit(1)

    # Walk application servers, then nodes, then the cycled log files.
    for appserv in appservList:
        for node in nodeList.split(","):
            count=0
            for cycle in range(0,8):
                if cycle == 0:
                    logFile=TMP_DIR+"/%s/clavailability.log" % (node)
                else:
                    logFile=TMP_DIR+"/%s/clavailability.log.%s" % (node,str(cycle))
                # Stop at the first missing file in the cycle sequence.
                if not os.path.isfile(logFile):
                    break
                # Line numbers of AIX Monitor records in this log file.
                aixEvents=findEventOccurences(logFile, "<AIX:METRICS:PROCESS>", appserv, "", eDate, sDate)
                for lineNumber in aixEvents:
                    # Record layout:
                    # Prefix|Timestamp|application|proc name|proc pid|memory|uCPU|sCPU
                    fields = linecache.getline(logFile, lineNumber).split('|')
                    logTStamp=fields[1].strip('\n')
                    logApp=fields[2].strip('\n').split(' ')[1]
                    # Skip records that belong to a different app server.
                    if appserv != logApp:
                        continue
                    # Reduce the event timestamp <2019-05-18T00:00:05.548906>
                    # to a digit string, then range-check against the dates.
                    logDate=re.sub(r'(\||-|:|[a-zA-Z]*)',"",logTStamp.split('.')[0][:-2])
                    if len(sDate)>0 and sDate > logDate:
                        continue
                    if len(eDate)>0 and eDate < logDate:
                        continue
                    dataSample[count]=[logTStamp,
                                       fields[5].strip('\n'),
                                       fields[6].strip('\n'),
                                       fields[7].strip('\n')]
                    count=count+1
            # End of log file loop: keep the samples if any were found.
            if count:
                nodeData[node]=dataSample
                dataSample={}
        # End of node loop
        if len(nodeData) > 0:
            appservData[appserv]=nodeData
            nodeData={}
    # End of app server loop
    print(json.dumps(appservData))


def displayDetailedReport(reportData,dataFound):
    """
    Function    : displayDetailedReport
    Description : Print one entry of the detailed availability report in
                  tabular format.
    Arguments   :
                  reportData - dictionary holding the timeline data of one
                               event or operation
                  dataFound  - 0 when log data is available, 1 when there is
                               not enough data, 2 when ROHA data is displayed
    Return      : None
    """
    def _showStartTime():
        # Latest occurrence time, or the empty-data marker when unknown.
        if "LATEST_START_TIME" in reportData:
            defMsg="Time at which latest event occurred         : %1$s"
            if reportData["LATEST_START_TIME"] == "NULL":
                displayMsg("",44,"scripts.cat",30,defMsg,EMPTY_DATA)
            else:
                displayMsg("",44,"scripts.cat",30,defMsg,reportData["LATEST_START_TIME"])

    def _showLatestTimeTaken():
        # Duration of the latest occurrence, formatted HH:MM:SS when known.
        if "LATEST_TIME_TAKEN" in reportData:
            if reportData["LATEST_TIME_TAKEN"] > 0:
                defMsg="Time taken for the latest event (HH:MM:SS)             : %1$s"
                displayMsg("",44,"scripts.cat",96,defMsg,convertSecondsTo(reportData["LATEST_TIME_TAKEN"]))
            else:
                defMsg="Time taken for the latest event             : %1$s"
                displayMsg("",44,"scripts.cat",90,defMsg,EMPTY_DATA)

    # Header line: the event or operation this entry describes.
    defMsg="Event or Operation performed                : %1$s"
    displayMsg("",44,"scripts.cat",29,defMsg,reportData["OPERATION"])

    if dataFound == 0:
        _showStartTime()
        _showLatestTimeTaken()
        if "AVG_TIME_TAKEN" in reportData:
            if reportData["AVG_TIME_TAKEN"] > 0:
                defMsg="Average time taken for recent occurrences (HH:MM:SS)   : %1$s"
                displayMsg("",44,"scripts.cat",97,defMsg,convertSecondsTo(reportData["AVG_TIME_TAKEN"]))
            else:
                defMsg="Average time taken for recent occurrences   : %1$s"
                displayMsg("",44,"scripts.cat",91,defMsg,EMPTY_DATA)
    elif dataFound == 2:
        # ROHA entry: show the acquired/released resources, then timings.
        if len(reportData["memory"]) > 0:
            defMsg="Memory                                      : %1$s"
            displayMsg("",44,"scripts.cat",50,defMsg,reportData["memory"].upper())
        if int(reportData["cpu"]) > 0:
            defMsg="CPU                                         : %1$s"
            displayMsg("",44,"scripts.cat",51,defMsg,reportData["cpu"])
        if float(reportData["pu"]) > 0:
            defMsg="Processing Units                            : %1$s"
            displayMsg("",44,"scripts.cat",52,defMsg,reportData["pu"])
        if int(reportData["vp"]) > 0:
            defMsg="Virtual Processors                          : %1$s"
            displayMsg("",44,"scripts.cat",53,defMsg,reportData["vp"])
        _showStartTime()
        _showLatestTimeTaken()
    else:
        # Not enough data: print empty-data markers only.
        defMsg="Time at which latest event occurred         : %1$s"
        displayMsg("",44,"scripts.cat",30,defMsg,EMPTY_DATA)
        defMsg="Average time taken for recent occurrences   : %1$s"
        displayMsg("",44,"scripts.cat",91,defMsg,EMPTY_DATA)
    return None

def displayReport(reportData,dataFound):
    """
    Function    : displayReport
    Description : Print one entry of the availability report in tabular
                  format.
    Arguments   :
                  reportData - dictionary holding the timeline data of one
                               event or operation
                  dataFound  - 0 when log data is available, 1 otherwise
    Return      : None
    """
    haveData = not dataFound

    # Header line: the event or operation this entry describes.
    defMsg="Event or Operation performed                : %1$s"
    displayMsg("",44,"scripts.cat",29,defMsg,reportData["OPERATION"])

    # Latest occurrence time (empty-data marker when nothing was found).
    defMsg="Time at which latest event occurred         : %1$s"
    if haveData:
        displayMsg("",44,"scripts.cat",30,defMsg,reportData["LATEST_START_TIME"])
    else:
        displayMsg("",44,"scripts.cat",30,defMsg,EMPTY_DATA)

    # Duration of the latest occurrence, formatted HH:MM:SS when known.
    if haveData and reportData["LATEST_TIME_TAKEN"] > 0:
        defMsg="Time taken for the latest event (HH:MM:SS)             : %1$s"
        displayMsg("",44,"scripts.cat",96,defMsg,convertSecondsTo(reportData["LATEST_TIME_TAKEN"]))
    else:
        defMsg="Time taken for the latest event             : %1$s"
        displayMsg("",44,"scripts.cat",90,defMsg,EMPTY_DATA)

    # Average duration over the recent occurrences.
    if haveData and reportData["AVG_TIME_TAKEN"] > 0:
        defMsg="Average time taken for recent %1$d occurrences (HH:MM:SS) : %2$s"
        displayMsg("",44,"scripts.cat",98,defMsg,reportData["OCCURRENCES"],convertSecondsTo(reportData["AVG_TIME_TAKEN"]))
    else:
        defMsg="Average time taken for recent occurrences   : %1$s"
        displayMsg("",44,"scripts.cat",91,defMsg,EMPTY_DATA)

    return None

def displaySiteCentricReport(siteData,nodesInSite,isDetailed,average,dateList):
    """
    Function    : displaySiteCentricReport
    Description : This function displays the latest start time and
                  time taken for recent 5 occurrences of site_up and
                  site_down events
    Arguments   :
                  siteData    - Dictionary returned by lookUPEvents function,
                                keyed by site; each value holds "START" and
                                "STOP" occurrence dictionaries
                  nodesInSite - Validated node list for each site
                  isDetailed  - True, if detailed report is needed
                                FALSE, otherwise
                  average     - Report to generate average number of event occurences
                  dateList    - [startdate,enddate]
    Return      : None
    """
    latestStartTime={}
    latestTimeTaken={}
    avgTimeTaken={}
    occurrences={}

    # Initialize the reportData dictionary
    reportData={}
    reportData["SITE"]=""
    reportData["OPERATION"]=""
    reportData["LATEST_START_TIME"]=""
    reportData["LATEST_TIME_TAKEN"]=0
    reportData["AVG_TIME_TAKEN"]=0
    reportData["OCCURRENCES"]=0

    # Running total of the time taken by the recent occurrences of one
    # event (renamed from "sum" so the builtin is not shadowed).
    totalTime=0
    for site in siteData.keys():
        dataFound ={}
        # Capture the data for site_up and site_down events
        for item in siteData[site].keys():
            occurrences[item] = len(siteData[site][item].keys())
            if occurrences[item] > 0:
                dataFound[item] = 0
                avgTimeTaken[item] = 0
                latestStartTime[item] = ""
                latestTimeTaken[item] = ""

                # Calculate the total time taken to complete
                # site_up or site_down events for latest 5 occurrences
                for eachOccurence in siteData[site][item]:
                    totalTime=totalTime+siteData[site][item][eachOccurence][1]

                # Store the start time, time taken for latest occurrence and
                # average time taken for site_up and site_down events on a node.
                # Raw string avoids the invalid escape sequence warning; the
                # match trims the timestamp to two fractional digits.
                latestStartTime[item] = re.match(r"^.*\.\d{2}",siteData[site][item][0][0]).group()
                latestTimeTaken[item] = siteData[site][item][0][1]
                avgTimeTaken[item] = totalTime/occurrences[item]
                totalTime=0
            else:
                dataFound[item] = 1
        # Display site centric report to user
        if not dataFound["START"] or not dataFound["STOP"]:
            defMsg="\nSite                                        : %1$s"
            displayMsg("",44,"scripts.cat",71,defMsg,site)
        # Display the time lines information for site_up event to user.
        # "\\ " keeps the literal backslash-space the message catalog
        # expects (identical bytes to the former "\ " form).
        reportData["OPERATION"] = "Start\\ site"
        if dataFound["START"] == 0:
            reportData["LATEST_START_TIME"] = latestStartTime["START"]
            reportData["LATEST_TIME_TAKEN"] = round(float(latestTimeTaken["START"]),2)
            reportData["AVG_TIME_TAKEN"] = round(float(avgTimeTaken["START"]),2)
            reportData["OCCURRENCES"] = occurrences["START"]
        displayReport(reportData,dataFound["START"])

        # Display the time lines information for site_down event to user
        reportData["OPERATION"] = "Stop\\ site"
        if dataFound["STOP"] == 0:
            reportData["LATEST_START_TIME"] = latestStartTime["STOP"]
            reportData["LATEST_TIME_TAKEN"] = round(float(latestTimeTaken["STOP"]),2)
            reportData["AVG_TIME_TAKEN"] = round(float(avgTimeTaken["STOP"]),2)
            reportData["OCCURRENCES"] = occurrences["STOP"]
        displayReport(reportData,dataFound["STOP"])

    # Display the time lines information for nodes associated
    # with site for detailed report
    if isDetailed == True:
        defMsg="\nDetailed Report:"
        displayMsg("",44,"scripts.cat",33,defMsg)
        for site in nodesInSite.keys():
            defMsg="\nSite                                        : %1$s"
            displayMsg("",44,"scripts.cat",71,defMsg,site)
            nodeCentricReport(nodesInSite[site],False,True,False,average,dateList)

    return None

def displayNodeCentricReport(data,rgsInNode,rgData,rohaData,isDetailed):
    """
    Function    : displayNodeCentricReport
    Description : This function displays the latest start time and
                  time taken for recent 5 occurrences of node_up and
                  node_down events
    Arguments   :
                  data - Dictionary returned by lookUPEvents function
                  rgsInNode - Dictionary with nodewise list of resource
                     groups configured
                  rgData - Dictionary with timeline information of resource
                     groups configured in node
                  rohaData - Dictionary with timeline information of ROHA acquire/release
                     for each node
                  isDetailed - True, if detailed report is needed
                               FALSE, otherwise
    Return      : None
    """
    latestStartTime={}
    latestTimeTaken={}
    avgTimeTaken={}
    occurrences={}
    dataFound={}

    # Initialize the reportData dictionary
    reportData={}
    reportData["NODE"]=""
    reportData["OPERATION"]=""
    reportData["LATEST_START_TIME"]=""
    reportData["LATEST_TIME_TAKEN"]=0
    reportData["AVG_TIME_TAKEN"]=0
    reportData["OCCURRENCES"]=0

    # Running total of the time taken by the recent occurrences of one
    # event (renamed from "sum" so the builtin is not shadowed).
    totalTime=0
    for node in data.keys():
        dataFound ={}
        # Capture the data for node_up and node_down events
        for item in data[node].keys():
            occurrences[item] = len(data[node][item].keys())
            if occurrences[item] > 0:
                dataFound[item] = 0
                avgTimeTaken[item] = 0
                latestStartTime[item] = ""
                latestTimeTaken[item] = ""

                # Calculate the total time taken to complete
                # node_up or node_down events for latest 5 occurrences
                for eachOccurence in data[node][item]:
                    totalTime=totalTime+data[node][item][eachOccurence][1]

                # Store the start time, time taken for latest occurrence and
                # average time taken for node_up and node_down events on a
                # node. Raw string avoids the invalid escape warning.
                latestStartTime[item] = re.match(r"^.*\.\d{2}",data[node][item][0][0]).group()
                latestTimeTaken[item] = data[node][item][0][1]
                avgTimeTaken[item] = totalTime/occurrences[item]
                totalTime=0
            else:
                dataFound[item] = 1

        # NOTE(review): "item" here is the key from the last iteration of
        # the loop above; for verify/sync/DARE reports the caller is
        # expected to pass a single-operation dictionary -- confirm.
        if item == "verify" or item == "sync" or item == "reconfigdetailed":
            if item == "verify":
                reportData["OPERATION"] = "verification"
            if item == "sync":
                reportData["OPERATION"] = "synchronization"
            if item == "reconfigdetailed":
                config_flag=0
                unconfig_flag=0
                if(len(data[node][item])>0):
                    # Display the time lines information for reconfigure of resource
                    # with node for detailed report
                    defMsg="\nNode                                        : %1$s"
                    displayMsg("",44,"scripts.cat",28,defMsg,node)
                    reportData["LATEST_START_TIME"] = latestStartTime[item]
                    reportData["LATEST_TIME_TAKEN"] = round(float(latestTimeTaken[item]),2)
                    reportData["OCCURRENCES"] = occurrences[item]
                    displayReport(reportData,dataFound[item])
                    # Most recent DARE occurrence holds the configured (index 2)
                    # and unconfigured (index 3) resource dictionaries.
                    occurrence=max(data[node][item].keys())
                    defMsg="\nEvent or Operation performed                : %1$s"
                    displayMsg("",44,"scripts.cat",29,defMsg,"Configured\\ Resource ")
                    for resource,resData in data[node][item][occurrence][2].items():
                        if(len(resData)>0):
                            defMsg="Resource Type                               : %1$s"
                            displayMsg("",44,"scripts.cat",39,defMsg,resource)
                            defMsg="Resource name                               : %1$s"
                            displayMsg("",44,"scripts.cat",40,defMsg,resData)
                            config_flag=config_flag+1
                    if(config_flag==0):
                        defMsg="No resource configured in the DARE."
                        displayMsg("",44,"scripts.cat",78,defMsg)
                    defMsg="Event or Operation performed                : %1$s"
                    displayMsg("",44,"scripts.cat",29,defMsg,"Unconfigured\\ Resource")
                    for resource,resData in data[node][item][occurrence][3].items():
                        if(len(resData)>0):
                            defMsg="Resource Type                               : %1$s"
                            displayMsg("",44,"scripts.cat",39,defMsg,resource)
                            defMsg="Resource name                               : %1$s"
                            displayMsg("",44,"scripts.cat",40,defMsg,resData)
                            unconfig_flag=unconfig_flag+1
                    if(unconfig_flag==0):
                        defMsg="No resource unconfigured in the DARE."
                        displayMsg("",44,"scripts.cat",79,defMsg)
                else:
                    defMsg="\nData related to DARE operations is not available on node: %1$s"
                    displayMsg("",44,"scripts.cat",83,defMsg,node)
            else:
                defMsg="\nNode                                        : %1$s"
                displayMsg("",44,"scripts.cat",28,defMsg,node)
                if dataFound[item] == 0:
                    reportData["LATEST_START_TIME"] = latestStartTime[item]
                    reportData["LATEST_TIME_TAKEN"] = round(float(latestTimeTaken[item]),2)
                    reportData["AVG_TIME_TAKEN"] = round(float(avgTimeTaken[item]),2)
                    reportData["OCCURRENCES"] = occurrences[item]
                displayReport(reportData,dataFound[item])
        else:

            if not dataFound["START"] or not dataFound["STOP"]:
                defMsg="\nNode                                        : %1$s"
                displayMsg("",44,"scripts.cat",28,defMsg,node)

            # Display the time lines information for ffdc event to user
            # for detailed node centric report
            if isDetailed == True:
                reportData["OPERATION"] = "First\\ Failure\\ Data\\ Capture"
                if dataFound["FFDC"] == 0:
                    reportData["LATEST_START_TIME"] = latestStartTime["FFDC"]
                    reportData["LATEST_TIME_TAKEN"] = round(float(latestTimeTaken["FFDC"]),2)
                    reportData["AVG_TIME_TAKEN"] = round(float(avgTimeTaken["FFDC"]),2)
                    reportData["OCCURRENCES"] = occurrences["FFDC"]
                displayReport(reportData,dataFound["FFDC"])

            # Display the time lines information for node_up event to user
            reportData["OPERATION"] = "Start\\ cluster\\ services"
            if dataFound["START"] == 0:
                reportData["LATEST_START_TIME"] = latestStartTime["START"]
                reportData["LATEST_TIME_TAKEN"] = round(float(latestTimeTaken["START"]),2)
                reportData["AVG_TIME_TAKEN"] = round(float(avgTimeTaken["START"]),2)
                reportData["OCCURRENCES"] = occurrences["START"]
            displayReport(reportData,dataFound["START"])

            # Display the time lines information for node_down event to user
            reportData["OPERATION"] = "Stop\\ cluster\\ services"
            if dataFound["STOP"] == 0:
                reportData["LATEST_START_TIME"] = latestStartTime["STOP"]
                reportData["LATEST_TIME_TAKEN"] = round(float(latestTimeTaken["STOP"]),2)
                reportData["AVG_TIME_TAKEN"] = round(float(avgTimeTaken["STOP"]),2)
                reportData["OCCURRENCES"] = occurrences["STOP"]
            displayReport(reportData,dataFound["STOP"])

    # Display the time lines information for resources groups associated
    # with node for detailed report
    if isDetailed == True:
        defMsg="\nDetailed Report:"
        displayMsg("",44,"scripts.cat",33,defMsg)
        # Fetch and display the timelines of each resource group in a node
        for node in rgsInNode.keys():
            defMsg="\nNode                                        : %1$s"
            displayMsg("",44,"scripts.cat",28,defMsg,node)
            for rg in rgsInNode[node]:
                detailedReportData = {}
                defMsg="Resource Group                              : %1$s"
                displayMsg("",44,"scripts.cat",34,defMsg,rg)
                # Report for online operation of an RG
                detailedDataFound = 1
                detailedReportData["OPERATION"] = "Resource\\ group\\ online"
                # Check the latestOnlineTime dictionary is not empty and then fill the attributes
                if len(rgData[rg].latestOnlineTime[node]) != 0:
                    detailedDataFound = 0
                    detailedReportData["LATEST_TIME_TAKEN"] = round(float(rgData[rg].rgtotalOnlineTime[node]),2)
                    detailedReportData["AVG_TIME_TAKEN"] = round(float(rgData[rg].rgaverageOnlineTime[node]),2)
                    detailedReportData["LATEST_START_TIME"] = rgData[rg].latestOnlineTime[node][:-4]
                displayDetailedReport(detailedReportData,detailedDataFound)
                # Report for offline operation of an RG
                detailedDataFound = 1
                detailedReportData["OPERATION"] = "Resource\\ group\\ offline"
                # Check the latestOfflineTime dictionary is not empty and then fill the attributes
                if len(rgData[rg].latestOfflineTime[node]) != 0:
                    detailedDataFound = 0
                    detailedReportData["LATEST_TIME_TAKEN"] = round(float(rgData[rg].rgtotalOfflineTime[node]),2)
                    detailedReportData["AVG_TIME_TAKEN"] = round(float(rgData[rg].rgaverageOfflineTime[node]),2)
                    detailedReportData["LATEST_START_TIME"] = rgData[rg].latestOfflineTime[node][:-4]
                displayDetailedReport(detailedReportData,detailedDataFound)

            # Displaying ROHA acquire and release data for each node
            detailedReportData = {}
            detailedDataFound = 1

            if len(rohaData[node]["acquireAssessment"]) > 0:
                detailedDataFound = 0
                detailedReportData={}
                detailedReportData["OPERATION"] = "ROHA\\ Assessment\\ during\\ Acquire"
                detailedReportData["LATEST_TIME_TAKEN"] = round(float(rohaData[node]["acquireAssessment"][1]),2)
                detailedReportData["LATEST_START_TIME"] = rohaData[node]["acquireAssessment"][0]
                displayDetailedReport(detailedReportData,detailedDataFound)

            if node in rohaData.keys() and len(rohaData[node]["acquire"]) > 0:
                detailedDataFound = 2
                detailedReportData={}
                detailedReportData["OPERATION"] = "ROHA\\ Acquire"
                detailedReportData["LATEST_TIME_TAKEN"] = round(float(rohaData[node]["acquire"][1]),2)
                detailedReportData["LATEST_START_TIME"] = rohaData[node]["acquire"][0]
                detailedReportData['memory']=""
                detailedReportData['cpu']=0
                detailedReportData['pu']=0
                detailedReportData['vp']=0
                # Use an empty list (not "") so the membership tests below
                # are always list lookups.
                words=[]
                if rohaData[node]["acquire"][2] != "NULL":
                    words=rohaData[node]["acquire"][2].replace(',','').lower().split()
                if "memory" in words:
                    detailedReportData['memory']=words[words.index('memory')-1]
                if "processing_units" in  words:
                    detailedReportData['pu']=words[words.index('processing_units')-1]
                # BUGFIX: words is lower-cased above, so the old check for
                # "CPUs" could never match and the CPU count was never shown.
                if "cpus" in words:
                    detailedReportData['cpu']=words[words.index('cpus')-1]
                if "virtual_processors" in words:
                    detailedReportData['vp']=words[words.index('virtual_processors')-1]

                displayDetailedReport(detailedReportData,detailedDataFound)

            if len(rohaData[node]["releaseAssessment"]) > 0:
                detailedDataFound = 0
                detailedReportData={}
                detailedReportData["OPERATION"] = "ROHA\\ Assessment\\ during\\ Release"
                detailedReportData["LATEST_TIME_TAKEN"] = round(float(rohaData[node]["releaseAssessment"][1]),2)
                detailedReportData["LATEST_START_TIME"] = rohaData[node]["releaseAssessment"][0]
                displayDetailedReport(detailedReportData,detailedDataFound)

            if node in rohaData.keys() and len(rohaData[node]["release"]) > 0:
                detailedDataFound = 2
                detailedReportData={}
                detailedReportData["OPERATION"] = "ROHA\\ Release"
                detailedReportData["LATEST_TIME_TAKEN"] = round(float(rohaData[node]["release"][1]),2)
                detailedReportData["LATEST_START_TIME"] = rohaData[node]["release"][0]
                detailedReportData['memory']=""
                detailedReportData['cpu']=0
                detailedReportData['pu']=0
                detailedReportData['vp']=0
                words=rohaData[node]["release"][2].replace(',','').lower().split()
                if "memory" in words:
                    detailedReportData['memory']=words[words.index('memory')-1]
                if "processing_units" in  words:
                    detailedReportData['pu']=words[words.index('processing_units')-1]
                # BUGFIX: lower-cased words, so match "cpus" (see acquire
                # branch above).
                if "cpus" in words:
                    detailedReportData['cpu']=words[words.index('cpus')-1]
                if "virtual_processors" in words:
                    detailedReportData['vp']=words[words.index('virtual_processors')-1]
                displayDetailedReport(detailedReportData,detailedDataFound)

    return None

def displayRGReport(RGObjects,eachRG,node,operation):
    """
    Function    : displayRGReport
    Description : This function displays the latest event time, the time
                  taken by the latest event and the average time taken
                  for a Resource Group online/offline operation on a node.
    Arguments   : RGObjects - Dictionary with key as RG name, value as Object
                  eachRG    - RG name
                  node      - Node name
                  operation - online or offline
    Return      : None
    """
    defMsg="Event or Operation performed                : %1$s"
    displayMsg("",44,"scripts.cat",29,defMsg,"Resource\ Group\ "+operation)

    rgObj = RGObjects[eachRG]
    totalTime = 0
    averageTime = 0
    latestStart = EMPTY_DATA
    # Pick the online or offline statistics; fall through to the defaults
    # (0 / EMPTY_DATA) when no occurrence was recorded for this node.
    if operation == "online" and len(rgObj.latestOnlineTime[node]) != 0:
        totalTime = rgObj.rgtotalOnlineTime[node]
        averageTime = rgObj.rgaverageOnlineTime[node]
        latestStart = rgObj.latestOnlineTime[node][:-4]
    elif operation == "offline" and len(rgObj.latestOfflineTime[node]):
        totalTime = rgObj.rgtotalOfflineTime[node]
        averageTime = rgObj.rgaverageOfflineTime[node]
        latestStart = rgObj.latestOfflineTime[node][:-4]

    # Rounding off to two decimal points
    totalTime = round(totalTime, 2)
    averageTime = round(averageTime, 2)

    # Same catalog message either way; only the argument differs.
    defMsg="Time at which latest event occurred         : %1$s"
    if latestStart != EMPTY_DATA and len(latestStart) > 0:
        displayMsg("",44,"scripts.cat",30,defMsg,latestStart)
    else:
        displayMsg("",44,"scripts.cat",30,defMsg,EMPTY_DATA)

    if totalTime > 0:
        defMsg="Time taken for the latest event (HH:MM:SS)             : %1$s"
        displayMsg("",44,"scripts.cat",96,defMsg,convertSecondsTo(totalTime))
    else:
        defMsg="Time taken for the latest event             : %1$s"
        displayMsg("",44,"scripts.cat",90,defMsg,EMPTY_DATA)

    if averageTime > 0:
        defMsg="Average time taken for recent occurrences (HH:MM:SS)   : %1$s"
        displayMsg("",44,"scripts.cat",97,defMsg,convertSecondsTo(averageTime))
    else:
        defMsg="Average time taken for recent occurrences   : %1$s"
        displayMsg("",44,"scripts.cat",91,defMsg,EMPTY_DATA)


def displayAppMonReport(RGObjects,rgList,invalidRGs):
    """
    Function    : displayAppMonReport
    Description : This function displays the application monitor entries
                  in the RG centric detailed report: latest start time,
                  latest time taken, average time taken and number of
                  occurrences per node. Process type monitors are skipped.
    Arguments   : RGObjects - Dictionary with key as RG name, value as the
                              RG object, for the RGs in the report
                  rgList - A list of the RGs included in the report
                  invalidRGs - A list of any RGs found to be invalid or
                               not in the current configuration.
                               NOTE(review): not referenced in this body.
    Return      : None
    """
    # Per-node statistics, keyed by node name
    latestStartTime={}
    latestTimeTaken={}
    avgTimeTaken={}
    occurrences={}
    dataFound={}
    
    # Initialize the reportData dictionary
    reportData={}
    reportData["NODE"]=""
    reportData["OPERATION"]=""
    reportData["LATEST_START_TIME"]=""
    reportData["LATEST_TIME_TAKEN"]=0
    reportData["AVG_TIME_TAKEN"]=0
    reportData["OCCURRENCES"]=0
    nodeResourceAppMonTime={}
    # Running total of time taken; reset to 0 after each node is averaged
    sum=0
    for eachRG in rgList:
        defMsg="Resource Group                              : %1$s"
        displayMsg("",44,"scripts.cat",34,defMsg,eachRG)
        defMsg="Resource Type                               : %1$s"
        displayMsg("",44,"scripts.cat",39,defMsg,"APPLICATION\ MONITOR\ ")
        for node in RGObjects[eachRG].participatingNodes:
            #check the node is exist in the dictionary,if so then proceed further
            if node in RGObjects[eachRG].resourceAppMonTime:
                nodeResourceAppMonTime=RGObjects[eachRG].resourceAppMonTime[node]
                for occurrence in nodeResourceAppMonTime.keys():
                    for appoccurrence in nodeResourceAppMonTime[occurrence].keys():
                        # Query the monitor type for this monitor from the ODM
                        command="clodmget -n -q \"monitor='%s' and name='MONITOR_TYPE' and type='APPLICATIONS'\" -f value HACMPmonitor" % appoccurrence
                        status,monType=executeCommand("",command)
                        if status:
                            defMsg="Application monitor %1$s is not in the configuration.\n"
                            displayMsg("",44,"scripts.cat",111,defMsg,appoccurrence)
                            # display warning message when no application monitor are configured and exit with '0'
                            sys.exit(0)
                        # Process monitors carry no timing data to report
                        if monType == "process":
                            continue

                        for item in nodeResourceAppMonTime[occurrence][appoccurrence].keys():
                            defMsg="\nNode                                        : %1$s"
                            displayMsg("",44,"scripts.cat",28,defMsg,item)
                            defMsg="Resource name                               : %1$s"
                            displayMsg("",44,"scripts.cat",40,defMsg,appoccurrence)
                            # Fetch the configured monitor method for display
                            command="clodmget -n -q \"monitor='%s' and name='MONITOR_METHOD' and type='APPLICATIONS'\" -f value HACMPmonitor" % appoccurrence
                            status,monName=executeCommand("",command)
                            if status:
                                defMsg="Application monitor %1$s is not in the configuration.\n"
                                displayMsg("",44,"scripts.cat",111,defMsg,appoccurrence)
                                # display warning message when no application monitor are configured and exit with '0'
                                sys.exit(0)
                            defMsg="Monitor method                              : %1$s"
                            displayMsg("",44,"scripts.cat",40,defMsg,monName)
                            dataFound ={}
                            occurrences[item] = len(nodeResourceAppMonTime[occurrence][appoccurrence][item].keys())
                            if occurrences[item] > 0:
                                dataFound[item] = 0
                                avgTimeTaken[item] = 0
                                latestStartTime[item] = ""
                                latestTimeTaken[item] = ""
                                # Accumulate time taken over all occurrences for the average
                                for eachOccurence in nodeResourceAppMonTime[occurrence][appoccurrence][item]:
                                    sum=sum+nodeResourceAppMonTime[occurrence][appoccurrence][item][eachOccurence][1]
                                # Index 0 holds the latest occurrence; truncate its
                                # timestamp at two digits after the '.' via the regex
                                latestStartTime[item] = re.match("^.*\.\d{2}",nodeResourceAppMonTime[occurrence][appoccurrence][item][0][0]).group()
                                latestTimeTaken[item] = nodeResourceAppMonTime[occurrence][appoccurrence][item][0][1]
                                avgTimeTaken[item] = sum/occurrences[item]
                                sum=0
                            else:
                                dataFound[item] = 1
                        # NOTE(review): this block sits outside the 'item' loop, so it
                        # uses the last node iterated; confirm that is intended.
                        if dataFound[item] == 0:
                            reportData["LATEST_START_TIME"] = latestStartTime[item]
                            reportData["LATEST_TIME_TAKEN"] = round(float(latestTimeTaken[item]),2)
                            reportData["AVG_TIME_TAKEN"] = round(float(avgTimeTaken[item]),2)
                            reportData["OCCURRENCES"] = occurrences[item]
                        displayReport(reportData,dataFound[item]) 

        
def displayRGCentricReport(RGObjects,rgList,invalidRGs,isDetailed):
    """ 
    Function    : displayRGCentricReport
    Description : This function displays the RG Centric report with 
                  latest times and average times. If detailed report is
                  specified then latest and average times of each resource
                  is also displayed
    Arguments   : RGObjects - Dictionary with key as RG name, value as the
                              RG object
                  rgList - List of RG names included in the report
                  invalidRGs - List of RGs not found in the configuration
                  isDetailed - True if user specifies -d option
                               False if user does not specify -d option
    Return      : None 
    """
    # Summary report: per RG / per node online and offline times only.
    # nodeList is the module-level global populated by nodeCentricReport().
    if isDetailed == False:
        for eachRG in rgList:
            nonParticipatingNodes=[]
            defMsg="Resource Group                              : %1$s"
            displayMsg("",44,"scripts.cat",34,defMsg,eachRG)
            for node in nodeList:
                if node in RGObjects[eachRG].participatingNodes:
                    defMsg="\nNode                                        : %1$s"
                    displayMsg("",44,"scripts.cat",28,defMsg,node)
                    for operation in ["online","offline"]:
                        displayRGReport(RGObjects,eachRG,node,operation)
                else:
                    nonParticipatingNodes.append(node)
            if nonParticipatingNodes:
                defMsg="\nNode(s) %1$s is not in participating nodes of Resource Group %2$s. No report is generated for these nodes\n\n"
                displayMsg("",44,"scripts.cat",54,defMsg,','.join(nonParticipatingNodes),eachRG)
        print('')
    else:
        # Detailed report: per-resource breakdown on top of the summary.
        for eachRG in rgList:
            nonParticipatingNodes=[]
            defMsg="Resource Group                              : %1$s"
            displayMsg("",44,"scripts.cat",34,defMsg,eachRG)
            for node in nodeList:
                if node in RGObjects[eachRG].participatingNodes:
                    defMsg="\nNode                                        : %1$s"
                    displayMsg("",44,"scripts.cat",28,defMsg,node)
                    for operation in ["online","offline"]:
                        displayRGReport(RGObjects,eachRG,node,operation)
    
                        # FS_BEFORE_IPADDR controls whether filesystems come up
                        # before the service address; it decides resource ordering.
                        command='clodmget -n -q \"group=%s and name=FS_BEFORE_IPADDR\" -f value HACMPresource' % (eachRG)
                        status,order=executeCommand("",command)
                        # NOTE(review): if 'order' is any value other than "",
                        # "false" or "true", resourceOrder stays unset (NameError on
                        # first pass) or keeps the previous iteration's value.
                        if order == "false" or order == "":
                            resourceOrder=["WPAR_NAME","SERVICE_LABEL","VOLUME_GROUP","FILESYSTEM","NFS","APPLICATIONS"]
                        elif order == "true":
                            resourceOrder=["WPAR_NAME","VOLUME_GROUP","FILESYSTEM","SERVICE_LABEL","NFS","APPLICATIONS"]
                        
                        # Reverse the resource order during offline operation
                        if operation == "offline":
                            resourceOrder.reverse()
                        
                        for resType in resourceOrder:
                            # Skip resource types not present in this RG
                            if len(RGObjects[eachRG].resourcesInRG[resType]) == 0:
                                continue
                                
                            onlineString=""
                            offlineString=""
                            
                            if resType == "SERVICE_LABEL":
                                onlineString="Alias"
                                offlineString="Remove"
                                defMsg="\nNetwork Time (HH:MM:SS)                                : %1$s"
                                if RGObjects[eachRG].networkOnlineTime[node] != 0 and operation == "online":
                                    displayMsg("",44,"scripts.cat",99,defMsg,convertSecondsTo(round(RGObjects[eachRG].networkOnlineTime[node],2)))
                                elif RGObjects[eachRG].networkOfflineTime[node] != 0 and operation == "offline":
                                    displayMsg("",44,"scripts.cat",99,defMsg,convertSecondsTo(round(RGObjects[eachRG].networkOfflineTime[node],2)))
                                else:
                                    defMsg="\nNetwork Time                                : %1$s"
                                    displayMsg("",44,"scripts.cat",87,defMsg,EMPTY_DATA)
                            elif resType == "APPLICATIONS":
                                onlineString="Start"
                                offlineString="Stop"
                                if RGObjects[eachRG].applicationOnlineTime[node] != 0:
                                    defMsg="\nApplication Time (HH:MM:SS)                            : %1$s"
                                    if operation == "online":
                                        displayMsg("",44,"scripts.cat",100,defMsg,convertSecondsTo(round(RGObjects[eachRG].applicationOnlineTime[node],2)))
                                    else:
                                        displayMsg("",44,"scripts.cat",100,defMsg,convertSecondsTo(round(RGObjects[eachRG].applicationOfflineTime[node],2)))
                                else:
                                    defMsg="\nApplication Time                                : %1$s"
                                    displayMsg("",44,"scripts.cat",88,defMsg,EMPTY_DATA)
                            elif resType == "VOLUME_GROUP":
                                onlineString="Varyon"
                                offlineString="Varyoff"
                                # Displayed only for online operation as Volume group is brought online first then Filesystem
                                if RGObjects[eachRG].storageOnlineTime[node] != 0:
                                    defMsg="\nStorage Time (HH:MM:SS)                                : %1$s"
                                    if operation == "online":
                                        displayMsg("",44,"scripts.cat",101,defMsg,convertSecondsTo(round(RGObjects[eachRG].storageOnlineTime[node],2)))
                                else:
                                    defMsg="\nStorage Time                                : %1$s"
                                    displayMsg("",44,"scripts.cat",89,defMsg,EMPTY_DATA)
                            elif resType == "FILESYSTEM":
                                onlineString="Mount"
                                offlineString="Unmount"
                                # Displayed only for offline operation as Filesystem is brought offline first then Volume group
                                if RGObjects[eachRG].storageOfflineTime[node] != 0:
                                    defMsg="\nStorage Time (HH:MM:SS)                                : %1$s"
                                    if operation == "offline":
                                        displayMsg("",44,"scripts.cat",101,defMsg,convertSecondsTo(round(RGObjects[eachRG].storageOfflineTime[node],2)))
                                else:
                                    defMsg="\nStorage Time                                : %1$s"
                                    displayMsg("",44,"scripts.cat",89,defMsg,EMPTY_DATA)
                            elif resType == "NFS":
                                onlineString="Activate"
                                offlineString="Deactivate"
                            elif resType == "WPAR_NAME":
                                onlineString="Start"
                                offlineString="Stop"
                            
                            defMsg="Resource Type                               : %1$s"
                            displayMsg("",44,"scripts.cat",39,defMsg,resType)
                            for resource in RGObjects[eachRG].resourcesInRG[resType]:
                                # When ODM has both filesystem and ALL, In that case it consider "ALL" is an filesytem
                                # so to overcome that a check added to exclude when resource is "ALL"
                                if operation == "online" and resource.lower() != "all":
                                    if len(RGObjects[eachRG].resourceOnlineTime[node][resType][resource]) > 0:
                                        defMsg="Resource name                               : %1$s"
                                        displayMsg("",44,"scripts.cat",40,defMsg,"\"%s\""%resource)
                                        if resType == "VOLUME_GROUP":
                                            #online string is based upon the vg operation which can be retrieved
                                            # using following dictionary with key as vg operation and value as 
                                            #online string to be displayed in report.

                                            dictOnlineString={'VARY_ON':'Varyon','SYNC_VG':'SyncVG'}
                                            for vgOperation in RGObjects[eachRG].resourceOnlineTime[node][resType][resource].keys():
                                                defMsg="Operation performed                         : %1$s"
                                                displayMsg("",44,"scripts.cat",41,defMsg,dictOnlineString[vgOperation])
                                                # NOTE(review): ("VARY_ON") is a plain string, not a
                                                # tuple, so 'in' is a substring test here; confirm.
                                                if vgOperation in ("VARY_ON"):
                                                    defMsg="Time at which latest event occurred         : %1$s"
                                                    displayMsg("",44,"scripts.cat",30,defMsg,RGObjects[eachRG].resourceOnlineTime[node][resType][resource][vgOperation][0][0])
                                                    timeToDisplay=round(RGObjects[eachRG].resourceOnlineTime[node][resType][resource][vgOperation][0][1],2)
                                                #time stamp to display for first occurence will be time taken for first
                                                # two occurrences for SYNC_VG.
    
                                                else:
                                                    timeToDisplay=round((RGObjects[eachRG].resourceOnlineTime[node][resType][resource][vgOperation][0][1]+RGObjects[eachRG].resourceOnlineTime[node][resType][resource][vgOperation][1][1]),2) 
                                                averageTimeToDisplay=round(RGObjects[eachRG].averageOnlineTime[node][resType][resource][vgOperation],2)
                                                if timeToDisplay > 0: 
                                                    defMsg="Time taken for the latest event (HH:MM:SS)             : %1$s"
                                                    displayMsg("",44,"scripts.cat",96,defMsg,convertSecondsTo(timeToDisplay))
                                                else:
                                                    defMsg="Time taken for the latest event             : %1$s"
                                                    displayMsg("",44,"scripts.cat",90,defMsg,EMPTY_DATA)
                                                if averageTimeToDisplay > 0: 
                                                    defMsg="Average time taken for recent occurrences (HH:MM:SS)   : %1$s"
                                                    displayMsg("",44,"scripts.cat",97,defMsg,convertSecondsTo(averageTimeToDisplay))
                                                else:
                                                    defMsg="Average time taken for recent occurrences   : %1$s"
                                                    displayMsg("",44,"scripts.cat",91,defMsg,EMPTY_DATA)
                                        else:
                                            defMsg="Operation performed                         : %1$s"
                                            displayMsg("",44,"scripts.cat",41,defMsg,onlineString)
                                            timeStamp=RGObjects[eachRG].resourceOnlineTime[node][resType][resource][0][0]
                                            timeToDisplay=round(RGObjects[eachRG].resourceOnlineTime[node][resType][resource][0][1],2)
                                            averageTimeToDisplay=round(RGObjects[eachRG].averageOnlineTime[node][resType][resource],2)
                                            displayResourceReport(timeStamp,timeToDisplay,averageTimeToDisplay)    
                                        # Display the timelines of application commands for smart assist applications
                                        if resType == "APPLICATIONS" and RGObjects[eachRG].isSARG:
                                            for subOperation in RGObjects[eachRG].latestSAStartOperations[node].keys():
                                                defMsg="Sub Operation performed                     : %1$s"
                                                displayMsg("",44,"scripts.cat",43,defMsg,subOperation.replace(' ','\ '))
                                                defMsg="Time at which sub operation occurred        : %1$s"
                                                displayMsg("",44,"scripts.cat",44,defMsg,RGObjects[eachRG].latestSAStartOperations[node][subOperation][0])
                                                defMsg="Time taken for the sub operation (HH:MM:SS)            : %1$s"
                                                displayMsg("",44,"scripts.cat",102,defMsg,convertSecondsTo(round(RGObjects[eachRG].latestSAStartOperations[node][subOperation][1],2)))
                                                defMsg="Average time taken for the sub operation (HH:MM:SS)    : %1$s"
                                                displayMsg("",44,"scripts.cat",103,defMsg,convertSecondsTo(round(RGObjects[eachRG].averageSAStartOperations[node][subOperation],2)))
    
                                # When ODM has both filesystem and ALL, In that case it consider "ALL" is an filesytem
                                # so to overcome that a check added to exclude when resource is "ALL"
                                elif operation == "offline" and resource.lower() != "all":
                                    if len(RGObjects[eachRG].resourceOfflineTime[node][resType][resource]) > 0:
                                        defMsg="Operation performed                         : %1$s"
                                        displayMsg("",44,"scripts.cat",41,defMsg,offlineString)
                                        defMsg="Resource name                               : %1$s"
                                        displayMsg("",44,"scripts.cat",40,defMsg,"\"%s\""%resource)
                                        timeStamp=RGObjects[eachRG].resourceOfflineTime[node][resType][resource][0][0]
                                        timeToDisplay=round(RGObjects[eachRG].resourceOfflineTime[node][resType][resource][0][1],2)
                                        averageTimeToDisplay=round(RGObjects[eachRG].averageOfflineTime[node][resType][resource],2)
                                        displayResourceReport(timeStamp,timeToDisplay,averageTimeToDisplay)    
                                        # Display the timelines of application commands for smart assist applications
                                        if resType == "APPLICATIONS" and RGObjects[eachRG].isSARG:
                                            for subOperation in RGObjects[eachRG].latestSAStopOperations[node].keys():
                                                defMsg="Sub Operation performed                     : %1$s"
                                                displayMsg("",44,"scripts.cat",43,defMsg,subOperation.replace(' ','\ '))
                                                defMsg="Time at which sub operation occurred        : %1$s"
                                                displayMsg("",44,"scripts.cat",44,defMsg,RGObjects[eachRG].latestSAStopOperations[node][subOperation][0])
                                                defMsg="Time taken for the sub operation (HH:MM:SS)            : %1$s"
                                                displayMsg("",44,"scripts.cat",102,defMsg,convertSecondsTo(round(RGObjects[eachRG].latestSAStopOperations[node][subOperation][1],2)))
                                                defMsg="Average time taken for the sub operation (HH:MM:SS)    : %1$s"
                                                displayMsg("",44,"scripts.cat",103,defMsg,convertSecondsTo(round(RGObjects[eachRG].averageSAStopOperations[node][subOperation],2)))
                        print('')
                else:
                    nonParticipatingNodes.append(node)
            if nonParticipatingNodes:
                defMsg="\nNode(s) %1$s is not in participating nodes of Resource Group %2$s. No report is generated for these nodes\n\n"
                displayMsg("",44,"scripts.cat",54,defMsg,','.join(nonParticipatingNodes),eachRG)    
            print('')            
    
    # Check if invalid RGs are given as input
    if invalidRGs:
        for eachRG in invalidRGs:
            defMsg="ResourceGroup \"%1$s\" is not configured in cluster or is not synchronized, hence not considered for analysis"
            displayMsg("",44,"scripts.cat",25,defMsg,eachRG)

    
def displayResourceReport(timeStamp,timeToDisplay,averageTimeToDisplay):
    """
    Function    : displayResourceReport
    Description : This function displays the timestamp, latest time taken
                  and average time taken for a single resource.
    Arguments   : timeStamp - timestamp of the latest occurence
                  timeToDisplay - time taken in latest occurence
                  averageTimeToDisplay - Average time taken
    Return      : None
    """
    # Drop the last 4 characters of a real timestamp (presumably the
    # sub-second suffix — matches the [:-4] slices used elsewhere).
    defMsg="Time at which latest event occurred         : %1$s"
    stampValue = timeStamp[:-4] if timeStamp not in ("NULL", EMPTY_DATA) else EMPTY_DATA
    displayMsg("",44,"scripts.cat",30,defMsg,stampValue)

    if timeToDisplay > 0:
        defMsg="Time taken for the latest event (HH:MM:SS)             : %1$s"
        displayMsg("",44,"scripts.cat",96,defMsg,convertSecondsTo(timeToDisplay))
    else:
        defMsg="Time taken for the latest event             : %1$s"
        displayMsg("",44,"scripts.cat",90,defMsg,EMPTY_DATA)

    if averageTimeToDisplay > 0:
        defMsg="Average time taken for recent occurrences (HH:MM:SS)   : %1$s"
        displayMsg("",44,"scripts.cat",97,defMsg,convertSecondsTo(averageTimeToDisplay))
    else:
        defMsg="Average time taken for recent occurrences   : %1$s"
        displayMsg("",44,"scripts.cat",91,defMsg,EMPTY_DATA)

def displayResourceCentricReport(resourceType,resourceObjects,invalidResources):
    """
    Function    : displayResourceCentricReport
    Description : This function displays the latest and average online and
                  offline times of each resource of the given type, per node.
    Arguments   : resourceType - Type of resource
                  resourceObjects - Objects dictionary, key is resource name, value is object
                  invalidResources - list of invalid resources
    Return      : None
    """
    if resourceObjects:
        defMsg="Resource Type                                   : %1$s"
        displayMsg("",44,"scripts.cat",39,defMsg,resourceType)

        # Operation labels shown for each resource type (online, offline);
        # unknown types fall back to empty strings, as before.
        operationStrings={
            "SERVICE_LABEL":("Alias","Remove"),
            "APPLICATIONS":("Start","Stop"),
            "VOLUME_GROUP":("Varyon","Varyoff"),
            "FILESYSTEM":("Mount","Unmount"),
            "NFS":("Activate","Deactivate"),
            "WPAR_NAME":("Start","Stop"),
        }
        onlineString,offlineString=operationStrings.get(resourceType,("",""))

        for resource in resourceObjects:
            resObj=resourceObjects[resource]
            for node in resObj.nodesToAnalyze:
                defMsg="\nNode                                  : %1$s"
                displayMsg("",44,"scripts.cat",28,defMsg,node)
                for operation in ["online","offline"]:
                    defMsg="Operation performed                         : %1$s"
                    if operation == "online":
                        displayMsg("",44,"scripts.cat",41,defMsg,onlineString)
                        # resourceOnlineTime[node] has list in format [timestamp,timetaken]
                        timeStamp=resObj.resourceOnlineTime[node][0]
                        timeToDisplay=round(resObj.resourceOnlineTime[node][1],2)
                        averageTimeToDisplay=round(resObj.averageOnlineTime[node],2)
                    else:
                        displayMsg("",44,"scripts.cat",41,defMsg,offlineString)
                        # resourceOfflineTime[node] has list in format [timestamp,timetaken]
                        timeStamp=resObj.resourceOfflineTime[node][0]
                        timeToDisplay=round(resObj.resourceOfflineTime[node][1],2)
                        averageTimeToDisplay=round(resObj.averageOfflineTime[node],2)

                    defMsg="Resource name                               : %1$s"
                    displayMsg("",44,"scripts.cat",40,defMsg,resource)
                    displayResourceReport(timeStamp,timeToDisplay,averageTimeToDisplay)

    # Report any resources not matched to the current configuration
    if invalidResources:
        for eachResource in invalidResources:
            defMsg="Resource %1$s is either not configured or does not belong to any Resource Group"
            displayMsg("",44,"scripts.cat",75,defMsg,eachResource)
    
    
def nodeCentricReport(nodes,isDetailed,isSiteCentric,guiFlag,average,dateList):
    """
    Function    : nodeCentricReport
    Description : This function generates the node centric report and
                  displays it to the user, either as plain text or JSON.
    Arguments   :
        nodes         : Comma separated, validated node list
        isDetailed    : True, for detailed node centric report
                        False, for non detailed node centric report
        isSiteCentric : True, if called from site centric report
                        False, Otherwise
        guiFlag       : True, for report generate in JSON format
                        False, Otherwise
        average       : Report to generate average number of event occurences
        dateList      : [startdate,enddate]
    Return      : None
    """
    # nodeList is global because other report helpers (e.g. the RG centric
    # display functions) iterate over it.
    global nodeList
    nodeList = nodes.split(',')

    nodeData = OrderedDict()
    rgsInNode = OrderedDict()
    rgData = {}
    rohaData = {}

    # One NodeCentricClass object per node processes and fetches the
    # timeline information for that node's node_up and node_down events.
    for node in nodeList:
        nodeData[node] = {}
        rgsInNode[node] = {}
        nodeObj = NodeCentricClass(node)
        rgsInNode[node] = nodeObj.resGroupInNode[node]
        nodeData[node] = nodeObj.prepareNodeCentricReport(average,dateList)

    if isDetailed == True:
        # Detailed report also needs per resource group timing information
        cmd = "clodmget -n -f group HACMPgroup"
        status,rgs = executeCommand("",cmd)
        if not status and rgs != "":
            for rg in rgs.split('\n'):
                rgData[rg] = ResourceGroup(rg,average,dateList)
                rgData[rg].prepareRGReport(average,dateList)
                rgData[rg].calculateTotalTime()

        # Fetch information about ROHA Acquire and Release time on each node
        command = "clmgr query roha"
        status,output = executeCommand("",command)
        if status:
            log_message(UTILS_LOG,"Command to query ROHA failed, skipping analysis related to ROHA")
        elif len(output) <= 0:
            # ROHA not configured: fill placeholder entries for every node
            log_message(UTILS_LOG,"ROHA is not configured")
            for node in nodeList:
                rohaData[node] = {
                    "acquire":["NULL",0,"NULL"],
                    "acquireAssessment":["NULL",0,"NULL"],
                    "release":["NULL",0,"NULL"],
                    "releaseAssessment":["NULL",0,"NULL"],
                }
        else:
            for node in nodeList:
                rohaData[node] = {}
                rohaObj = NodeCentricClass(node)
                rohaObj.prepareROHAReport(average,dateList)
                rohaData[node]["acquire"] = rohaObj.latestRohaAcquireData
                rohaData[node]["acquireAssessment"] = rohaObj.latestRohaAcquireAssessmentEvent
                rohaData[node]["release"] = rohaObj.latestRohaReleaseData
                rohaData[node]["releaseAssessment"] = rohaObj.latestRohaReleaseAssessmentEvent

    if guiFlag == True:
        # Convert the collected dictionary to JSON for the GUI
        displayGuiReport(nodeData)
    else:
        # Display processed information for each node
        if not isSiteCentric:
            defMsg="\nNode Centric Report:"
            displayMsg("",44,"scripts.cat",27,defMsg)
        displayNodeCentricReport(nodeData,rgsInNode,rgData,rohaData,isDetailed)
    return None

def displayGuiReport(dictionary):
    """
    Function    : displayGuiReport
    Description : This function converts a dictionary object to JSON format
                  and prints it on stdout so the GUI can consume the report.
    Arguments   :
        dictionary: Dictionary object to serialize
    Return      : None
    """
    # Fixed docstring which previously named the function "playGuiReport".
    print(json.dumps(dictionary))
    return None

def siteCentricReport(siteList,validatedNodes,isDetailed,average,dateList):
    """
    Function    : siteCentricReport
    Description : This function generates site centric report
                  and displays it to user
    Arguments   :
        siteList: Validated input site list (comma separated)
        validatedNodes: Validated cluster node list (comma separated)
        isDetailed: True, for detailed site centric report
                    False, for site centric report
        average: Report to generate average number of event occurences
        dateList: [startdate,enddate]
    Return      : None
    """
    siteData = OrderedDict()
    nodesInsite = OrderedDict()
    validatedNodeList = validatedNodes.split(',')
    # Fetch node list configured in each site
    for site in siteList.split(','):
        siteData[site] = {}
        obj = SiteCentricClass(site)
        # Keep only the nodes of this site which are reachable (validated).
        # BUGFIX: the previous code called list.remove() with no argument
        # (a TypeError) while iterating the same list it mutated; a
        # filtering comprehension avoids both defects.
        nodesInsite[site] = [node for node in obj.nodesInsite[site]
                             if node in validatedNodeList]
        nodesInsite[site] = ','.join(nodesInsite[site])
        # Prepare site centric report
        siteData[site] = obj.prepareSiteCentricReport(average,dateList)

    # Display site centric report
    defMsg="\nSite Centric Report:"
    displayMsg("",44,"scripts.cat",72,defMsg)
    displaySiteCentricReport(siteData,nodesInsite,isDetailed,average,dateList)

    return None

def generateRGCentricReport(RGs,managedNodes,isDetailed,guiFlag,average,dateList):
    """
    Function    : generateRGCentricReport
    Description : This function generates RG centric report based on inputs
    Arguments   :
                  RGs - List of Resource Groups with comma separated or "ALL"
                  managedNodes - List of nodes with comma separated on which analysis has to be performed
                  isDetailed - True if user specifies -d option
                               False if user does not specify -d option
                  guiFlag - True, for report generate in JSON format
                            False, Otherwise
                  average - Report to generate average number of event occurences
                  dateList - [startdate,enddate]

    Return      : None
    """
    # global variables are used by display function
    global nodeList
    nodeList=managedNodes.split(",")
    rgList=[]
    invalidRGs=[]
    # Read the configured Resource Groups from the active configuration
    # directory (ACD) when it exists, otherwise from the default ODM.
    if os.path.isdir("%s" % ACD):
        command="ODMDIR=%s clodmget -f group -n HACMPgroup" % ACD
    else:
        command="clodmget -f group -n HACMPgroup"

    status,output=executeCommand("",command)
    if output:
        allRGs=output.split("\n")
    else:
        defMsg="Warning: No Resource Groups are configured in cluster, analysis is not done"
        displayMsg("",44,"scripts.cat",42,defMsg)
        # display warning message when no Resource Groups are configured and exit with '0'
        sys.exit(0)

    if RGs.lower() == "all":
        rgList=allRGs
    else:
        inputRGs=RGs.split(",")
        # Validating input RGs:
        # if inputRGs is a subset of all RGs, consider all inputRGs for analysis
        if set(inputRGs) <= set(allRGs):
            rgList=list(set(inputRGs))
        else:
            # Get the list of inputRGs which are not configured in cluster
            invalidRGs=list(set(inputRGs) - set(allRGs))
            # Removing invalidRGs from inputRGs
            rgList=list(set(inputRGs)-set(invalidRGs))

    # RGObjects is a dictionary with RG name as key and object as value;
    # RGAppMonObjects holds a parallel set of objects used only for the
    # application-monitor timing report (-d option).
    RGObjects={}
    RGAppMonObjects={}
    for eachRG in rgList:
        RGObjects[eachRG]=ResourceGroup(eachRG,average,dateList)
        RGObjects[eachRG].prepareRGReport(average,dateList)
        RGObjects[eachRG].calculateTotalTime()
        RGAppMonObjects[eachRG]=ResourceGroup(eachRG,average,dateList)
        RGAppMonObjects[eachRG].appMonTime(average,dateList)

    # Converting the Dictionary object to JSON Format
    if guiFlag:
        # Each RG maps to a single-element list holding its attribute dict.
        rgDict={eachRG: [RGObjects[eachRG].__dict__] for eachRG in rgList}
        displayGuiReport(rgDict)
    else:
        displayRGCentricReport(RGObjects,rgList,invalidRGs,isDetailed)
        if isDetailed:
            displayAppMonReport(RGAppMonObjects,rgList,invalidRGs)
    return None

def generateResourceReport(managedNodes,inputResourceType,inputResources,guiFlag,average,dateList):
    """
    Function    : generateResourceReport
    Description : This function generates report for resources of resourceType
    Arguments   : managedNodes - comma separated list of nodes on which the
                                 analysis has to be performed
                  inputResourceType - type of resource to report on
                                      (e.g. SERVICE_IP, FILESYSTEM, NFS, ...)
                  inputResources - list of resource names, or ["ALL"] for all
                                   resources of the given type
                  guiFlag - True, for report generated in JSON format
                            False, otherwise
                  average - Report to generate average number of event occurences
                  dateList - [startdate,enddate]
    Return      : None
    """
    # nodeList is global because it is consumed by the display functions
    global nodeList
    resourcesInCluster=[]
    nodeList=managedNodes.split(",")

    # For service_ip, update the input resource type to SERVICE_LABEL which
    # is used in HACMPresource class
    if inputResourceType == "SERVICE_IP":
        inputResourceType="SERVICE_LABEL"

    # Validating Resources
    invalidResources=[]
    resourcesToAnalyze=[]
    # Query the active configuration directory (ACD) ODM when it exists,
    # otherwise fall back to the default ODM location.
    isACDExists=False
    if os.path.isdir("%s" % ACD):
        isACDExists=True
    if inputResourceType == "FILESYSTEM":
        # Filesystems are tracked through their owning volume groups, so
        # collect the VOLUME_GROUP resources configured in the cluster.
        if isACDExists:
            command="ODMDIR=%s clodmget -n -q name=VOLUME_GROUP -f value HACMPresource" % ACD
        else:
            command="clodmget -n -q name=VOLUME_GROUP -f value HACMPresource"
        status,output=executeCommand("",command)
        if output:
            resourcesInCluster=output.split("\n")
    elif inputResourceType == "NFS":
        if isACDExists:
            command="ODMDIR=%s clodmget -n -q name=MOUNT_FILESYSTEM -f value HACMPresource" % ACD
        else:
            command="clodmget -n -q name=MOUNT_FILESYSTEM -f value HACMPresource"
        status,output=executeCommand("",command)
        if output:
            for eachNFS in output.split("\n"):
                # MOUNT_FILESYSTEM values are ';' separated; the second
                # field is taken as the NFS resource name (presumably a
                # "local;remote" pair — TODO confirm against the ODM schema)
                resourcesInCluster.append(eachNFS.split(";")[1])
    else:
        # Any other resource type maps directly onto HACMPresource values
        if isACDExists:
            command="ODMDIR=%s clodmget -n -q name=%s -f value HACMPresource" % (ACD,inputResourceType)
        else:
            command="clodmget -n -q name=%s -f value HACMPresource" % (inputResourceType)
        status,output=executeCommand("",command)
        if output:
            resourcesInCluster=output.split("\n")

    # Exit if resource of inputResourceType is not configured in cluster
    # (output still holds the result of the type-specific query above)
    if output == "":
        defMsg="%1$s resource is not configured in cluster.\n"
        displayMsg("",44,"scripts.cat",82,defMsg,inputResourceType)
        # display warning message when no resource are configured and exit with '0'
        sys.exit(0)

    if ','.join(inputResources).lower() == "all":
        if inputResourceType == "FILESYSTEM":
            resources=[]
            # Get the filesystem list associated with Resource Group
            command='clodmget -n -q "name=FILESYSTEM" -f value HACMPresource' 
            status,output = executeCommand("",command)
            resources=output.split('\n')
            for eachVG in resourcesInCluster:
                # Get the resource group name parameter using the volume group name
                command="clodmget -n -q 'name=VOLUME_GROUP and value=%s' -f group HACMPresource" % eachVG
                status,rgname=executeCommand("",command)
                # Get the file system name or ALL parameter using the resource group name
                command="clodmget -n -q 'name=FILESYSTEM and group=%s' -f value HACMPresource" % rgname 
                status,fsname=executeCommand("",command)
                # Get the filesystem list associated with Volume Group
                filesystems=""   
                command="lsvgfs %s " % (eachVG)
                status,filesystems = executeCommand("",command)
                if len(filesystems):
                    filesystems=filesystems.split("\n")
                for eachFS in filesystems:
                    if fsname == "ALL":
                        # RG manages ALL filesystems of this VG; report each
                        resourcesToAnalyze.append(eachFS)
                    else:
                        # check for the resource group FS matches with volume group FS
                        if eachFS in resources:
                            resourcesToAnalyze.append(eachFS)
        else:
            resourcesToAnalyze=resourcesInCluster
    else:
        for inputResource in inputResources:
            if inputResourceType == "FILESYSTEM":
                # Map the filesystem to its logical volume name via CuAt
                command="clodmget -n -q value=%s -f name CuAt" % inputResource
                status,lvName=executeCommand("",command)
                if len(lvName) == 0:
                    # No logical volume carries this mount point: invalid input
                    invalidResources.append(inputResource)
                    continue
                command="lslv %s | grep -w 'VOLUME GROUP'" % lvName
                status,output=executeCommand("",command)
                # The VG name is the last whitespace-separated token of the lslv line
                vgName=output.split()[-1]
                # NOTE(review): resType is assigned but never used below
                resType="VOLUME_GROUP"
                # Check vgName is available in resource cluster 
                if vgName in resourcesInCluster:
                    # Get the resource group name parameter using the volume group name
                    command="clodmget -n -q 'name=VOLUME_GROUP and value=%s' -f group HACMPresource" % vgName
                    status,rgname=executeCommand("",command)
                    # Get the file system name or ALL parameter using the resource group name
                    command="clodmget -n -q 'name=FILESYSTEM and group=%s' -f value HACMPresource" % rgname
                    status,fsname=executeCommand("",command)
                    #If ALL has not been configured in ODM HACMPresource
                    #for FILESYSTEM for the rgname,check if the inputResource
                    #exists in the filesystems in ODM HACMPresource for the RG
                    #if exists it means the filesystem will be considered for
                    #report else not.
                    #If ALL has been configured in ODM HACMPresource for FILESYSTEM
                    #for the rgname the filesystem will be considered for report 
                    # (inputResource is replaced by vgName only for the
                    # membership test below; resourcesToAnalyze still uses
                    # the original names from inputResources)
 
                    if(fsname != "ALL"):
                        if(inputResource in fsname.split('\n')):
                            inputResource = vgName
                    else:
                        inputResource = vgName  
                    
            if inputResource not in resourcesInCluster:
                invalidResources.append(inputResource)
        resourcesToAnalyze=list(set(inputResources)- set(invalidResources))

    # resourceObjects is a dictionary with resource name as key and object as value
    resourceObjects={}
    for resource in resourcesToAnalyze:
        resourceObjects[resource]=Resource(inputResourceType,resource,average,dateList)

    # Converting the Dictionary object to JSON Format for the GUI
    if guiFlag == True:
        resourceDict = {}
        for resource in resourcesToAnalyze:
            resourceDict[resource] = []
            resourceDict[resource].append(resourceObjects[resource].__dict__)
        displayGuiReport(resourceDict)
    else: 
        displayResourceCentricReport(inputResourceType,resourceObjects,invalidResources)
    return None    
        
def generateVerificationReport(managedNodes,isDetailed,guiFlag,average,dateList):
    """
    Function    : generateVerificationReport
    Description : This function display the verification and synchronization availability report
    Arguments   : managedNodes - List of nodes with comma separated on which analysis has to be performed
                  isDetailed  - True, if detailed report is needed
                                False, otherwise
                  guiFlag - True, for report generated in JSON format
                            False, otherwise
                  average - Report to generate average number of event occurences
                  dateList - [startdate,enddate]
    Return      : None
    """
    # global variables are used by display function
    verNodeData = OrderedDict()
    syncNodeData = OrderedDict()
    for nodeName in managedNodes.split(','):
        verNodeData[nodeName] = {}
        syncNodeData[nodeName] = {}
        # Process and fetch timeline information for verification begin and end.
        # (lookUPEvents returns the whole per-node dictionary, so no prior
        # initialization of verData is needed.)
        verData=lookUPEvents("verification",nodeName,nodeName,"",average,dateList)
        verNodeData[nodeName]["verify"]=verData[nodeName]
        # Process and fetch timeline information for synchronization begin and end.
        verData=lookUPEvents("synchronization",nodeName,nodeName,"",average,dateList)
        syncNodeData[nodeName]["sync"]=verData[nodeName]
    #Converting the Dictionary object to JSON Format
    if guiFlag:
        displayGuiReport(verNodeData)
        return None
    # Display processed information for each node
    defMsg="\nCluster Verification Report:"
    displayMsg("",44,"scripts.cat",49,defMsg)
    displayNodeCentricReport(verNodeData,{},{},{},False)
    # Display processed information for each node
    defMsg="\nCluster Synchronization Report:"
    displayMsg("",44,"scripts.cat",73,defMsg)
    displayNodeCentricReport(syncNodeData,{},{},{},False)
    if isDetailed:
        detailNodeData = OrderedDict()
        for nodeName in managedNodes.split(','):
            detailNodeData[nodeName]={}
            # Process and fetch timeline information for Reconfigure of resource in dare
            detailreport=lookUPEvents("dare_reconfig","",nodeName,"",average,dateList)
            detailNodeData[nodeName]["reconfigdetailed"]=detailreport[nodeName]
        defMsg="\nDetailed report of DARE operations:"
        displayMsg("",44,"scripts.cat",77,defMsg)
        displayNodeCentricReport(detailNodeData,{},{},{},False)

def displayNetworkUpDown(nodeObject):
    """
    Function    : displayNetworkUpDown
    Description : This function displays network up/down report
                  for the node whose object is passed as argument
    Arguments   :
                 nodeObject - object of the NodeCentricClass
    Return      : None
    """
    defMsg="\nNode                                        : %1$s"
    displayMsg("",44,"scripts.cat",28,defMsg,nodeObject.nodeName)
    # Each event entry is indexed as [timestamp, duration, ..., network name]
    if nodeObject.networkDownEvents:
        for event in nodeObject.networkDownEvents.values():
            defMsg="\nNetwork Name                               : %1$s"
            displayMsg("",44,"scripts.cat",56,defMsg,event[3])
            defMsg="Event or Operation performed                : %1$s"
            displayMsg("",44,"scripts.cat",29,defMsg,"Network_Down")
            defMsg="Time at which event occured                 : %1$s"
            displayMsg("",44,"scripts.cat",55,defMsg,event[0])
            defMsg="Time taken for the event (HH:MM:SS)                   : %1$s"
            displayMsg("",44,"scripts.cat",104,defMsg,convertSecondsTo(event[1]))
    else:
        defMsg="Network Down events are not found."
        displayMsg("",44,"scripts.cat",92,defMsg)

    if nodeObject.networkUpEvents:
        for event in nodeObject.networkUpEvents.values():
            defMsg="\nNetwork Name                               : %1$s"
            displayMsg("",44,"scripts.cat",56,defMsg,event[3])
            defMsg="Event or Operation performed                : %1$s"
            displayMsg("",44,"scripts.cat",29,defMsg,"Network_Up")
            defMsg="Time at which event occured                 : %1$s"
            displayMsg("",44,"scripts.cat",55,defMsg,event[0])
            defMsg="Time taken for the event (HH:MM:SS)                   : %1$s"
            displayMsg("",44,"scripts.cat",104,defMsg,convertSecondsTo(event[1]))
    else:
        defMsg="Network Up events are not found."
        displayMsg("",44,"scripts.cat",93,defMsg)

    return None

def displayswapAdapterEvent(nodeObject):
    """
    Function    : displayswapAdapterEvent
    Description : Display the swap adapter event report for the node whose
                  object is passed as argument.
    Arguments   :
                 nodeObject - NodeCentricClass object carrying swap adapter statistics
    Return      : None
    """
    # Report header: the node this report belongs to
    defMsg="\nNode                                        : %1$s"
    displayMsg("",44,"scripts.cat",28,defMsg,nodeObject.nodeName)
    swapEvents=nodeObject.swapAdapterDict
    if not swapEvents:
        defMsg="Swap adapter events are not found"
        displayMsg("",44,"scripts.cat",94,defMsg)
        return None
    # One section per recorded swap_adapter occurrence
    for eventData in swapEvents.values():
        defMsg="\nNetwork Name                                : %1$s"
        displayMsg("",44,"scripts.cat",56,defMsg,eventData[3])
        defMsg="Event or Operation performed                : %1$s"
        displayMsg("",44,"scripts.cat",29,defMsg,"swap_adapter")
        defMsg="Time at which event occured                 : %1$s"
        displayMsg("",44,"scripts.cat",55,defMsg,eventData[0])
        # Round the duration to two decimals before formatting it
        elapsed=round(float(eventData[1]),2)
        defMsg="Time taken for the event (HH:MM:SS)                   : %1$s"
        displayMsg("",44,"scripts.cat",104,defMsg,convertSecondsTo(elapsed))
        defMsg="Interface name                              : %1$s"
        displayMsg("",44,"scripts.cat",60,defMsg,eventData[5])
        defMsg="Service IP name                             : %1$s"
        displayMsg("",44,"scripts.cat",61,defMsg,eventData[4])
    return None

def displayConfigTooLongEvent(nodeObject):
    """
    Function    : displayConfigTooLongEvent
    Description : This function displays the config_too_long event report
                  for the node whose object is passed as argument
    Arguments   :
                 nodeObject - NodeCentricClass object with config_too_long statistics
    Return      : None
    """
    #Display the Config Too Long event report
    defMsg="\nNode                                        : %1$s"
    displayMsg("",44,"scripts.cat",28,defMsg,nodeObject.nodeName)
    defMsg="Event or Operation performed                : %1$s"
    displayMsg("",44,"scripts.cat",29,defMsg,"ConfigTooLong")
    if len(nodeObject.ConfigTooLongDict.keys())>0:
        for occurrence in nodeObject.ConfigTooLongDict.keys():
            defMsg="\nConfig_too_long event name                  : %1$s"
            displayMsg("",44,"scripts.cat",109,defMsg,nodeObject.ConfigTooLongDict[occurrence][3])
            defMsg="Time at which event occured                 : %1$s"
            displayMsg("",44,"scripts.cat",55,defMsg,nodeObject.ConfigTooLongDict[occurrence][0])
            # BUGFIX: the rounded duration was computed but never used (the
            # raw value was displayed); use it, consistent with
            # displayswapAdapterEvent.
            event_time=round(float(nodeObject.ConfigTooLongDict[occurrence][1]),2)
            defMsg="Time taken for the event (HH:MM:SS)                   : %1$s"
            displayMsg("",44,"scripts.cat",104,defMsg,convertSecondsTo(event_time))
    else:
        defMsg="Config_too_long events are not found"
        displayMsg("",44,"scripts.cat",116,defMsg)
    return None

def displayAixDownEvent(nodeObject):
    """
    Function    : displayAixDownEvent
    Description : This function displays the AIX down event report
                  for the node whose object is passed as argument
    Arguments   :
                 nodeObject - NodeCentricClass object with AIX down statistics
    Return      : None
    """
    #Display the aix down event report
    total_time=0    # renamed from 'sum' to avoid shadowing the builtin
    defMsg="\nNode                                        : %1$s"
    displayMsg("",44,"scripts.cat",28,defMsg,nodeObject.nodeName)
    defMsg="Event or Operation performed                : %1$s"
    # Raw string keeps the literal backslash without relying on the
    # unrecognized '\ ' escape sequence (same bytes as before).
    displayMsg("",44,"scripts.cat",29,defMsg,r"AIX\ down")
    occurrences=list(nodeObject.AixDownDict.keys())
    average_count=len(occurrences)
    if average_count>0:
        for occurrence in occurrences:
            total_time=total_time+nodeObject.AixDownDict[occurrence][1]
        # Only the most recent occurrence (last key) is shown in detail
        latest=nodeObject.AixDownDict[occurrences[-1]]
        defMsg="Time at which event occured                 : %1$s"
        displayMsg("",44,"scripts.cat",55,defMsg,latest[0])
        # BUGFIX: the rounded duration was computed but never used; display
        # it, consistent with displayswapAdapterEvent.
        event_time=round(float(latest[1]),2)
        defMsg="Time taken for the event (HH:MM:SS)                   : %1$s"
        displayMsg("",44,"scripts.cat",104,defMsg,convertSecondsTo(event_time))
        # NOTE(review): the catalog message says "Average time" but the
        # accumulated total is displayed without dividing by average_count;
        # preserved as-is — confirm intent before changing.
        defMsg="Average time taken for recent %1$d occurrences (HH:MM:SS) : %2$s"
        displayMsg("",44,"scripts.cat",98,defMsg,average_count,convertSecondsTo(total_time))

    else:
        defMsg="AIX down events are not found"
        displayMsg("",44,"scripts.cat",118,defMsg)
    return None


def displayMiscellaneousReport(nodeObjects,nodeList):
    """
    Function    : displayMiscellaneousReport
    Description : Display the miscellaneous report by invoking the
                  per-category display functions (network up/down,
                  swap adapter, config_too_long and AIX down events)
                  for every node in nodeList.
    Arguments   :
                 nodeObjects - Dictionary of the objects for the nodes in nodeList
                 nodeList -  List of valid nodes, user wants data for
    Return      : None
    """
    # (section header text, catalog message id, per-node display function)
    sections=[
        ("\nNetwork Events Report:",58,displayNetworkUpDown),
        ("\nSwap Adapter Events Report:",59,displayswapAdapterEvent),
        ("\nConfig_too_long Events Report:",110,displayConfigTooLongEvent),
        ("\nAIX down events report:",117,displayAixDownEvent),
    ]
    for defMsg,msgId,displayFunc in sections:
        # Print the section header, then that section for every node
        displayMsg("",44,"scripts.cat",msgId,defMsg)
        for node in nodeList:
            displayFunc(nodeObjects[node])
    return None
 
def generateMiscReport(managedNodes,average,dateList):
    """
    Function    : generateMiscReport
    Description : Collect the miscellaneous event statistics (network
                  up/down, swap adapter, config_too_long, AIX down) for
                  every managed node and display the combined report.
    Arguments   :
                  managedNodes - List of valid nodes, user wants data for
                  average - Report to generate average number of event occurences
                  dateList - [startdate,enddate]
    Return      : None
    """
    nodes=managedNodes.split(',')
    # Map each node name to its populated MiscEventsClass object
    nodeObjects={}
    for nodeName in nodes:
        miscObj=MiscEventsClass(nodeName)
        # Run every collector for the requested window/average settings
        for collect in (miscObj.getNetworkUpEvents,
                        miscObj.getNetworkDownEvents,
                        miscObj.getSwapAdapterEvents,
                        miscObj.getConfigTooLongEvents,
                        miscObj.getAixDownEvents):
            collect(average,dateList)
        nodeObjects[nodeName]=miscObj
    displayMiscellaneousReport(nodeObjects,nodes)
    return None
