#!/usr/openv/pdde/pdopensource/bin/python3

#
# $Copyright: Copyright (c) 2022 Veritas Technologies LLC. All rights reserved $
#

from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import sys
sys.path.insert(0, '/usr/openv/pdde/pdopensource/lib/python3.9/site-packages/')
from six.moves import range
from six.moves import input
import argparse
import logging
import os
import requests
import shlex
import socket
import subprocess
import time
import json
import signal
import atexit

# Handle ctrl+c
def signal_handler(sig, frame):
    sys.exit(1)
signal.signal(signal.SIGINT, signal_handler)

R_SUCESS = 0
R_FAIL = 1
master_server = ''
media_server = ''
instance_name = ''
instance_version = ''
logger = None
msdp_cloud_storage_type = 'PureDisk'
storage_path = ''
cloud_lsu_name = ''
reserved_free_space_percent = 10
cloud_data_cache_size_min = 10
cloud_data_cache_size_max = 500
cloud_meta_cache_size_default = 10
cloud_map_cache_size_default = 5
nbu_instance = False
msdp_cloud_dsid = 0

kms_config_file = '/tmp/imagesharing_kms_config'
identity_file = '/tmp/imagesharing_cloudlsu_identity'
imagesharing_name_file = '/tmp/imagesharing_name.dat'
sts_config_file = '/tmp/imagesharing_sts_config.txt'
dvlist_file = '/tmp/imagesharing_dvlist.txt'

def cleanup():
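    """Remove the temporary files created during image sharing configuration."""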
    if os.access(kms_config_file, os.F_OK):
        os.remove(kms_config_file)
    if os.access(identity_file, os.F_OK):
        os.remove(identity_file)
    if os.access(imagesharing_name_file, os.F_OK):
        os.remove(imagesharing_name_file)
    if os.access(sts_config_file, os.F_OK):
        os.remove(sts_config_file)
    if os.access(dvlist_file, os.F_OK):
        os.remove(dvlist_file)

atexit.register(cleanup)

# Create the ArgumentParser object
arg_parser = argparse.ArgumentParser()

# Cloud provider key string
CP_STR_AWS_S3 = 'AWS_S3'
CP_STR_MS_AZURE = 'MS_AZURE'
CP_STR_CUSTOM_S3 = 'CUSTOM_S3'
CP_STR_END = 'CP_END' # End flag of cloud providers

# Cloud providers list
CLOUD_PROVIDERS = {}
CLOUD_PROVIDERS[CP_STR_AWS_S3] = 0
CLOUD_PROVIDERS[CP_STR_MS_AZURE] = 1
CLOUD_PROVIDERS[CP_STR_CUSTOM_S3] = 2
CLOUD_PROVIDERS[CP_STR_END] = 3 # End flag of cloud providers

# Set up the help strings related to each cloud provider
help_str = {}
help_str['cloud_provider'] = []
help_str['cloud_provider'].append('Cloud Providers (default is 1):')
help_str['cloud_provider'].append(str(CLOUD_PROVIDERS[CP_STR_AWS_S3] + 1) + ': Amazon S3')
help_str['cloud_provider'].append(str(CLOUD_PROVIDERS[CP_STR_MS_AZURE] + 1) + ': Microsoft Azure')
help_str['cloud_provider'].append(str(CLOUD_PROVIDERS[CP_STR_CUSTOM_S3] + 1) + ': Third-party S3 compatible cloud provider (HCP etc.)')
help_str['key_id'] = []
help_str['key_id'].append('Amazon Web Services access key ID')
help_str['key_id'].append('Microsoft Azure storage account name')
help_str['key_id'].append('Third-party S3 access key ID')
help_str['secret_key'] = []
help_str['secret_key'].append('Amazon Web Services secret access key')
help_str['secret_key'].append('Microsoft Azure storage access key')
help_str['secret_key'].append('Third-party S3 secret access key')
help_str['bucket_name'] = []
help_str['bucket_name'].append('Amazon Web Services S3 bucket name')
help_str['bucket_name'].append('Microsoft Azure storage container name')
help_str['bucket_name'].append('Third-party S3 bucket name')
help_str['bucket_sub_name'] = []
help_str['bucket_sub_name'].append('Amazon Web Services S3 bucket sub name')
help_str['bucket_sub_name'].append('Microsoft Azure storage container sub name')
help_str['bucket_sub_name'].append('Third-party S3 bucket sub name')

def get_full_help_str(help_str_key, separator_char='/'):
    ret = ''
    if type(help_str[help_str_key]) is list:
        for index in range(len(help_str[help_str_key])):
            if index != 0:
                ret += separator_char
            ret += help_str[help_str_key][index]
    else:
        ret = help_str[help_str_key]
    return ret

# Input arguments
arg_parser.add_argument('-cp', '--cloud_provider', required=False, default=1, type=int, help=get_full_help_str('cloud_provider', ' '))
arg_parser.add_argument('-k', '--key_id', required=False, default='', help=get_full_help_str('key_id'))
arg_parser.add_argument('-s', '--secret_key', required=False, default='', help=get_full_help_str('secret_key'))
arg_parser.add_argument('-b', '--bucket_name', required=False, default='', help=get_full_help_str('bucket_name'))
arg_parser.add_argument('-bs', '--bucket_sub_name', required=False, default='', help='Bucket/container sub name used in PureDisk storage server type')
arg_parser.add_argument('-m', '--storage_server', required=False, default=socket.gethostname(),  help='storage server name')
arg_parser.add_argument('-t', '--storage_type', required=False, default='',  help='storage server type (default is PureDisk)')
arg_parser.add_argument('-p', '--mount_point', required=False, default='/storage',  help='storage path (default is "/storage")')
arg_parser.add_argument('-c', '--cloud_instance', required=False, default='', help='cloud instance name of NetBackup')
arg_parser.add_argument('-e', '--kms_enabled', required=False, default='0', help='enabled/disabled kms encryption (default is 0)')
arg_parser.add_argument('-r', '--region', required=False, default='', help='Amazon Web Services service region (default is us-east-1)')
arg_parser.add_argument('-ms', '--master', required=False, default='', help='Master server that the bucket belongs to')
arg_parser.add_argument('-ma', '--media', required=False, default='', help='Media server that the bucket belongs to')
arg_parser.add_argument('-sn', '--sts_number', required=False, default='', help='Storage server number; only used in the media server case')
arg_parser.add_argument('-pt', '--provider_type', required=False, default='', help='Third-party S3 cloud provider type')
arg_parser.add_argument('-sh', '--s3_host', required=False, default='', help='Third-party S3 server host name')
arg_parser.add_argument('-sp', '--s3_http_port', required=False, type=int, default=80, help='Third-party S3 server http port (default is 80)')
arg_parser.add_argument('-sps', '--s3_https_port', required=False, type=int, default=443, help='Third-party S3 server https port (default is 443)')
arg_parser.add_argument('-ssl', '--ssl', required=False, default='1', help='Third-party S3 server SSL usage: 0: Disable SSL. 1: Enable SSL. (default is 1)')
arg_parser.add_argument('-csd', '--cloud_data_cache_size', required=False, default='', help='Cloud data cache size of PureDisk storage server (integers in GiB, default is all available free disk space excluding {_reserved_free_space_percent}%% of total disk space, minimum is {_cloud_data_cache_size_min})'.format(_reserved_free_space_percent=reserved_free_space_percent, _cloud_data_cache_size_min=cloud_data_cache_size_min))
arg_parser.add_argument('-csm', '--cloud_meta_cache_size', required=False, default='', help='Cloud metadata cache size of PureDisk storage server (integers in GiB, default is {_cloud_meta_cache_size_default})'.format(_cloud_meta_cache_size_default=cloud_meta_cache_size_default))
arg_parser.add_argument('-csma', '--cloud_map_cache_size', required=False, default='', help='Cloud map cache size of PureDisk storage server (integers in GiB, default is {_cloud_map_cache_size_default})'.format(_cloud_map_cache_size_default=cloud_map_cache_size_default))
args = arg_parser.parse_args()

if not args.storage_type:
    args.storage_type = 'PureDisk'

def valid_cloud_provider(cloud_provider):
    try:
        cloud_provider = int(cloud_provider)
    except Exception:
        return False
    return 0 < cloud_provider < len(help_str['cloud_provider'])

cloud_provider_input = str(args.cloud_provider)
while not valid_cloud_provider(cloud_provider_input):
    if cloud_provider_input is not None:
        print(('Invalid cloud provider: %s' % cloud_provider_input))
    cloud_provider_input = input('Enter ' + get_full_help_str('cloud_provider', '\n') + '\n')
    if cloud_provider_input == '':
        cloud_provider_input = 1
    if valid_cloud_provider(cloud_provider_input):
        args.cloud_provider = int(cloud_provider_input)
        break
args.cloud_provider -= 1

while not args.key_id:
    args.key_id = input('Enter ' + help_str['key_id'][args.cloud_provider] + ': ').strip()
    if not args.key_id:
        print((help_str['key_id'][args.cloud_provider] + ' cannot be empty.'))
while not args.secret_key:
    args.secret_key = input('Enter ' + help_str['secret_key'][args.cloud_provider] + ': ').strip()
    if not args.secret_key:
        print((help_str['secret_key'][args.cloud_provider] + ' cannot be empty.'))
while not args.bucket_name:
    args.bucket_name = input('Enter ' + help_str['bucket_name'][args.cloud_provider] + ': ').strip()
    if not args.bucket_name:
        print((help_str['bucket_name'][args.cloud_provider] + ' cannot be empty.'))
while not args.bucket_sub_name:
    args.bucket_sub_name = input('Enter ' + help_str['bucket_sub_name'][args.cloud_provider] + ': ').strip()
    if not args.bucket_sub_name:
        print((help_str['bucket_sub_name'][args.cloud_provider] + ' cannot be empty.'))

def valid_cache_size(cache_size, minimum = -1):
    try:
        cache_size = int(cache_size)
    except Exception:
        return False
    if minimum >= 0 and cache_size < minimum:
        return False
    return True

def get_cache_size_error_message(cache_type, cache_value, minimum_value = -1):
    error_message = 'Invalid input: {_cache_type} with value [{_cache_value}]. Only integer is acceptable.'\
        .format(_cache_type=cache_type, _cache_value=cache_value)
    if minimum_value >= 0:
        error_message += ' Minimum value is {_minimum_value}.'.format(_minimum_value=minimum_value)
    return error_message

def get_cache_size_ignore_message(cache_type, cache_value):
    return 'The {_cache_type} that you have specified [{_cache_value}] will be ignored since it is only applicable for PureDisk storage server.'\
        .format(_cache_type=cache_type, _cache_value=cache_value)

def get_disk_space_info(path):
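    """Return the (total, free) disk space of the given path in bytes, based on os.statvfs."""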
    st = os.statvfs(path)
    total = st.f_blocks * st.f_frsize
    free = st.f_bavail * st.f_frsize
    return total, free

def log_disk_space(disk_total_bytes, disk_free_bytes, required_bytes, cloud_upload_cache_size_byte, cloud_map_cache_size_byte, cloud_meta_cache_size_byte, cloud_data_cache_size_byte, required_free_bytes, calc_free_bytes, automatic_calc):
    one_gib_bytes = 1024 * 1024 * 1024
    one_gib_bytes_float = float(one_gib_bytes)
    logger.info('Information about disk space required by MSDP Cloud of PureDisk storage server:')
    logger.info('Current total disk space of mount point [%s] is %.2f GiB.' , args.mount_point, disk_total_bytes / one_gib_bytes_float)
    logger.info('Current free disk space of mount point [%s] is %.2f GiB.' , args.mount_point, disk_free_bytes / one_gib_bytes_float)
    logger.info('Required disk space is %.2f GiB, including:', required_bytes / one_gib_bytes_float)
    logger.info('CLOUD_UPLOAD_CACHE_SIZE = %.2f GiB (fixed size)', cloud_upload_cache_size_byte / one_gib_bytes_float)
    logger.info('CLOUD_MAP_CACHE_SIZE = %.2f GiB', cloud_map_cache_size_byte / one_gib_bytes_float)
    logger.info('CLOUD_META_CACHE_SIZE = %.2f GiB', cloud_meta_cache_size_byte / one_gib_bytes_float)
    if automatic_calc:
        logger.info('CLOUD_DATA_CACHE_SIZE = %.2f GiB (automatically calculated)', cloud_data_cache_size_byte / one_gib_bytes_float)
    else:
        logger.info('CLOUD_DATA_CACHE_SIZE = %.2f GiB', cloud_data_cache_size_byte / one_gib_bytes_float)
    logger.info('Required reserving free space = %.2f GiB (Total disk space * %d%%)', required_free_bytes / one_gib_bytes_float, reserved_free_space_percent)
    logger.info('Left minimum free space = %.2f GiB', calc_free_bytes / one_gib_bytes_float)

def log_disk_full():
    logger.error('Disk free space is not enough. (Left minimum free space is less than required reserved free space)')

def check_disk_free_space():
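    """Validate that the mount point has enough free space for the MSDP cloud caches.

    CLOUD_UPLOAD_CACHE_SIZE is fixed at 1 GiB. When -csd/--cloud_data_cache_size is not
    supplied, CLOUD_DATA_CACHE_SIZE is calculated automatically as the free disk space
    minus the other caches and the reserved free space (reserved_free_space_percent% of
    the total disk), capped at cloud_data_cache_size_max GiB.
    """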
    disk_total_bytes, disk_free_bytes = get_disk_space_info(args.mount_point)
    one_gib_bytes = 1024 * 1024 * 1024
    required_min_free_bytes = int(disk_total_bytes * reserved_free_space_percent / 100.0) + 1
    cloud_upload_cache_size_byte = one_gib_bytes
    cloud_map_cache_size_byte = int(args.cloud_map_cache_size) * one_gib_bytes
    cloud_meta_cache_size_byte = int(args.cloud_meta_cache_size) * one_gib_bytes
    if args.cloud_data_cache_size:
        cloud_data_cache_size_byte = int(args.cloud_data_cache_size) * one_gib_bytes
        required_bytes = cloud_upload_cache_size_byte + cloud_map_cache_size_byte + cloud_meta_cache_size_byte + cloud_data_cache_size_byte
        usable_free_bytes = disk_free_bytes - required_bytes
        log_disk_space(disk_total_bytes, disk_free_bytes, required_bytes, cloud_upload_cache_size_byte, cloud_map_cache_size_byte, cloud_meta_cache_size_byte, cloud_data_cache_size_byte, required_min_free_bytes, usable_free_bytes, False)
        if usable_free_bytes <= required_min_free_bytes:
            log_disk_full()
            return R_FAIL
    else:
        min_cloud_data_cache_size_byte = cloud_data_cache_size_min * one_gib_bytes
        cloud_data_cache_size_byte = min_cloud_data_cache_size_byte
        required_bytes = cloud_upload_cache_size_byte + cloud_map_cache_size_byte + cloud_meta_cache_size_byte + min_cloud_data_cache_size_byte
        usable_free_bytes = disk_free_bytes - required_bytes
        if usable_free_bytes > required_min_free_bytes:
            cloud_data_cache_size_byte = disk_free_bytes - cloud_upload_cache_size_byte - cloud_map_cache_size_byte - cloud_meta_cache_size_byte - required_min_free_bytes
            cloud_data_cache_size = cloud_data_cache_size_byte // one_gib_bytes
            if cloud_data_cache_size > cloud_data_cache_size_max:
                cloud_data_cache_size = cloud_data_cache_size_max
            cloud_data_cache_size_byte = cloud_data_cache_size * one_gib_bytes
            required_bytes = cloud_upload_cache_size_byte + cloud_map_cache_size_byte + cloud_meta_cache_size_byte + cloud_data_cache_size_byte
            usable_free_bytes = disk_free_bytes - required_bytes
            log_disk_space(disk_total_bytes, disk_free_bytes, required_bytes, cloud_upload_cache_size_byte, cloud_map_cache_size_byte, cloud_meta_cache_size_byte, cloud_data_cache_size_byte, required_min_free_bytes, usable_free_bytes, True)
            args.cloud_data_cache_size = cloud_data_cache_size
        else:
            log_disk_space(disk_total_bytes, disk_free_bytes, required_bytes, cloud_upload_cache_size_byte, cloud_map_cache_size_byte, cloud_meta_cache_size_byte, cloud_data_cache_size_byte, required_min_free_bytes, usable_free_bytes, True)
            log_disk_full()
            return R_FAIL
    return R_SUCESS

def check_input_args():
    if args.storage_type != msdp_cloud_storage_type:
        logger.error('Invalid storage_type %s. Only %s is supported.', args.storage_type, msdp_cloud_storage_type)
        return R_FAIL

    if args.cloud_provider < 0 or args.cloud_provider >= CLOUD_PROVIDERS[CP_STR_END]:
        logger.error('Invalid cloud_provider. Enter a valid cloud provider between [1, %s]', str(CLOUD_PROVIDERS[CP_STR_END]))
        return R_FAIL

    if (args.master.strip() == '' and args.media.strip() != '') or (args.master.strip() != '' and args.media.strip() == ''):
        logger.error('Specify the master server and the media server together, or omit both.')
        return R_FAIL

    if args.cloud_provider == CLOUD_PROVIDERS[CP_STR_AWS_S3]:
        args.cloud_provider_str = CP_STR_AWS_S3
        if not args.region:
            args.region = 'us-east-1' # Default region is us-east-1
        if not args.cloud_instance:
            if args.region.startswith('cn-'):
                # Use amazon.cn for cn-* region
                args.cloud_instance = 'amazon.cn'
            else:
                args.cloud_instance = 'amazon.com'
        if not args.provider_type:
            args.provider_type = 'amazon'
    elif args.cloud_provider == CLOUD_PROVIDERS[CP_STR_MS_AZURE]:
        args.cloud_provider_str = CP_STR_MS_AZURE
        if not args.cloud_instance:
            args.cloud_instance = 'my-azure'
        if args.region:
            logger.warning('The region that you have specified [%s] will be ignored by Microsoft Azure.', args.region)
            args.region = ''
        if not args.provider_type:
            args.provider_type = 'azure'
    elif args.cloud_provider == CLOUD_PROVIDERS[CP_STR_CUSTOM_S3]:
        args.cloud_provider_str = CP_STR_CUSTOM_S3
        if args.cloud_instance:
            global nbu_instance
            nbu_instance = True
        else:
            if not args.provider_type or not args.s3_host:
                logger.error('-pt/--provider_type and -sh/--s3_host must be specified when using third-party S3 cloud provider.')
                logger.error('or -c/--cloud_instance should be specified when using Cloud Instance Name in NetBackup.')
                return R_FAIL
            args.cloud_instance = 'my-' + args.s3_host

    global storage_path
    global kms_config_file
    storage_path = args.mount_point

    if not (os.path.exists(args.mount_point)):
        os.makedirs(args.mount_point)
    elif (os.listdir(args.mount_point)):
        logger.error("mount_point %s is not empty. Please clean up the folder before running this script.", args.mount_point)
        return R_FAIL

    if not args.cloud_map_cache_size:
        args.cloud_map_cache_size = cloud_map_cache_size_default
    if not valid_cache_size(args.cloud_map_cache_size):
        logger.error(get_cache_size_error_message('CLOUD_MAP_CACHE_SIZE', args.cloud_map_cache_size))
        return R_FAIL
    if not args.cloud_meta_cache_size:
        args.cloud_meta_cache_size = cloud_meta_cache_size_default
    if not valid_cache_size(args.cloud_meta_cache_size):
        logger.error(get_cache_size_error_message('CLOUD_META_CACHE_SIZE', args.cloud_meta_cache_size))
        return R_FAIL
    if args.cloud_data_cache_size:
        if not valid_cache_size(args.cloud_data_cache_size, cloud_data_cache_size_min):
            logger.error(get_cache_size_error_message('CLOUD_DATA_CACHE_SIZE', args.cloud_data_cache_size, cloud_data_cache_size_min))
            return R_FAIL
    ret = check_disk_free_space()
    if R_SUCESS != ret:
        return ret

    return R_SUCESS

def check_hostname():
    print('''
*****************************IMPORTANT TIPS!*********************************
Ensure that the hostname "%s" is in the FQDN format.
If the hostname is not in the FQDN format, the web service might fail.''' % args.storage_server)
    prompt = input("Enter Y to continue or any other key to reset:")
    if prompt == 'Y' or prompt == 'y':
        return R_SUCESS
    else:
        return R_FAIL

def set_logger():
    global logger
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    file_handler = logging.FileHandler('/var/log/puredisk/image_sharing_config.log')
    file_handler.setLevel(logging.DEBUG)
    log_formatter_file = logging.Formatter('%(asctime)s - %(funcName)s - %(levelname)s - %(message)s')
    file_handler.setFormatter(log_formatter_file)
    logger.addHandler(file_handler)
    console_handler = logging.StreamHandler(stream=sys.stdout)
    log_formatter_console = logging.Formatter('%(levelname)s - %(message)s')
    console_handler.setFormatter(log_formatter_console)
    logger.addHandler(console_handler)

def configure_msdp_webservice():
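    """Append the MSDP X server port (10086) to the NetBackup web service configuration.

    This step is skipped when a storage server number is supplied (the media server case).
    """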
    sts_num = args.sts_number

    if not sts_num.strip():
        with open('/usr/openv/var/global/wsl/config/web.conf', 'a+') as web_conf:
            web_conf.write('msdpxserver.port=10086\n')

    return R_SUCESS


def verify_msdp_webservice():
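    """Wait for the web service to start, then verify it by requesting its certificate
    with 'nb_admin_tasks --get_self_cert'."""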
    time.sleep(20)

    cmd = ["/usr/openv/pdde/vpfs/bin/nb_admin_tasks", "--get_self_cert"]
    logger.info("[CMD]:%s", ' '.join(cmd))
    try:
        subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)
    except subprocess.CalledProcessError as e:
        logger.error("Ret:%d with message:%s", e.returncode, e.output)
        return R_FAIL

    return R_SUCESS

def get_dp_name():
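    """Derive the disk pool name from the cloud provider (or the cloud instance /
    provider type for third-party S3) and the storage server number."""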
    sts_num = args.sts_number
    cloud_provider = args.cloud_provider
    provider_type = args.provider_type

    if cloud_provider == CLOUD_PROVIDERS[CP_STR_AWS_S3]:
        return 'amazon_dp{_sts_num}'.format(_sts_num=sts_num)
    elif cloud_provider == CLOUD_PROVIDERS[CP_STR_MS_AZURE]:
        return 'azure_dp{_sts_num}'.format(_sts_num=sts_num)
    elif cloud_provider == CLOUD_PROVIDERS[CP_STR_CUSTOM_S3]:
        if nbu_instance:
            return '{_cloud_instance}_dp{_sts_num}'.format(_cloud_instance=args.cloud_instance, _sts_num=sts_num)
        else:
            return '{_provider_type}_dp{_sts_num}'.format(_provider_type=provider_type, _sts_num=sts_num)
    else:
        # Should not happen
        raise Exception('Unsupported cloud provider ' + str(cloud_provider))

def get_stu_name():
    return get_dp_name() + '_stu'

def setconfig(config_str):
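    """Write the configuration list to a temporary file and apply it with
    'nbdevconfig -setconfig'. A return code of 29 from nbdevconfig is tolerated here."""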
    storage_server = args.storage_server
    storage_type = args.storage_type
    nbdevconfig = "/usr/openv/netbackup/bin/admincmd/nbdevconfig"

    with open(sts_config_file, 'w') as config_file:
        config_file.write(config_str)

    cmd_str = "{_nbdevconfig} -setconfig -stype {_storage_type} -storage_server {_storage_server} -configlist {_sts_config_file}"\
        .format(_nbdevconfig=nbdevconfig, _storage_type=storage_type, _storage_server=storage_server, _sts_config_file=sts_config_file)
    logger.info("[CMD]:%s", cmd_str)
    cmd = shlex.split(cmd_str)
    try:
        subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)
    except subprocess.CalledProcessError as e:
        if e.returncode != 29:
            logger.error("Ret:%d with message:%s", e.returncode, e.output)
            return R_FAIL
    return R_SUCESS

def get_lsu_name():
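    """Read the cloud LSU identity (dsid and LSU name) from cloudlsu.cfg under the
    mount point. Return the LSU name, or None on failure."""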
    cloud_config = '{_mount_point}/etc/puredisk/cloudlsu.cfg'.format(_mount_point=args.mount_point)
    try:
        with open(cloud_config, 'r') as f:
            cloud_lsu_identity = json.loads(f.read())
            global msdp_cloud_dsid
            global cloud_lsu_name
            msdp_cloud_dsid = cloud_lsu_identity["lsuList"][0]['dsid']
            cloud_lsu_name = cloud_lsu_identity["lsuList"][0]['lsuName']
    except Exception as ex:
        logger.error('Failed to read cloud config file. Exception: %s', ex)
        return None
    return cloud_lsu_name

def check_kms_status_in_msdp_cloud():
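    """Read kms_cloud.cfg.<dsid> and warn the user when KMS encryption is enabled,
    since image sharing might fail unless the Key Management Service is configured."""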
    if msdp_cloud_dsid < 1:
        logger.error('Invalid dsid in the MSDP Cloud configuration.')
        return R_FAIL

    kms_cloud_config = '{_mount_point}/etc/puredisk/kms_cloud.cfg.{_dsid}'.format(_mount_point=args.mount_point, _dsid=msdp_cloud_dsid)
    try:
        with open(kms_cloud_config, 'r') as f:
            kms_cloud_parameters = json.loads(f.read())
            kms_status_in_cloud = kms_cloud_parameters["kms"]
            if kms_status_in_cloud:
                print('''
*****************************
KMS encryption has been enabled on this server.
Ensure that the Key Management Service is correctly configured or image sharing might fail.
If the Key Management Service was not configured before running this script, configure it
now and then restart NetBackup.
*****************************
''')
    except Exception as ex:
        logger.error('Failed to read KMS config file. Exception: %s', ex)
        return R_FAIL

    return R_SUCESS

def run_cmd(cmd):
    logger.info("[CMD]:%s", " ".join(cmd))
    try:
        subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)
    except subprocess.CalledProcessError as e:
        logger.error("Ret:%d with message:%s", e.returncode, e.output)
        return R_FAIL
    return R_SUCESS

def set_msdp_cloud_cache_size(cache_name, cache_value):
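    """Write a cache size (in GiB) to the CRDataStore section of contentrouter.cfg
    using pdcfg."""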
    pdcfg = "/usr/openv/pdde/pdag/bin/pdcfg"
    content_router = "{_storage_path}/etc/puredisk/contentrouter.cfg".format(_storage_path=storage_path)

    cmd_str = '{_pdcfg} --write="{_content_router}" --section=CRDataStore --option={_cache_name} --value={_cache_value}GiB'\
        .format(_pdcfg=pdcfg, _content_router=content_router, _cache_name=cache_name, _cache_value=cache_value)
    cmd = shlex.split(cmd_str)
    return run_cmd(cmd)

def create_storage_server():
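    """Create and configure the MSDP cloud storage server for image sharing.

    The main steps are:
      1. For third-party S3 without an existing NetBackup cloud instance, create the
         cloud instance (csconfig cldinstance -a).
      2. Associate the storage server with the cloud instance (csconfig cldinstance -as);
         an EC2 IAM role is used when 'dummy' AWS credentials are supplied.
      3. Create the storage server (nbdevconfig -creatests) and register the credentials
         (tpconfig -add).
      4. Push the MSDP storage path and image sharing settings, and set the cloud cache sizes.
      5. Reuse the existing cloud LSU, preview the disk volume, and create the disk pool
         and storage unit.
    When KMS is enabled, the function also waits briefly for the first data container
    file to appear before returning.
    """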
    cloud_provider = args.cloud_provider
    storage_server = args.storage_server
    storage_type = args.storage_type
    key_id =  args.key_id
    secret_key = args.secret_key
    mount_point = args.mount_point
    bucket_name =  args.bucket_name
    bucket_sub_name =  args.bucket_sub_name
    cloud_lsu_name = args.bucket_sub_name
    region = args.region
    cld_instance = args.cloud_instance
    provider_type = args.provider_type
    s3_host = args.s3_host
    http_port = args.s3_http_port
    https_port = args.s3_https_port

    # Set ssl used by csconfig
    if args.ssl == '0':
        ssl = '0'
    else:
        ssl = '2'
 
    nbdevconfig = "/usr/openv/netbackup/bin/admincmd/nbdevconfig"
    tpconfig = "/usr/openv/volmgr/bin/tpconfig"
    bpstuadd = "/usr/openv/netbackup/bin/admincmd/bpstuadd"
    csconfig = '/usr/openv/netbackup/bin/admincmd/csconfig'

    # Third-party S3
    if cloud_provider == CLOUD_PROVIDERS[CP_STR_CUSTOM_S3] and (not nbu_instance):
        # Create cloud instance first
        cmd_str = "{_csconfig} cldinstance -a -in {_cld_instance} -pt {_provider_type} -sh {_server_host} -http_port {_http_port} -https_port {_https_port}" \
              .format(_csconfig=csconfig, _cld_instance=cld_instance, _provider_type=provider_type, _server_host=s3_host, _http_port=http_port, _https_port=https_port)
        logger.info("[CMD]:%s", cmd_str)
        cmd = shlex.split(cmd_str)
        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)
        except subprocess.CalledProcessError as e:
            if e.returncode != 1:
                logger.error("Ret:%d with message:%s", e.returncode, e.output)
                return R_FAIL

    # AWS S3 IAM role
    if cloud_provider == CLOUD_PROVIDERS[CP_STR_AWS_S3] and key_id == "dummy" and secret_key == 'dummy':
        cmd_str = "{_csconfig} cldinstance -as -in {_cld_instance} -sts {_storage_server} -creds_broker CREDS_ROLE -role_method ec2role" \
            .format(_csconfig=csconfig, _cld_instance=cld_instance, _storage_server=storage_server)
    else:
        cmd_str = "{_csconfig} cldinstance -as -in {_cld_instance} -sts {_storage_server} -ssl {_ssl}" \
            .format(_csconfig=csconfig, _cld_instance=cld_instance, _storage_server=storage_server, _ssl=ssl)
    cmd_str += " -lsu_name {_lsu_name}".format(_lsu_name=cloud_lsu_name)
    if region:
        cmd_str += " -lc {_region}".format(_region=region)
    logger.info("[CMD]:%s", cmd_str)
    cmd = shlex.split(cmd_str)
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)
    except subprocess.CalledProcessError as e:
        if e.returncode != 1:
            logger.error("Ret:%d with message:%s", e.returncode, e.output)
            return R_FAIL

    cmd_str = "{_nbdevconfig} -creatests -storage_server {_storage_server} -stype {_storage_type} -media_server {_storage_server} -st 9" \
              .format(_nbdevconfig=nbdevconfig, _storage_server=storage_server, _storage_type=storage_type)
    logger.info("[CMD]:%s", cmd_str)
    cmd = shlex.split(cmd_str)
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)
    except subprocess.CalledProcessError as e:
        logger.error("Ret:%d with message:%s", e.returncode, e.output)
        return R_FAIL

    cmd_str = "{_tpconfig} -dsh -stype {_storage_type}".format(_tpconfig=tpconfig, _storage_type=storage_type)
    logger.info("[CMD]:%s", cmd_str)
    cmd = shlex.split(cmd_str)
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)
    except subprocess.CalledProcessError as e:
        logger.error("Ret:%d with message:%s", e.returncode, e.output)
        return R_FAIL

    sts_user_id = key_id[:32]
    sts_passwd = secret_key[:32]

    cmd_str = "{_tpconfig} -add -storage_server {_storage_server} -stype {_storage_type} -sts_user_id {_sts_user_id}".format(_tpconfig=tpconfig, _storage_server=storage_server, _storage_type=storage_type, _sts_user_id=sts_user_id)
    logger.info("[CMD]:%s","{_tpconfig} -add -storage_server {_storage_server} -stype {_storage_type} -sts_user_id xxx".format(_tpconfig=tpconfig, _storage_server=storage_server, _storage_type=storage_type))
    cmd = shlex.split(cmd_str)
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=False, universal_newlines=True)
    proc.stdin.write("%s\n%s\n"%(sts_passwd, sts_passwd))
    proc.stdin.flush()
    output, _ = proc.communicate()
    status = int(proc.returncode)
    if status not in (0, 99):
        logger.error("Ret:%d with message:%s", status, output)
        return R_FAIL

    cmd_str = "{_nbdevconfig} -getconfig -stype {_storage_type} -storage_server {_storage_server}".format(_nbdevconfig=nbdevconfig, _storage_type=storage_type, _storage_server=storage_server)
    logger.info("[CMD]:%s", cmd_str)
    cmd = shlex.split(cmd_str)
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)
    except subprocess.CalledProcessError as e:
        logger.error("Ret:%d with message:%s", e.returncode, e.output)
        return R_FAIL

    config_str = 'V7.5 "storagepath" "{_storage_path}" string\nV7.5 "dbpath" "{_storage_path}" string\nV7.5 "spalogin" "{_spalogin}" string\n\
V7.5 "spapasswd" "{_spapasswd}" string\nV7.5 "imagesharingincloud" "true" string'.format(_storage_path=mount_point, _spalogin=sts_user_id, _spapasswd=sts_passwd)

    ret = setconfig(config_str)
    if R_SUCESS != ret:
        return ret

    ret = set_msdp_cloud_cache_size('CloudUploadCacheSize', '1')
    if R_SUCESS != ret:
        return ret
    ret = set_msdp_cloud_cache_size('CloudMetaCacheSize', args.cloud_meta_cache_size)
    if R_SUCESS != ret:
        return ret
    ret = set_msdp_cloud_cache_size('CloudDataCacheSize', args.cloud_data_cache_size)
    if R_SUCESS != ret:
        return ret
    ret = set_msdp_cloud_cache_size('CloudMapCacheSize', args.cloud_map_cache_size)
    if R_SUCESS != ret:
        return ret

    cld_alias = storage_server + '_' + cloud_lsu_name
    config_str = 'V7.5 "operation" "reuse-lsu-cloud" string\nV7.5 "lsuCloudAlias" "{_cld_alias}" string\nV7.5 "lsuCloudUser" "{_key_id}" string\n\
V7.5 "lsuCloudPassword" "{_secret_key}" string\nV7.5 "lsuCloudBucketName" "{_bucket_name}" string\nV7.5 "lsuCloudBucketSubName" "{_bucket_sub_name}" string'\
        .format(_cld_alias=cld_alias, _key_id=key_id, _secret_key=secret_key, _bucket_name=bucket_name, _bucket_sub_name=bucket_sub_name)
    config_str += '\nV7.5 "lsuKmsServerName" "{_kms_server_name}" string'.format(_kms_server_name=storage_server)
    ret = setconfig(config_str)
    if R_SUCESS != ret:
        return ret

    lsu_name = get_lsu_name()
    if lsu_name is None:
        return R_FAIL

    cmd_str = "{_nbdevconfig} -previewdv -storage_server {_storage_server} -stype {_storage_type} -media_server {_storage_server} -dv {_lsu_name} -dvlist {_dvlist_file}" \
        .format(_nbdevconfig=nbdevconfig, _storage_type=storage_type, _storage_server=storage_server, _lsu_name=lsu_name, _dvlist_file=dvlist_file)
    logger.info("[CMD]:%s", cmd_str)
    cmd = shlex.split(cmd_str)
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)
    except subprocess.CalledProcessError as e:
        logger.error("Ret:%d with message:%s", e.returncode, e.output)
        return R_FAIL

    dp_name  = get_dp_name()
    stu_name = get_stu_name()
    cmd_str = "{_nbdevconfig} -createdp -storage_servers {_storage_server} -stype {_storage_type} -dp {_dp} -dvlist {_dvlist_file}" \
        .format(_nbdevconfig=nbdevconfig, _storage_type=storage_type, _storage_server=storage_server, _dp=dp_name, _dvlist_file=dvlist_file)
    logger.info("[CMD]:%s", cmd_str)
    cmd = shlex.split(cmd_str)
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)
    except subprocess.CalledProcessError as e:
        logger.error("Ret:%d with message:%s", e.returncode, e.output)
        return R_FAIL

    logger.info("NOTE: If the LSU is migrated from CloudCatalyst, run the 'cacontrol --catalog buildcloudcatalystobjects "
                "<lsu name>' command to import the CloudCatalyst images.")

    cmd_str = "{_bpstuadd} -label {_stu} -odo 0 -dt 6 -dp {_dp} -nodevhost -cj 20 -mfs 51200 ".format(_bpstuadd=bpstuadd, _stu=stu_name, _dp=dp_name)
    logger.info("[CMD]:%s", cmd_str)
    cmd = shlex.split(cmd_str)
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)
    except subprocess.CalledProcessError as e:
        logger.error("Ret:%d with message:%s", e.returncode, e.output)
        return R_FAIL

    check_kms_status_in_msdp_cloud()

    if args.kms_enabled == '1':
        nb_pd_server = "{_storage_path}/data/0/64.bin".format(_storage_path=storage_path)
        wait_time = 30
        while wait_time > 0:
            if os.access(nb_pd_server,os.F_OK):
                break
            else:
                time.sleep(5)
                wait_time -= 5

    return R_SUCESS


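# Main flow: set up logging, validate the input arguments, confirm the hostname,
# configure the MSDP web service, create the storage server, and verify the web service.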

set_logger()

ret = check_input_args()
if ret != R_SUCESS:
    sys.exit(1)

ret = check_hostname()
if ret == R_SUCESS:
    logger.info("Confirmed that the hostname is in the FQDN format.")
else:
    logger.info("Quitting so that the hostname can be reset.")
    sys.exit(1)

logger.info("Begin configuring web service...")
ret = configure_msdp_webservice()
if ret == R_SUCESS:
    logger.info("Completed configuring web service.")
else:
    logger.info("Aborted configuring web service.")
    sys.exit(1)

logger.info("Begin creating storage server...")
ret = create_storage_server()
if ret == R_SUCESS:
    logger.info("Completed creating storage server.")
else:
    logger.info("Aborted creating storage server.")
    sys.exit(1)

logger.info("Begin verifying web service...")
ret = verify_msdp_webservice()
if ret == R_SUCESS:
    logger.info("Completed verifying web service.")
else:
    logger.info("Aborted verifying web service.")
    sys.exit(1)
