#!/usr/bin/python3
import argparse
import yaml
import subprocess
import json
import re
import sys

# Supported cluster types (the valid values of the --type flag).
EKS = "eks"
GKE = "gke"
MKS = "mks"
AKS = "aks"

# TODO: Add more config flags as needed to your CSP
# Kubernetes "kind" of the Kosmos cluster custom resource, per cluster type.
KOSMOS_CLUSTER_RESOURCE_NAMES = {EKS: "EKSCluster", GKE: "GKECluster", MKS: "MKSCluster", AKS: "AKSCluster"}
# Key under "spec" that holds the CSP-specific configuration, per cluster type.
RESOURCE_CONFIG_KEY_NAME = {EKS: "eksConfig", GKE: "gkeConfig", MKS: "mksConfig", AKS: "aksConfig"}
SPEC_KEY = "spec"

# Flags common to every cluster type. Each *_FLAG constant is both the CLI
# flag name and the argparse dest (i.e. the key in the parsed-args dict).
CLUSTER_NAME_FLAG = "cluster-name"
FLEET_ID_FLAG = "fleet"
TYPE_FLAG = "type"

# GKE flags
GKE_CLUSTER_REGION_FLAG = "gke-cluster-region"
GKE_SERVICE_ACCOUNT_FLAG = "gke-service-account"
GKE_WORKLOAD_IDENTITY_POOL_FLAG = "gke-workload-identity-pool"
GKE_WORKLOAD_IDENTITY_PROVIDER_FLAG = "gke-workload-identity-provider"
GKE_PROJECT_ID_FLAG = "gke-project-id"
REQUIRED_GKE_FLAGS = [GKE_CLUSTER_REGION_FLAG, GKE_SERVICE_ACCOUNT_FLAG, GKE_WORKLOAD_IDENTITY_POOL_FLAG, GKE_WORKLOAD_IDENTITY_PROVIDER_FLAG, GKE_PROJECT_ID_FLAG]

# EKS flags (profile is optional, hence not in the required list)
EKS_REGION_FLAG = "eks-region"
EKS_KOSMOS_ROLE_ARN_FLAG = "eks-kosmos-role-arn"
EKS_PROFILE_FLAG = "eks-profile"
REQUIRED_EKS_FLAGS = [EKS_REGION_FLAG, EKS_KOSMOS_ROLE_ARN_FLAG]

# MKS flags (profile is optional and defaults to "default")
MKS_REGION_FLAG = "mks-region"
MKS_KOSMOS_ROLE_ARN_FLAG = "mks-kosmos-role-arn"
MKS_PROFILE_FLAG = "mks-profile"
REQUIRED_MKS_FLAGS = [MKS_REGION_FLAG, MKS_KOSMOS_ROLE_ARN_FLAG]

# AKS flags
AKS_CLUSTER_RESOURCE_GROUP_FLAG = "aks-cluster-resource-group"
AKS_CLIENT_NAME_FLAG = "aks-client-name"
AKS_CLIENT_RESOURCE_GROUP_FLAG = "aks-client-resource-group"
REQUIRED_AKS_FLAGS = [AKS_CLUSTER_RESOURCE_GROUP_FLAG, AKS_CLIENT_NAME_FLAG, AKS_CLIENT_RESOURCE_GROUP_FLAG]

def init_skeleton(type: str, cluster_name: str, fleet_name: str) -> dict:
    """Build the base Kosmos cluster custom-resource document.

    The CSP-specific config section (e.g. eksConfig) is left as an empty
    dict and is filled in later by generate_config().
    """
    metadata = {
        "name": cluster_name,
        "namespace": fleet_name,
    }
    spec = {
        "name": cluster_name,
        RESOURCE_CONFIG_KEY_NAME[type]: {},
    }
    return {
        "apiVersion": "storage.kosmos.spcplatform.com/v1",
        "kind": KOSMOS_CLUSTER_RESOURCE_NAMES[type],
        "metadata": metadata,
        "spec": spec,
    }

def get_user_arguments(args: list) -> dict:
    """Parse the command-line arguments for a cluster import.

    Args:
        args: argument vector, typically sys.argv[1:].

    Returns:
        dict mapping flag name (the *_FLAG constants) to the supplied value,
        with unset (None-valued) optional flags dropped.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(f"--{TYPE_FLAG}",
                        required=True,
                        help="Type of the cluster to be imported, valid values are eks, gke, mks, and aks",
                        choices=[EKS, GKE, MKS, AKS],
                        type=str.lower,
                        dest=TYPE_FLAG)
    parser.add_argument(f"--{CLUSTER_NAME_FLAG}",
                        required=True,
                        help="Name of the cluster to be imported into Kosmos",
                        dest=CLUSTER_NAME_FLAG)
    parser.add_argument(f"--{FLEET_ID_FLAG}",
                        required=True,
                        help="Name of the fleet where the cluster will be imported",
                        dest=FLEET_ID_FLAG)
    # TODO: Add more flags based on what's needed for each type of cluster e.g. AWS kosmos role ARN
    # EKS flags
    parser.add_argument(f"--{EKS_PROFILE_FLAG}",
                        help="[Optional, applies if --type=eks] AWS CLI Profile Used",
                        dest=EKS_PROFILE_FLAG)
    parser.add_argument(f"--{EKS_REGION_FLAG}",
                        help="[Required if --type=eks] Name of the AWS Region used by the EKS",
                        dest=EKS_REGION_FLAG)
    parser.add_argument(f"--{EKS_KOSMOS_ROLE_ARN_FLAG}",
                        help="[Required if --type=eks] ARN Role for Kosmos Service",
                        dest=EKS_KOSMOS_ROLE_ARN_FLAG)

    # GKE flags
    parser.add_argument(f"--{GKE_CLUSTER_REGION_FLAG}",
                        help="[Required if --type=gke] GCP region of the GKE cluster to be imported",
                        dest=GKE_CLUSTER_REGION_FLAG)
    parser.add_argument(f"--{GKE_SERVICE_ACCOUNT_FLAG}",
                        help="[Required if --type=gke] Email address of the GCP service account to be assumed by Kosmos towards GCP",
                        dest=GKE_SERVICE_ACCOUNT_FLAG)
    parser.add_argument(f"--{GKE_WORKLOAD_IDENTITY_POOL_FLAG}",
                        help="[Required if --type=gke] Name of the workload identity pool to be used by Kosmos for role assumption towards GCP",
                        dest=GKE_WORKLOAD_IDENTITY_POOL_FLAG)
    parser.add_argument(f"--{GKE_WORKLOAD_IDENTITY_PROVIDER_FLAG}",
                        help="[Required if --type=gke] Name of the workload identity provider to be used by Kosmos for role assumption towards GCP",
                        dest=GKE_WORKLOAD_IDENTITY_PROVIDER_FLAG)
    parser.add_argument(f"--{GKE_PROJECT_ID_FLAG}",
                        help="[Required if --type=gke] ID of the GCP project where the GKE cluster is located",
                        dest=GKE_PROJECT_ID_FLAG)

    # MKS flags
    parser.add_argument(f"--{MKS_REGION_FLAG}",
                        help="[Required if --type=mks] Name of the SPC Region used by the MKS",
                        dest=MKS_REGION_FLAG)
    parser.add_argument(f"--{MKS_KOSMOS_ROLE_ARN_FLAG}",
                        help="[Required if --type=mks] ARN Role for Kosmos Service",
                        dest=MKS_KOSMOS_ROLE_ARN_FLAG)
    parser.add_argument(f"--{MKS_PROFILE_FLAG}",
                        help="[Optional if --type=mks] SPC profile name, the default profile will be used if not specified",
                        default="default",
                        dest=MKS_PROFILE_FLAG)

    # AKS flags
    parser.add_argument(f"--{AKS_CLUSTER_RESOURCE_GROUP_FLAG}",
                        help="[Required if --type=aks] Azure resource group name of the AKS cluster to be imported",
                        dest=AKS_CLUSTER_RESOURCE_GROUP_FLAG)
    parser.add_argument(f"--{AKS_CLIENT_NAME_FLAG}",
                        help="[Required if --type=aks] Azure AD client name for service principal authentication",
                        dest=AKS_CLIENT_NAME_FLAG)
    parser.add_argument(f"--{AKS_CLIENT_RESOURCE_GROUP_FLAG}",
                        help="[Required if --type=aks] Azure resource group name of Azure AD client for service principal authentication",
                        dest=AKS_CLIENT_RESOURCE_GROUP_FLAG)

    # Drop flags the user did not supply so the validators can detect them
    # as missing by key absence.
    return {key: value for key, value in vars(parser.parse_args(args)).items() if value is not None}

def validate_eks_flags(user_inputted_flags: dict):
    """Raise ValueError if any flag required for an EKS import is absent."""
    missing = set(REQUIRED_EKS_FLAGS) - set(user_inputted_flags.keys())
    if missing:
        raise ValueError(f"Missing flags for EKS cluster import: {list(missing)}")

def validate_gke_flags(user_inputted_flags: dict):
    """Raise ValueError if any flag required for a GKE import is absent."""
    missing = set(REQUIRED_GKE_FLAGS) - set(user_inputted_flags.keys())
    if missing:
        raise ValueError(f"Missing flags for GKE cluster import: {list(missing)}")

def validate_mks_flags(user_inputted_flags: dict):
    """Raise ValueError if any flag required for an MKS import is absent.

    The message format matches the other validate_*_flags helpers.
    """
    if not set(REQUIRED_MKS_FLAGS).issubset(user_inputted_flags.keys()):
        raise ValueError(f"Missing flags for MKS cluster import: {list(set(REQUIRED_MKS_FLAGS) - set(user_inputted_flags.keys()))}")

def validate_aks_flags(user_inputted_flags: dict):
    """Raise ValueError if any flag required for an AKS import is absent."""
    missing = set(REQUIRED_AKS_FLAGS) - set(user_inputted_flags.keys())
    if missing:
        raise ValueError(f"Missing flags for AKS cluster import: {list(missing)}")

def validate_flags(type: str, user_inputted_flags: dict):
    """Run the CSP-specific flag validator for the given cluster type.

    Unknown types are silently ignored (argparse already restricts --type
    to the known values).
    """
    validators = {
        EKS: validate_eks_flags,
        GKE: validate_gke_flags,
        MKS: validate_mks_flags,
        AKS: validate_aks_flags,
    }
    validator = validators.get(type)
    if validator is not None:
        validator(user_inputted_flags)

def generate_eks_config(user_inputted_flags: dict) -> dict:
    """Build the eksConfig section for an imported EKS cluster.

    Describes the cluster and each of its nodegroups through the AWS CLI,
    so the caller must be authenticated (optionally via --eks-profile).

    Raises:
        RuntimeError: if an AWS CLI invocation cannot be started or exits
            non-zero.
        ValueError: if AWS CLI output cannot be parsed as JSON.
    """

    def get_aws_cli_command(cmd: str) -> dict:
        """Run an AWS CLI command and return its parsed JSON stdout."""
        try:
            result = subprocess.run(
                cmd.split(),
                capture_output=True,
                text=True,
                check=False
            )
        except OSError as e:
            # e.g. the aws executable is not installed / not on PATH.
            # Only subprocess start-up errors are wrapped here; the
            # returncode/JSON errors below must not be re-wrapped.
            raise RuntimeError(f"Unexpected error running AWS CLI: {e}")

        if result.returncode != 0:
            raise RuntimeError(
                f"AWS CLI command failed ({result.returncode}):\n"
                f"STDERR: {result.stderr.strip()}\n"
                f"STDOUT: {result.stdout.strip()}"
            )

        try:
            return json.loads(result.stdout)
        except json.JSONDecodeError:
            raise ValueError(
                f"Failed to parse AWS CLI output as JSON:\n{result.stdout.strip()}"
            )

    def get_launchtemplate_config(launchtemplate_id: str, launchtemplate_version: str, aws_global_flag: str) -> dict:
        """Return the launch-template version document for the given id/version."""
        launchtemplate_cmd = f"aws ec2 describe-launch-template-versions --launch-template-id {launchtemplate_id} --versions {launchtemplate_version} {aws_global_flag}"
        launchtemplate_res = get_aws_cli_command(launchtemplate_cmd)
        return launchtemplate_res["LaunchTemplateVersions"][0]

    def generate_eks_nodegroup_config(cluster_name: str, nodegroup_name: str, aws_global_flag: str) -> dict:
        """Describe one EKS nodegroup and map it to Kosmos nodegroup config."""
        describe_nodegroup_cmd = f"aws eks describe-nodegroup --cluster-name {cluster_name} --nodegroup-name {nodegroup_name} {aws_global_flag}"
        nodegroup_info = get_aws_cli_command(describe_nodegroup_cmd)["nodegroup"]

        launch_template = nodegroup_info.get("launchTemplate", None)
        disk_size = None

        if launch_template:
            # The CLI returns the version as a string; Kosmos expects an int.
            launch_template["version"] = int(launch_template["version"])
            launchtemplate_data = get_launchtemplate_config(launch_template["id"], launch_template["version"], aws_global_flag)

            # Prefer the EBS volume size from the launch template's first
            # block device mapping, when one is defined.
            block_device_mappings = launchtemplate_data["LaunchTemplateData"].get("BlockDeviceMappings", None)
            if block_device_mappings:
                ebs_config = block_device_mappings[0].get("Ebs")
                if ebs_config is not None:
                    disk_size = ebs_config.get("VolumeSize", None)

        # Fall back to the nodegroup's own diskSize, then to 20 GB (the
        # default used when no size is defined anywhere).
        if disk_size is None:
            disk_size = nodegroup_info.get("diskSize", None)
        if disk_size is None:
            disk_size = 20

        ami_type = nodegroup_info["amiType"]
        return {
            "nodegroupName" : nodegroup_info["nodegroupName"],
            # GPU/ARM capabilities are inferred from the AMI type name.
            "gpu"           : "GPU" in ami_type or "NVIDIA" in ami_type,
            "arm"           : "ARM" in ami_type,
            "resourceTags"  : nodegroup_info.get("tags", {}),
            "diskSize"      : disk_size,
            "version"       : nodegroup_info["version"],
            "minSize"       : nodegroup_info["scalingConfig"]["minSize"],
            "maxSize"       : nodegroup_info["scalingConfig"]["maxSize"],
            "desiredSize"   : nodegroup_info["scalingConfig"]["desiredSize"],
            "tags"          : nodegroup_info.get("tags", {}).copy(),
            "labels"        : nodegroup_info.get("labels", {}).copy(),
            "launchTemplate": launch_template
        }

    aws_region = user_inputted_flags[EKS_REGION_FLAG]
    kosmos_role_arn = user_inputted_flags[EKS_KOSMOS_ROLE_ARN_FLAG]
    cluster_name = user_inputted_flags[CLUSTER_NAME_FLAG]
    # Flags appended to every AWS CLI call (region, plus optional profile).
    aws_global_flag = f"--region {aws_region}"
    if user_inputted_flags.get(EKS_PROFILE_FLAG, None):
        aws_global_flag += f" --profile {user_inputted_flags[EKS_PROFILE_FLAG]}"

    # Describe Cluster
    describe_cluster_cmd = f"aws eks describe-cluster --name {cluster_name} {aws_global_flag}"
    describe_cluster_res = get_aws_cli_command(describe_cluster_cmd)

    # Get list of nodegroups
    list_nodegroups_cmd = f"aws eks list-nodegroups --cluster-name {cluster_name} {aws_global_flag}"
    list_nodegroups_res = get_aws_cli_command(list_nodegroups_cmd)

    # Collect the control-plane log types that are currently enabled.
    logging_types = []
    for logs in describe_cluster_res["cluster"]["logging"]["clusterLogging"]:
        if logs["enabled"]:
            logging_types += logs["types"]

    eks_config = {
        "displayName"    : cluster_name,
        "imported"       : True,
        "kosmosRoleArn"  : kosmos_role_arn,
        "region"         : aws_region,
        "kubernetesVersion"    : describe_cluster_res["cluster"]["version"],
        "publicAccess"         : describe_cluster_res["cluster"]["resourcesVpcConfig"]["endpointPublicAccess"],
        "privateAccess"        : describe_cluster_res["cluster"]["resourcesVpcConfig"]["endpointPrivateAccess"],
        "loggingTypes"         : logging_types,
        "tags"                 : describe_cluster_res["cluster"]["tags"].copy(),
        "publicAccessSources"  : describe_cluster_res["cluster"]["resourcesVpcConfig"]["publicAccessCidrs"],
        "nodeGroups"           : [ generate_eks_nodegroup_config(cluster_name, nodegroup, aws_global_flag) for nodegroup in list_nodegroups_res["nodegroups"] ]
    }
    return eks_config

def generate_gke_config(user_inputted_flags: dict) -> dict:
    """Build the gkeConfig section for an imported GKE cluster.

    Queries gcloud for the cluster description and the project number, so
    the caller must be authenticated with the gcloud CLI.

    Raises:
        RuntimeError: if a gcloud invocation exits non-zero.
    """
    gke_region = user_inputted_flags[GKE_CLUSTER_REGION_FLAG]
    gke_cluster_name = user_inputted_flags[CLUSTER_NAME_FLAG]
    result = subprocess.run(f"gcloud container clusters describe --format json --region {gke_region} {gke_cluster_name}".split(),
                            capture_output=True, text=True, check=False)

    if result.returncode != 0:
        raise RuntimeError(result.stderr)

    get_project_num = subprocess.run(f"gcloud projects describe {user_inputted_flags[GKE_PROJECT_ID_FLAG]} --format=value(projectNumber)".split(),
                                        capture_output=True, text=True, check=False)

    if get_project_num.returncode != 0:
        raise RuntimeError(get_project_num.stderr)

    gke_describe_result = json.loads(result.stdout)
    gke_project_number = get_project_num.stdout
    gke_config = {
        "imported": True,
        "clusterName": gke_cluster_name,
        # An addon entry equal to {} (or absent) is treated as enabled;
        # any other content (e.g. a "disabled" marker) as disabled.
        "clusterAddons": {
            "horizontalPodAutoscaling": gke_describe_result["addonsConfig"].get("horizontalPodAutoscaling", {}) == {},
            "httpLoadBalancing": gke_describe_result["addonsConfig"].get("httpLoadBalancing", {}) == {},
            "networkPolicyConfig": gke_describe_result["addonsConfig"].get("networkPolicyConfig", {}) == {}
        },
        "keyName": gke_describe_result["databaseEncryption"].get("keyName", None),
        "kubernetesVersion": gke_describe_result["currentMasterVersion"],
        "labels": gke_describe_result.get("resourceLabels", {}),
        "locations": gke_describe_result["locations"],
        "loggingService": gke_describe_result["loggingService"],
        "maintenanceWindow": gke_describe_result["maintenancePolicy"]
            .get("window", {})
            .get("dailyMaintenanceWindow", {})
            .get("startTime", None),
        "monitoringService": gke_describe_result["monitoringService"],
        "networkPolicyEnabled": gke_describe_result.get("networkPolicy", {}).get("enabled", False),

        "privateClusterConfig": {
            "enablePrivateEndpoint": gke_describe_result["privateClusterConfig"].get("enablePrivateEndpoint", False),
            "enablePrivateNodes": gke_describe_result["privateClusterConfig"].get("enablePrivateNodes", False),
            "masterIpv4CidrBlock": gke_describe_result["privateClusterConfig"].get("masterIpv4CidrBlock", "PLACEHOLDER") # "" or null doesn't work
        },
        "projectNumber": gke_project_number.strip(),
        "projectID": user_inputted_flags[GKE_PROJECT_ID_FLAG],
        "serviceAccount": user_inputted_flags[GKE_SERVICE_ACCOUNT_FLAG],
        "workloadIdentityPoolId": user_inputted_flags[GKE_WORKLOAD_IDENTITY_POOL_FLAG],
        "workloadIdentityProviderId": user_inputted_flags[GKE_WORKLOAD_IDENTITY_PROVIDER_FLAG],
    }

    # A trailing "-<letter>" (e.g. "us-central1-a") marks a zonal location;
    # otherwise treat the value as a region.
    if re.search(r"-[a-z]$", gke_region):
        gke_config["zone"] = gke_region
    else:
        gke_config["region"] = gke_region

    if gke_describe_result["masterAuthorizedNetworksConfig"].get("enabled", False):
        gke_config["masterAuthorizedNetworks"] = {
            "enabled": True,
            "cidrBlocks": gke_describe_result["masterAuthorizedNetworksConfig"]["cidrBlocks"]
        }

    # Node pools are only emitted for standard clusters; an empty "autopilot"
    # object is taken to mean the cluster is not an Autopilot cluster.
    if gke_describe_result["autopilot"] == {}:
        gke_config["nodePools"] = [{
            "initialNodeCount": node_pool["initialNodeCount"],
            "name": node_pool["name"],
            "version": node_pool["version"],
            "autoscaling": {
                key: value for key, value in node_pool["autoscaling"].items() if key in ["enabled", "maxNodeCount", "minNodeCount"]
            },
            "config": {
                "diskSizeGb": int(node_pool["config"]["bootDisk"]["sizeGb"]),
                "diskType": node_pool["config"]["bootDisk"]["diskType"],
                "imageType": node_pool["config"]["imageType"],
                "labels": node_pool["config"].get("labels", {}),
                "machineType": node_pool["config"]["machineType"],
                "oauthScopes": node_pool["config"]["oauthScopes"],
                "tags": node_pool["config"].get("tags", []),
                "taints": node_pool["config"].get("taints", [])
            },
            "management": {
                key: value for key, value in node_pool["management"].items() if key in ["autoRepair", "autoUpgrade"]
            },
            "maxPodsConstraint": int(node_pool["maxPodsConstraint"]["maxPodsPerNode"])
        } for node_pool in gke_describe_result["nodePools"]
        ]

    return gke_config

def generate_mks_config(user_inputted_flags: dict) -> dict:
    """Build the mksConfig section for an imported MKS cluster.

    Runs the AWS CLI against the SPC regional EKS endpoint to describe the
    cluster and its nodegroups, so the caller must have a working SPC
    profile configured.

    Raises:
        RuntimeError: if an AWS CLI invocation exits non-zero
            (unauthenticated, cluster not found, etc.).
    """
    cluster_name = user_inputted_flags[CLUSTER_NAME_FLAG]
    region = user_inputted_flags[MKS_REGION_FLAG]
    # Flags shared by every CLI call: SPC region, regional endpoint, profile.
    common_flags = (f"--region {region} "
                    f"--endpoint https://eks.{region}.samsungspc.com "
                    f"--profile {user_inputted_flags[MKS_PROFILE_FLAG]}")

    def run_cli(command: str) -> dict:
        """Run an AWS CLI command and return its parsed JSON stdout."""
        try:
            res = subprocess.run(command.split(), check=True, capture_output=True, text=True)
        except subprocess.CalledProcessError as err:
            raise RuntimeError(err.stderr)
        return json.loads(res.stdout)

    def describe_cluster() -> dict:
        """Map the cluster description to the cluster-level mksConfig fields."""
        cluster = run_cli(f"aws eks describe-cluster --name {cluster_name} {common_flags}")["cluster"]

        # Collect the control-plane log types that are currently enabled.
        logging_types = []
        for logs in cluster["logging"]["clusterLogging"]:
            if logs["enabled"]:
                logging_types.extend(logs["types"])

        return {
            "kubernetesVersion"   : cluster["version"],
            "tags"                : cluster["tags"],
            "loggingTypes"        : logging_types,
            "publicAccess"        : cluster["resourcesVpcConfig"]["endpointPublicAccess"],
            "privateAccess"       : cluster["resourcesVpcConfig"]["endpointPrivateAccess"],
            "publicAccessSources" : cluster["resourcesVpcConfig"]["publicAccessCidrs"],
        }

    def describe_node_groups() -> list:
        """Describe each nodegroup of the cluster and map it to mksConfig form."""
        node_config = []
        listing = run_cli(f"aws eks list-nodegroups --cluster-name {cluster_name} {common_flags}")
        for node in listing["nodegroups"]:
            nodegroup = run_cli(f"aws eks describe-nodegroup --cluster-name {cluster_name} --nodegroup-name {node} {common_flags}")["nodegroup"]
            node_group_contents = {
                "nodeGroupName" : nodegroup["nodegroupName"],
                "diskSize"      : nodegroup["diskSize"],
                "instanceType"  : nodegroup["instanceTypes"][0],
                "version"       : nodegroup["version"],
                "minSize"       : nodegroup["scalingConfig"]["minSize"],
                "maxSize"       : nodegroup["scalingConfig"]["maxSize"],
                "desiredSize"   : nodegroup["scalingConfig"]["desiredSize"],
                # The role name is the part of the ARN after "role/".
                "nodeRole"      : (nodegroup["nodeRole"]).split("role/")[1],
                "tags"          : nodegroup["tags"],
                "labels"        : nodegroup["labels"],
            }

            launch_template = nodegroup.get("launchTemplate", None)
            if launch_template:
                node_group_contents["launchTemplate"] = {
                    "id" : launch_template["id"],
                    # The CLI returns the version as a string; convert to int.
                    "version" : int(launch_template["version"]),
                    "name" : launch_template["name"]
                }
            node_config.append(node_group_contents)

        return node_config

    cluster_config = describe_cluster()
    cluster_config["nodeGroups"] = describe_node_groups()

    # Fields derived directly from user input.
    required_config = {
        "displayName"   : cluster_name,
        "imported"      : True,
        "kosmosRoleArn" : user_inputted_flags[MKS_KOSMOS_ROLE_ARN_FLAG],
        "region"        : region,
    }
    return {**required_config, **cluster_config}

def generate_aks_config(user_inputted_flags: dict) -> dict:
    """Build the aksConfig section for an imported AKS cluster.

    Queries the Azure CLI for the current subscription, the cluster
    description, and the managed-identity (client) description, so the
    caller must be logged in with `az`.

    Raises:
        RuntimeError: if an az invocation exits non-zero.
    """
    aks_cluster_name = user_inputted_flags[CLUSTER_NAME_FLAG]
    aks_resource_group = user_inputted_flags[AKS_CLUSTER_RESOURCE_GROUP_FLAG]
    client_name = user_inputted_flags[AKS_CLIENT_NAME_FLAG]
    client_resource_group = user_inputted_flags[AKS_CLIENT_RESOURCE_GROUP_FLAG]

    def run_az(command: str) -> dict:
        """Run an Azure CLI command and return its parsed JSON stdout."""
        result = subprocess.run(command.split(), capture_output=True, text=True, check=False)
        if result.returncode != 0:
            raise RuntimeError(result.stderr)
        return json.loads(result.stdout)

    azure_id_describe_result = run_az("az account show")
    aks_describe_result = run_az(f"az aks show --name {aks_cluster_name} --resource-group {aks_resource_group}")
    client_describe_result = run_az(f"az identity show --name {client_name} --resource-group {client_resource_group}")

    # addonProfiles may be absent or null; treat that as "no addons enabled".
    addon_profiles = aks_describe_result.get("addonProfiles") or {}

    aks_config = {
        "imported": True,
        "clusterName": aks_cluster_name,
        "resourceGroup": aks_resource_group,

        # Identity used by Kosmos towards Azure.
        "clientID": client_describe_result["clientId"],
        "subscriptionID": azure_id_describe_result["id"],
        "tenantID": azure_id_describe_result["tenantId"],

        "authorizedIpRanges": aks_describe_result["apiServerAccessProfile"]["authorizedIpRanges"],
        "httpApplicationRouting": addon_profiles.get("httpApplicationRouting", {}).get("enabled", False),
        "kubernetesVersion": aks_describe_result["kubernetesVersion"],
        "monitoring": addon_profiles.get("omsAgent", {}).get("enabled", False),
        "resourceLocation": aks_describe_result["location"],
        "tags": aks_describe_result.get("tags"),
    }

    # One entry per agent pool, mapped to the Kosmos nodePool schema.
    aks_config["nodePools"] = [{
        "availabilityZones": node_pool["availabilityZones"],
        "count": node_pool["count"],
        "enableAutoScaling": node_pool["enableAutoScaling"],
        "mode": node_pool["mode"],
        "maxCount": node_pool["maxCount"],
        "minCount": node_pool["minCount"],
        "maxPods": node_pool["maxPods"],
        "maxSurge": node_pool["upgradeSettings"]["maxSurge"],
        "orchestratorVersion": node_pool["orchestratorVersion"],
        "osDiskSizeGB": node_pool["osDiskSizeGb"],
        "osDiskType": node_pool["osDiskType"],
        "osType": node_pool["osType"],
        "name": node_pool["name"],
        "nodeLabels": node_pool["nodeLabels"],
        "nodeTaints": node_pool["nodeTaints"],
        "tags": node_pool["tags"],
        "vmSize": node_pool["vmSize"],
        "vnetSubnetID": node_pool["vnetSubnetId"],
    } for node_pool in aks_describe_result["agentPoolProfiles"]]

    return aks_config


def generate_config(type: str, skeleton: dict, user_inputted_flags: dict) -> dict:
    """Fill the skeleton's CSP-specific config section and return the skeleton.

    Args:
        type: cluster type (one of EKS, GKE, MKS, AKS).
        skeleton: base manifest produced by init_skeleton(); mutated in place.
        user_inputted_flags: parsed CLI flags.

    Raises:
        ValueError: if the cluster type has no config generator (previously
            this surfaced as a confusing UnboundLocalError).
    """
    generators = {
        EKS: generate_eks_config,
        GKE: generate_gke_config,
        MKS: generate_mks_config,
        AKS: generate_aks_config,
    }
    if type not in generators:
        raise ValueError(f"Unsupported cluster type: {type}")

    skeleton[SPEC_KEY][RESOURCE_CONFIG_KEY_NAME[type]] = generators[type](user_inputted_flags)
    return skeleton

def main():
    """Entry point: parse flags, validate them, and print the cluster manifest as YAML.

    Validation and CLI failures are printed and terminate with exit code 1.
    """
    user_args = get_user_arguments(sys.argv[1:])
    cluster_type = user_args[TYPE_FLAG]

    try:
        validate_flags(cluster_type, user_args)
        skeleton = init_skeleton(cluster_type, user_args[CLUSTER_NAME_FLAG], user_args[FLEET_ID_FLAG])
        generated_config = generate_config(cluster_type, skeleton, user_args)
        print(yaml.dump(generated_config))
    except (ValueError, RuntimeError) as err:
        # Both exception types were handled identically; merged into one clause.
        print(err)
        sys.exit(1)

if __name__ == "__main__": # pragma: no cover
    main()
