import logging
import os
from typing import Any

from dotenv import load_dotenv

from evoprompt.cli import argument_parser
from evoprompt.evolution import get_optimizer_class
from evoprompt.models import Llama, LlamaChat, LLMModel
from evoprompt.task import get_task
from evoprompt.utils import init_rng, setup_console_logger

logger = logging.getLogger(__name__)

# load environment variables (e.g. EP_DEBUG) from a local .env file
load_dotenv()


def conv2bool(value: Any) -> bool | None:
    """Convert a value to a bool; return None if it cannot be interpreted."""
    if isinstance(value, bool):
        return value
    if str(value).lower() in ["1", "true"]:
        return True
    if str(value).lower() in ["0", "false"]:
        return False
    return None


if __name__ == "__main__":
    # set additional CLI run arguments
    argument_parser.add_argument(
        "-v", "--verbose", action="count", default=0, help="Increase verbosity"
    )
    argument_parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Set seed for random number generator (rng). ",
    )
    options = argument_parser.parse_args()

    # set up console logging and rng
    setup_console_logger(verbosity_level=options.verbose)
    init_rng(options.seed)

    # log CLI arguments
    logger.info(
        "CLI arguments:\n\t%s",
        "\n\t".join(f"{param}={value}" for param, value in sorted(vars(options).items())),
    )

    # debug mode will allow for a quick run
    debug = options.debug
    if debug is None:
        debug = conv2bool(os.getenv("EP_DEBUG", "false"))
        if debug is None:
            raise ValueError(
                f"'{os.getenv('EP_DEBUG')}' is not a valid value for the env variable EP_DEBUG."
            )
    if debug:
        logger.info("DEBUG mode: Do a quick run")

    # set up evolution model
    evolution_model = LLMModel.get_model(options.evolution_engine, options=options)

    match options.evolution_engine:
        case "llama" | "llamachat":
            logger.info("Using Llama as the evolution engine")
        case "openai" | "openaichat":
            logger.info("Using %s as the evolution engine", options.openai_model)

    # set up evaluation model
    # NOTE currently we always stick to Llama as the evaluation engine
    # TODO allow setting a separate engine and model for evaluation?
    logger.info("Using Llama as the evaluation engine")
    evaluation_model: LLMModel
    match options.evolution_engine:
        case "llama" | "llamachat":
            evaluation_model = evolution_model
        case "openai":
            evaluation_model = Llama(options)
        case "openaichat":
            evaluation_model = LlamaChat(options)
        case _:
            raise ValueError(f"Unknown evolution engine '{options.evolution_engine}'.")

    task = get_task(options.task, evaluation_model, **options.__dict__)
    logger.info(f"Running with task {task.__class__.__name__}")

    logger.info("Using evolutionary algorithm '%s'", options.evolution_algorithm)

    optimizer_class = get_optimizer_class(options.evolution_algorithm)
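    # NOTE the population size is currently hard-coded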
    optimizer = optimizer_class(
        population_size=10,
        task=task,
        evolution_model=evolution_model,
        evaluation_model=evaluation_model,
        run_options=options.__dict__,
    )
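    # start the optimization; debug mode (see above) makes this a quick run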
    optimizer.run(10, debug=debug)