diff --git a/main.py b/main.py
index 2a9a7dd8fe3925be500b742b84a3e7ca49eb0385..4f46baf754e984bc063aebb00b8fca42818566c1 100644
--- a/main.py
+++ b/main.py
@@ -5,7 +5,7 @@ from dotenv import load_dotenv
 
 from cli import argument_parser
 from evolution import get_optimizer_class
-from models import Llama3, LLMModel
+from models import Llama, LLMModel
 from task import get_task
 from utils import logger
 
@@ -25,6 +25,24 @@ def conv2bool(_str: Any):
 if __name__ == "__main__":
     options = argument_parser.parse_args()
 
+    # log cli arguments
+    logger.info(
+        "CLI arguments:\n\tPositional:%s\n\tKeyword:\n\t\t%s",
+        ", ".join(options._get_args()),
+        "\n\t\t".join((f"{param}={value}" for param, value in options._get_kwargs())),
+    )
+
+    # debug mode will allow for a quick run
+    debug = options.debug
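+    # the CLI flag takes precedence over the EP_DEBUG environment variable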
+    if debug is None:
+        debug = conv2bool(os.getenv("EP_DEBUG", False))
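+        # conv2bool returns None for values it cannot interpret as a boolean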
+        if debug is None:
+            raise ValueError(
+                f"'{os.getenv('EP_DEBUG')}' is not allowed for env variable EP_DEBUG."
+            )
+
     # set up evolution model
     evolution_model = LLMModel.get_model(options.evolution_engine, options)
 
@@ -40,26 +58,11 @@ if __name__ == "__main__":
     logger.info("Using Llama2 as the evaluation engine")
     evaluation_model: LLMModel
     match options.evolution_engine:
-        case "llama2" | "llama3":
+        case "llama":
             evaluation_model = evolution_model
         case "openai":
-            evaluation_model = Llama3(options)
-
-    # log cli arguments
-    logger.info(
-        "CLI arguments:\n\tPositional:%s\n\tKeyword:\n\t\t%s",
-        ", ".join(options._get_args()),
-        "\n\t\t".join((f"{param}={value}" for param, value in options._get_kwargs())),
-    )
-
-    # debug mode will allow for a quick run
-    debug = options.debug
-    if debug is None:
-        debug = conv2bool(os.getenv("EP_DEBUG", False))
-        if debug is None:
-            raise ValueError(
-                f"'{os.getenv('EP_DEBUG')}' is not allowed for env variable EP_DEBUG."
-            )
+            evaluation_model = Llama(options)
 
     task = get_task(options.task, evaluation_model, options)
     logger.info(
diff --git a/models.py b/models.py
index 5a89761db3d83428fde076152b7227694180dd06..02eb4e7e35d0cee9a5128b336dc35fd03ca7c559 100644
--- a/models.py
+++ b/models.py
@@ -1,13 +1,12 @@
-import abc
 import inspect
-from abc import ABC, abstractmethod, abstractproperty
+from abc import ABC, abstractmethod
 from argparse import ArgumentParser, Namespace
 from pathlib import Path
 from typing import Any, ClassVar
 
+import huggingface_hub
+import llama_cpp
 import openai
-from llama_cpp import Llama
-
 from cli import argument_parser
 from opt_types import ModelUsage
 
@@ -56,7 +55,8 @@ class LLMModel(ABC):
         pass
 
 
-class LlamaModel(LLMModel):
+class Llama(LLMModel):
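+    """Llama model via llama-cpp-python, loaded from a local path or the HF hub."""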
 
     def __init__(
         self,
@@ -64,23 +64,40 @@ class LlamaModel(LLMModel):
         n_gpu_layers: int = 60,
         n_threads: int = 8,
         n_ctx: int = 4096,
-        verbose: bool = False,
         **kwargs,
     ) -> None:
 
-        # initialize model
-        self.model = Llama(
-            options.llama_path,
-            chat_format=self.chat_format,
-            verbose=verbose,
-            n_gpu_layers=n_gpu_layers,
-            n_threads=n_threads,
-            n_ctx=n_ctx,
-            **kwargs,
-        )
-
         super().__init__(options)
 
+        # initialize model
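+        # if neither chat_format nor chat_handler is set, llama_cpp infers the
+        # chat format from the model's gguf metadata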
+        if options.llama_path is not None:
+            # use local file
+            self.model = llama_cpp.Llama(
+                options.llama_path,
+                chat_format=options.chat_format,
+                chat_handler=options.chat_handler,
+                verbose=options.verbose,
+                n_gpu_layers=n_gpu_layers,
+                n_threads=n_threads,
+                n_ctx=n_ctx,
+                **kwargs,
+            )
+        else:
+            # use pre-trained model from HF hub
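+            # from_pretrained downloads the file and caches it via huggingface_hub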
+            self.model = llama_cpp.Llama.from_pretrained(
+                repo_id=options.llama_model,
+                filename=options.llama_model_file,
+                chat_format=options.chat_format,
+                chat_handler=options.chat_handler,
+                verbose=options.verbose,
+                n_gpu_layers=n_gpu_layers,
+                n_threads=n_threads,
+                n_ctx=n_ctx,
+                **kwargs,
+            )
+
     def __call__(
         self,
         system_message: str | None,
@@ -135,29 +153,43 @@ class LlamaModel(LLMModel):
         usage = ModelUsage(**response["usage"])
         return response_text, usage
 
-    @property
-    @abstractmethod
-    def chat_format(self) -> str:
-        pass
-
     @classmethod
     def register_arguments(cls, parser: ArgumentParser):
         group = parser.add_argument_group(f"{cls.__name__} model arguments")
         group.add_argument(
-            "--llama-path", default="models/llama-2-13b-chat.Q5_K_M.gguf"
+            "--llama-path",
+            type=str,
+            help="Specify path to local Llama model, takes precedence over --llama-model",
+        ),
+        group.add_argument(
+            "--llama-model",
+            type=str,
+            default="TheBloke/Llama-2-13B-chat-GGUF",
+            help="A pre-trained model from HF hub",
+        ),
+        group.add_argument(
+            "--llama-model-file",
+            type=str,
+            # TODO provide some help for selecting model files, and point user to set this argument if needed
+            default="llama-2-13b-chat.Q5_K_M.gguf",
+            help="Specify the model file in case of a pre-trained model from HF hub, e.g., a specific quantized version",
+        ),
+        group.add_argument(
+            "--chat-format",
+            type=str,
+            help="Override chat format (--chat-handler takes precedence if specified)",
+        )
+        group.add_argument(
+            "--chat-handler",
+            type=str,
+            help="Override chat handler (takes precedence over --chat-format)",
+        )
+        group.add_argument(
+            "--verbose",
+            "-v",
+            action="store_true",
+            help="Increase verbosity",
         )
-
-
-class Llama2(LlamaModel):
-    @property
-    def chat_format(self) -> str:
-        return "llama-2"
-
-
-class Llama3(LlamaModel):
-    @property
-    def chat_format(self) -> str:
-        return "llama-3"
 
 
 class OpenAI(LLMModel):
diff --git a/optimization.py b/optimization.py
index bbd6074c3112cceff7bdf98be0f24ce2e097a2cf..1cb09750ac46bf361c6586f58edb8f48065df84c 100644
--- a/optimization.py
+++ b/optimization.py
@@ -4,11 +4,10 @@ from itertools import zip_longest
 from pathlib import Path
 from typing import Any, Optional
 
-from tqdm import trange
-
-from models import Llama2, LLMModel, OpenAI
+from models import LLMModel
 from opt_types import ModelUsage, OptTypeEncoder, Prompt
 from task import Task
+from tqdm import trange
 from utils import initialize_run_directory, log_calls
 
 PARAPHRASE_PROMPT = """You are given an instruction that describes a task. Write a response that paraphrases the instruction. Only output the paraphrased instruction bracketed in <prompt> and </prompt>."""
diff --git a/task.py b/task.py
index 6ba4b1482fe88778a0a6087393e58615b1673482..e1e1e4f05f64146b0682d3aafdb26fe732e478d4 100644
--- a/task.py
+++ b/task.py
@@ -5,14 +5,13 @@ from functools import lru_cache
 from statistics import mean
 from typing import Union
 
+from cli import argument_parser
 from datasets import Dataset, load_dataset
 from evaluate import load as load_metric
 from llama_cpp import LlamaGrammar, deque
-from tqdm import tqdm
-
-from cli import argument_parser
-from models import Llama2, LLMModel, OpenAI
+from models import LLMModel
 from opt_types import ModelUsage
+from tqdm import tqdm
 from utils import log_calls, logger
 
 SYSTEM_MESSAGE = """
@@ -117,7 +116,7 @@ class Task:
 
     def __init__(
         self,
-        model: Union[Llama2, OpenAI],
+        model: LLMModel,
         validation_dataset: str,
         test_dataset: str,
         *,