From e6832e69ddc6e127f5db7f7b5893521098085500 Mon Sep 17 00:00:00 2001
From: Maximilian Schmidt <maximilian.schmidt@ims.uni-stuttgart.de>
Date: Thu, 5 Sep 2024 16:31:17 +0200
Subject: [PATCH] Use Llama3.1 as default for llamachat

---
 evoprompt/models.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/evoprompt/models.py b/evoprompt/models.py
index 1d340e3..5a5a156 100644
--- a/evoprompt/models.py
+++ b/evoprompt/models.py
@@ -240,16 +240,16 @@ class Llama(LLMModel):
         group.add_argument(
             "--llama-model",
             type=str,
-            default="QuantFactory/Meta-Llama-3-8B-Instruct-GGUF",
+            default="QuantFactory/Meta-Llama-3.1-8B-Instruct-GGUF",
             help="A pre-trained model from HF hub",
         ),
         group.add_argument(
             "--llama-model-file",
             type=str,
             # TODO provide some help for selecting model files, and point user to set this argument if needed
-            default="Meta-Llama-3-8B-Instruct.Q5_K_M.gguf",
+            default="Meta-Llama-3.1-8B-Instruct.Q8_0.gguf",
             help="Specify the model file in case of a pre-trained model from HF hub, e.g., a specific quantized version",
         ),
         group.add_argument(
-- 
GitLab