From 1c97f13a1658c2deebe105de3686fe9282a42444 Mon Sep 17 00:00:00 2001
From: Maximilian Schmidt <maximilian.schmidt@ims.uni-stuttgart.de>
Date: Fri, 9 Aug 2024 16:06:06 +0200
Subject: [PATCH] Fix bug where the prompt was not evolved correctly because
 too few tokens were generated

---
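Note: with the previous default of max_tokens=200, generation could stop before
the closing </prompt> stop sequence was emitted, so the evolved prompt came back
truncated. Defaulting to None defers the limit to the backend. Below is a minimal
sketch of the intended behaviour, assuming llama-cpp-python's create_completion
semantics (max_tokens=None or <= 0 means generation runs until a stop sequence,
EOS, or the context window is exhausted); the model path and prompt text are
placeholders, not values taken from evoprompt:

    from llama_cpp import Llama

    llm = Llama(model_path="model.gguf")  # placeholder path
    meta_prompt = "Rewrite the instruction and wrap it in <prompt> tags."  # placeholder

    # Old default: at most 200 new tokens; a long evolved prompt could be cut
    # off before the "</prompt>" stop sequence appears.
    old = llm.create_completion(meta_prompt, max_tokens=200, stop=["</prompt>"])

    # New default: max_tokens=None lets generation run until the stop sequence
    # or EOS, bounded only by the model's context window (n_ctx).
    new = llm.create_completion(meta_prompt, max_tokens=None, stop=["</prompt>"])
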
 evoprompt/models.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/evoprompt/models.py b/evoprompt/models.py
index d2f45dc..0d11a86 100644
--- a/evoprompt/models.py
+++ b/evoprompt/models.py
@@ -119,7 +119,7 @@ class Llama(LLMModel):
         prompt_suffix: str = "",
         chat: bool | None = None,
         stop: str = None,
-        max_tokens: int = 200,
+        max_tokens: int | None = None,
         history: dict = None,
         **kwargs: Any,
     ) -> tuple[str, ModelUsage]:
@@ -252,7 +252,7 @@ class OpenAI(LLMModel):
         prompt_suffix: str = "",
         chat: bool | None = None,
         stop: str = "</prompt>",
-        max_tokens: int = 200,
+        max_tokens: int | None = None,
         **kwargs: Any,
     ) -> tuple[str, ModelUsage]:
         if chat is None:
-- 
GitLab