From 975b2fd885e405fe7ad432501cbe72fbe85d34bf Mon Sep 17 00:00:00 2001
From: Maximilian Kimmich <maximilian.kimmich@ims.uni-stuttgart.de>
Date: Thu, 10 Oct 2024 16:11:37 +0200
Subject: [PATCH] Increase default 'max_length' for HfChat to reduce the
 chance of hitting the token limit

---
 evoprompt/models.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/evoprompt/models.py b/evoprompt/models.py
index 57e219a..9566f15 100644
--- a/evoprompt/models.py
+++ b/evoprompt/models.py
@@ -535,7 +535,7 @@ class HfChat(ChatModel, LLMModel):
         model_call_kwargs = {
             "text_inputs": messages,
             "stop": stop,
-            "max_length": max_tokens if max_tokens is not None else 2048,
+            "max_length": max_tokens if max_tokens is not None else 16384,
         }
         if use_randomness:
             # same temperature as in evoprompt paper reference implementation
-- 
GitLab
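
For context, below is a minimal, self-contained sketch of the fallback logic
this hunk touches. The function name and signature are hypothetical
scaffolding; only the dictionary literal mirrors the code in
evoprompt/models.py. Note that in Hugging Face text generation, 'max_length'
caps prompt plus generated tokens combined, so a long chat history can use up
most of a 2048-token budget before any output is produced; raising the
fallback to 16384 leaves more headroom.

    # Hypothetical sketch; only the dict literal mirrors the patched code.
    def build_model_call_kwargs(messages, stop=None, max_tokens=None):
        """Assemble kwargs for the underlying text-generation call."""
        return {
            "text_inputs": messages,
            "stop": stop,
            # Fall back to 16384 (previously 2048) when the caller does not
            # set an explicit cap, so long chat histories are less likely to
            # hit the limit and truncate the completion.
            "max_length": max_tokens if max_tokens is not None else 16384,
        }

    # Example: no explicit cap given, so the new default applies.
    kwargs = build_model_call_kwargs(
        messages=[{"role": "user", "content": "Hello!"}],
    )
    assert kwargs["max_length"] == 16384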