From 1083a08debe19269c145e6318d8a6c91aee68926 Mon Sep 17 00:00:00 2001
From: Daniel Grießhaber <griesshaber@hdm-stuttgart.de>
Date: Fri, 16 Aug 2024 18:22:12 +0200
Subject: [PATCH] remove max_tokens parameter from create_completion

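The removed line popped "max_tokens" out of kwargs, falling back to
self.options.max_tokens. With it gone, a caller-supplied max_tokens now
stays in kwargs instead of being consumed here, and the options default
is no longer applied at this point. A minimal sketch of the assumed
calling pattern follows; the exact signature and the forwarding of the
remaining kwargs to the underlying completion call are assumptions, not
shown in this hunk:

    # hypothetical usage: max_tokens travels through kwargs untouched;
    # create_completion no longer falls back to self.options.max_tokens
    response, usage = model.create_completion(
        "Summarize the following text:",
        max_tokens=256,
    )
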
---
 evoprompt/models.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/evoprompt/models.py b/evoprompt/models.py
index 1e08439..1349359 100644
--- a/evoprompt/models.py
+++ b/evoprompt/models.py
@@ -75,7 +75,6 @@ class LLMModel(ABC):
     ) -> tuple[str, ModelUsage]:
         if chat is None:
             chat = self.chat
-        max_tokens = kwargs.pop("max_tokens", self.options.max_tokens)
 
         # create prompt
         prompt = prompt_prefix + prompt + prompt_suffix + prompt_appendix
-- 
GitLab