Commit 1c97f13a authored by Max Kimmich

Fix bug where prompt was not evolved correctly due to too few tokens being generated

parent 580b49d7
@@ -119,7 +119,7 @@ class Llama(LLMModel):
         prompt_suffix: str = "",
         chat: bool | None = None,
         stop: str = None,
-        max_tokens: int = 200,
+        max_tokens: int = None,
         history: dict = None,
         **kwargs: Any,
     ) -> tuple[str, ModelUsage]:
@@ -252,7 +252,7 @@ class OpenAI(LLMModel):
         prompt_suffix: str = "",
         chat: bool | None = None,
         stop: str = "</prompt>",
-        max_tokens: int = 200,
+        max_tokens: int = None,
         **kwargs: Any,
     ) -> tuple[str, ModelUsage]:
         if chat is None:
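Both hunks make the same change: the previous hard default of max_tokens=200 could cut generation off before the closing </prompt> stop sequence was produced, so the evolved prompt came back truncated. With a default of None, the length limit is left to the backend, which generates until the stop sequence (or its own cap) is reached. A minimal sketch of the idea; the helper name and structure below are illustrative, not the repository's actual code:

def build_request_args(prompt: str, stop: str | None, max_tokens: int | None) -> dict:
    """Assemble keyword arguments for a completion call (hypothetical helper)."""
    args = {"prompt": prompt, "stop": stop}
    # Only constrain the generation length if the caller explicitly asked for a cap;
    # otherwise omit the limit so the backend decides when to stop.
    if max_tokens is not None:
        args["max_tokens"] = max_tokens
    return args

Callers that genuinely need a bound can still pass an explicit max_tokens; only the default behavior changes.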