Skip to content
Snippets Groups Projects
Commit b323f2e9 authored by Grießhaber Daniel's avatar Grießhaber Daniel :squid:
Browse files

add --chat option flag to replace USE_CHAT

parent eaa1f0d5
No related branches found
No related tags found
No related merge requests found
......@@ -12,4 +12,5 @@ argument_parser.add_argument("--model-path", "-m", type=str, required=True)
argument_parser.add_argument(
"--task", "-t", type=str, required=True, choices=["sa", "qa"]
)
argument_parser.add_argument("--debug", "-d", action='store_true', default=None)
argument_parser.add_argument("--debug", "-d", action="store_true", default=None)
argument_parser.add_argument("--chat", "-c", action="store_true")
......@@ -23,9 +23,6 @@ def conv2bool(_str: Any):
return None
# whether to use chat model for LLM or not
USE_CHAT: bool = False
load_dotenv()
current_directory = Path(__file__).resolve().parent
......@@ -287,12 +284,12 @@ if __name__ == "__main__":
logger.info("Using Llama2 client as the evolution engine")
evolution_model = Llama2(
model_path=options.model_path,
chat=USE_CHAT,
chat=options.chat,
)
case "openai":
logger.info("Using OpenAI client as the evolution engine")
evolution_model = OpenAI("gpt-3.5-turbo", chat=USE_CHAT)
evolution_model = OpenAI("gpt-3.5-turbo", chat=options.chat)
# set up evaluation model
# NOTE currently we always stick to Llama2 as evaluation model
......@@ -302,7 +299,7 @@ if __name__ == "__main__":
case "openai":
evaluation_model = Llama2(
model_path=options.model_path,
chat=USE_CHAT,
chat=options.chat,
)
match options.task:
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment