From 9462d6431ed08b7c9f6978f0f93fa6639cd7686c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Daniel=20Grie=C3=9Fhaber?= <griesshaber@hdm-stuttgart.de>
Date: Tue, 20 Aug 2024 10:48:47 +0200
Subject: [PATCH] allow disabling LLM-as-a-judge by not specifying a judge engine

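When no judge engine is configured, judge_model is now None and
judge_and_correct_step() skips judging entirely, returning the response
unchanged with Judgement.happy set to None. This makes the three-valued
semantics explicit: True (judge happy), False (judge unhappy), None (not
judged). Previously, an unspecified judge engine silently fell back to
the evolution model.

A minimal sketch of how a caller can distinguish the cases (`optimizer`
stands in for any PromptOptimization instance):

    judgement = optimizer.judge_and_correct_step(instruction, response, history)
    if judgement.happy is False:
        # the judge rejected the original response: take the corrected one
        response = judgement.corrected_response
    # happy is None when judging is disabled: the response stays as-is
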
---
 evoprompt/evolution.py    | 2 +-
 evoprompt/optimization.py | 7 +++++--
 main.py                   | 5 +----
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/evoprompt/evolution.py b/evoprompt/evolution.py
index 2b4e6c7..9e1a9f3 100644
--- a/evoprompt/evolution.py
+++ b/evoprompt/evolution.py
@@ -46,7 +46,7 @@ class EvolutionAlgorithm(PromptOptimization, metaclass=ABCMeta):
         task: Task,
         evolution_model: LLMModel,
         evaluation_model: LLMModel,
-        judge_model: LLMModel,
+        judge_model: LLMModel | None,
         run_options: dict[str, Any] = {},
     ) -> None:
         super().__init__(
diff --git a/evoprompt/optimization.py b/evoprompt/optimization.py
index 4f91aba..94ef340 100644
--- a/evoprompt/optimization.py
+++ b/evoprompt/optimization.py
@@ -24,7 +24,7 @@ PromptSource = Literal["baseprompt", "paraphrase", "evolution", "corrected"]
 class Judgement(NamedTuple):
     original_response: str
     corrected_response: str
-    happy: bool
+    happy: bool | None  # None means the response was not judged
 
 
 class PromptMeta(TypedDict):
@@ -122,7 +122,7 @@ class PromptOptimization:
         task: Task,
         evolution_model: LLMModel,
         evaluation_model: LLMModel,
-        judge_model: LLMModel,
+        judge_model: LLMModel | None,
         run_options: dict[str, Any] = {},
     ) -> None:
         self.task = task
@@ -278,6 +278,9 @@ class PromptOptimization:
     def judge_and_correct_step(
         self, instruction: str, response: str, history: ChatMessages
     ) -> Judgement:
+        if self.judge_model is None:
+            return Judgement(response, response, happy=None)  # no judge: pass response through unjudged
+
         # TODO: judge the actual response
         judge_happy = False
 
diff --git a/main.py b/main.py
index a6474b2..c3a2fc3 100644
--- a/main.py
+++ b/main.py
@@ -69,13 +69,10 @@ if __name__ == "__main__":
         case "openai":
             logger.info(f"Using {options.openai_model} as the evolution engine")
 
-    judge_model: LLMModel
+    judge_model: LLMModel | None = None
     if options.judge_engine is not None:
         judge_model = LLMModel.get_model(options.judge_engine, options=options)
         logger.info(f"Using {options.judge_engine} as the judge engine")
-    else:
-        judge_model = evolution_model
-        logger.info("Using the same model for judging as for evolution")
 
     # set up evaluation model
 # NOTE currently we always stick to Llama as the evaluation engine
-- 
GitLab