diff --git a/evoprompt/evolution/evolution.py b/evoprompt/evolution/evolution.py
index 7412f1f4c972442d8f92ff0cb336c33aaa7e66d0..e57e6817dd3b93b934d78e5ea6ece439a1c15475 100644
--- a/evoprompt/evolution/evolution.py
+++ b/evoprompt/evolution/evolution.py
@@ -290,7 +290,7 @@ class GeneticAlgorithm(EvolutionAlgorithm):
             prompt1=prompt_1,
             prompt2=prompt_2,
         )
-        response, history, recent_turn, usage = self.evolution_model.create_completion(
+        response, _, recent_turn, usage = self.evolution_model.create_completion(
             system_message=SYSTEM_MESSAGE,
             prompt=filled_prompt,
             history=demo_messages if self.use_evolution_demo else None,
@@ -403,7 +403,7 @@ class DifferentialEvolution(EvolutionAlgorithm):
             prompt3=best_prompt_current_evolution,
             basic_prompt=prompts_current_evolution[current_iteration],
         )
-        response, history, recent_turn, usage = self.evolution_model.create_completion(
+        response, _, recent_turn, usage = self.evolution_model.create_completion(
             system_message=SYSTEM_MESSAGE,
             prompt=filled_prompt,
             history=demo_messages if self.use_evolution_demo else None,
@@ -511,7 +511,6 @@ class DifferentialEvolutionWithCot(DifferentialEvolution):
         evolutions_steps = []
         # list (turns) of list (demonstrations)
         demos = [[]]
-        response: str = ""
         judgements: list[Judgement] = []
         usage: ModelUsage = ModelUsage()
         for idx, prompt in enumerate(self._get_prompt_template()):
@@ -573,11 +572,11 @@ class DifferentialEvolutionWithCot(DifferentialEvolution):
                 )
 
-            # replace last message with corrected response
-            recent_turn[-1]["content"] = judgement.corrected_response
+            # take the judgement's corrected response
             response = judgement.corrected_response
 
-            # update history with recent turn
+            # update history with the recent turn and the assistant's response
             history += recent_turn
+            history.append(self.evolution_model._get_assistant_message(response))
 
         evolved_prompt = self.parse_response(response)
 
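Note (evolution.py): GeneticAlgorithm and DifferentialEvolution never use the
history returned by create_completion, so both call sites now discard it into
`_`. In DifferentialEvolutionWithCot, the judge's corrected response is no
longer written back into the last message of `recent_turn`; the loop instead
appends it to `history` as a fresh assistant message, matching the adjusted
create_completion contract in models.py below. Dropping the `response: str = ""`
pre-initialization relies on the prompt-template loop always running at least
once; otherwise `parse_response(response)` would raise a NameError. A minimal
runnable sketch of the new per-turn bookkeeping (the message shapes and the
get_assistant_message helper are illustrative stand-ins, not the project's
real API):

    def get_assistant_message(content: str) -> dict:
        # stand-in for LLMModel._get_assistant_message
        return {"role": "assistant", "content": content}

    history: list[dict] = []
    # create_completion now returns the recent turn WITHOUT the assistant reply
    recent_turn = [{"role": "user", "content": "Mutate prompt A using prompt B."}]
    response = "EVOLVED: ..."                        # raw model output
    response = "EVOLVED (corrected): ..."            # the judge may replace it
    history += recent_turn                           # user side of the turn
    history.append(get_assistant_message(response))  # assistant side, caller-owned
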
diff --git a/evoprompt/models.py b/evoprompt/models.py
index f80a582e6eaf79e5fb254995e1f46c3d042f98da..cb7c4ff342cd05cb723dacd8617fccaff830d246 100644
--- a/evoprompt/models.py
+++ b/evoprompt/models.py
@@ -118,7 +118,6 @@ class LLMModel(ABC):
             use_randomness=use_randomness,
         )
 
-        messages.append(self._get_assistant_message(reponse))
         return reponse, None, messages, usage
 
     def build_demonstration_data(
@@ -438,7 +437,7 @@ class ChatModel:
         return (
             reponse,
             history,
-            messages + [self._get_assistant_message(reponse)],
+            messages,
             usage,
         )
 
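Note (models.py): with both hunks above, create_completion no longer appends
the assistant message to the messages it returns: the recent turn now contains
only what was sent to the model, and the reply is available solely as the
first element of the returned tuple. Callers that want a full transcript
append the reply themselves, as the evolution.py changes above do. A toy
sketch of the adjusted (response, history, recent_turn, usage) contract; the
body is a stub, not the real ChatModel:

    def create_completion(prompt, history=None):
        messages = [{"role": "user", "content": prompt}]
        response = "stub completion"  # stands in for the actual LLM call
        usage = {"tokens": 0}
        # the assistant message is intentionally NOT appended to `messages`
        return response, history, messages, usage

    response, history, recent_turn, usage = create_completion("Improve this prompt.")
    assert all(m["role"] != "assistant" for m in recent_turn)
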
diff --git a/evoprompt/optimization.py b/evoprompt/optimization.py
index db40b63827d727674ed9bd3e14f03fbcd7f1f765..cc3710aca89103a63f28fc43768ec23387a2d978 100644
--- a/evoprompt/optimization.py
+++ b/evoprompt/optimization.py
@@ -47,11 +47,13 @@ class ResponseEditor(App):
         instruction: str,
         original_response: str,
         history: ChatMessages,
+        recent_turn: ChatMessages,
         judge_response: str,
     ):
         self.instruction = instruction
         self.response = original_response
         self.history = history
+        self.recent_turn = recent_turn
         self.judge_response = judge_response
         self.skip = False  # used to mark the prompt as skipped
         super().__init__()
@@ -64,11 +66,22 @@ class ResponseEditor(App):
                     Collapsible(
                         Static(message["content"]),
                         title=message["role"],
-                        collapsed=idx != len(self.history) - 1,
+                        collapsed=True,
                     )
-                    for idx, message in enumerate(self.history)
+                    for message in self.history
+                )
+            )
+        yield ScrollableContainer(
+            *(
+                Collapsible(
+                    Static(message["content"]),
+                    title=message["role"],
+                    collapsed=False,
                 )
+                for message in self.recent_turn
             )
+        )
+
         yield ScrollableContainer(
             Label(Panel(self.judge_response, title="Judge response")),
             Label(Rule(title="Response to edit"), expand=True),
@@ -408,7 +421,8 @@ class PromptOptimization:
         editor = ResponseEditor(
             instruction,
             response,
-            history[:-1] if history is not None else None,
+            history,
+            recent_turn=recent_turn,
             judge_response=judgement_response,
         )
         editor.run()
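
Note (optimization.py): ResponseEditor now takes the full history and the
current turn as separate arguments. Every history message renders as a
collapsed Collapsible while the recent turn renders expanded, replacing the
old heuristic of expanding only the last history entry; accordingly, the call
site passes the complete history (no more history[:-1]) together with
recent_turn. A small runnable sketch of the rendering decision, with plain
strings standing in for chat messages:

    history = ["system", "user-1", "assistant-1"]
    recent_turn = ["user-2"]

    old_collapsed = [idx != len(history) - 1 for idx, _ in enumerate(history)]
    new_collapsed = [True] * len(history) + [False] * len(recent_turn)
    print(old_collapsed)  # [True, True, False]: only the last entry was open
    print(new_collapsed)  # [True, True, True, False]: only the recent turn is open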