diff --git a/evoprompt/optimization.py b/evoprompt/optimization.py
index 81538709c460da09134e5fbe3841e1595105986b..095beaa78a663c0af56c2e8dfa15a89083175c6b 100644
--- a/evoprompt/optimization.py
+++ b/evoprompt/optimization.py
@@ -93,8 +93,8 @@ class ResponseEditor(App):
         return self.text_area.text
 
 
-@log_calls("Paraphrasing prompts")
-def paraphrase_prompts(
+@log_calls("Paraphrasing prompt")
+def paraphrase_prompt(
     model: LLMModel,
     prompt: str,
     n: int,
@@ -112,7 +112,8 @@ def paraphrase_prompts(
         num_tries += 1
         paraphrase, _, _, usage = model.create_completion(
             system_message=PARAPHRASE_PROMPT,
-            prompt=f"Instruction: {prompt}",
+            prompt=prompt,
+            enforce_randomness=True,
         )
         total_usage += usage
         if "<prompt>" in paraphrase:
@@ -200,7 +201,7 @@ class PromptOptimization:
         # fill up the rest with paraphrases of the top prompts
         promptindex_to_paraphrase = 0
         while len(initial_population) < num_initial_prompts:
-            paraphrases, paraphrase_usage = paraphrase_prompts(
+            paraphrases, paraphrase_usage = paraphrase_prompt(
                 self.evolution_model,
                 top_prompts[promptindex_to_paraphrase],
                 n=1,