diff --git a/evoprompt/models.py b/evoprompt/models.py
index 2dabae9fa66685d56c5721f4dde2a6cc1942c323..2fa83f6d5fe6ed71dd88000cfced5be6a9920595 100644
--- a/evoprompt/models.py
+++ b/evoprompt/models.py
@@ -468,6 +468,7 @@ class HfChat(ChatModel, LLMModel):
         stop: str | None,
         max_tokens: int | None,
         enforce_randomness: bool,
+        **kwargs,
     ):
         # setup kwargs for model call
         model_call_kwargs = {
@@ -512,6 +513,26 @@ class AlpacaHfChat(HfChat):
         # chat template for Alpaca adapted from https://huggingface.co/Vezora/Mistral-22B-v0.1/blob/c15d70465e2fc46c3c4d7fec8fb62f533d4ef09b/tokenizer_config.json#L30
         self.pipeline.tokenizer.chat_template = "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% if system_message != false %}{{ system_message + '\\n\\n' }}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '### Instruction:\\n' + message['content'].strip() + '\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ '### Response:\\n'  + message['content'].strip()}}{% endif %}{% endfor %}"
 
+    def _create_completion(
+        self,
+        messages: list[dict[str, str]],
+        *,
+        use_cache: bool,
+        stop: str | None,
+        max_tokens: int | None,
+        enforce_randomness: bool,
+        **kwargs,
+    ):
+        # NOTE: for some reason, appending an empty assistant message here yields different generations than emitting the assistant response header directly in the chat template
+        return super()._create_completion(
+            messages + [self._get_assistant_message("")],
+            use_cache=use_cache,
+            stop=stop,
+            max_tokens=max_tokens,
+            enforce_randomness=enforce_randomness,
+            **kwargs,
+        )
+
     def _get_input_prefix(self):
         return "### Input:\n"