diff --git a/evoprompt/api/backend.py b/evoprompt/api/backend.py
index 95a99bb0a046bb71af58d1ddd1d822ced597ac76..0595f7aecfe0d863299b6567d0a0888ccf63a7a0 100644
--- a/evoprompt/api/backend.py
+++ b/evoprompt/api/backend.py
@@ -6,10 +6,11 @@ from typing import ClassVar
 
 from fastapi import FastAPI
 
-from api import config
-from evolution import GeneticAlgorithm
-from models import Llama, LLMModel
-from task import SentimentAnalysis
+from evoprompt.api import config
+from evoprompt.cli import argument_parser
+from evoprompt.evolution import GeneticAlgorithm
+from evoprompt.models import Llama, LLMModel
+from evoprompt.task import SentimentAnalysis
 
 
 class MultiProcessOptimizer:
@@ -29,7 +30,13 @@ class MultiProcessOptimizer:
         self, *, event_loop: asyncio.BaseEventLoop, debug: bool = False
     ) -> None:
         self.event_loop = event_loop
-        self.debug = debug
+        # build the CLI-style argument list for the shared argument parser;
+        # only append --debug when requested, since an empty-string argument
+        # would be rejected by argparse as unrecognized
+        cli_args = ["--task", SentimentAnalysis.shorthand]
+        if debug:
+            cli_args.append("--debug")
+        self.options = argument_parser.parse_args(cli_args)
 
     def __enter__(self):
         # TODO allow to customize optimizer
@@ -48,8 +55,7 @@ class MultiProcessOptimizer:
         ).result()
 
         # currently fix task
-        options = Namespace(use_grammar=False, debug=self.debug)
-        task = SentimentAnalysis(evaluation_model, options)
+        task = SentimentAnalysis(evaluation_model, self.options)
 
         optimizer_class = GeneticAlgorithm
         # optimizer_class = DifferentialEvolution
@@ -59,7 +65,7 @@ class MultiProcessOptimizer:
             task=task,
             evolution_model=evolution_model,
             evaluation_model=evaluation_model,
-            run_options=options.__dict__,
+            run_options=self.options.__dict__,
         )
 
     def __exit__(self, exc_type, exc_value, exc_tb):
@@ -72,11 +78,8 @@ class MultiProcessOptimizer:
         if self._evolution_model is not None:
             raise Exception("Evolution model has already been initialized.")
 
-        # currently fix model
-        options = Namespace(
-            llama_path="./models/llama-2-13b-chat.Q5_K_M.gguf", chat=True
-        )
-        self._evolution_model = Llama(options)
+        # model settings (llama_path, chat) are now taken from the parsed CLI options
+        self._evolution_model = Llama(self.options)
 
         if self._evaluation_model is not None:
             raise Exception("Evaluation model has already been initialized.")
diff --git a/evoprompt/api/main.py b/evoprompt/api/main.py
index cdb57b3eb1b6b51cb822821f068129bc0c4d8360..c9ca0a3891aa69bf40cb3fa7481cde27af02869b 100644
--- a/evoprompt/api/main.py
+++ b/evoprompt/api/main.py
@@ -2,8 +2,8 @@ from fastapi import FastAPI, Request, Response
 from fastapi.staticfiles import StaticFiles
 from requests import request as make_request
 
-from api import backend, config
-from api.routers import optimization, runs
+from evoprompt.api import backend, config
+from evoprompt.api.routers import optimization, runs
 
 app = FastAPI(
     debug=config.DEBUG,
diff --git a/evoprompt/api/routers/optimization.py b/evoprompt/api/routers/optimization.py
index 8366dc90b0d605a98001b2213461b7f75298a152..247601e3b7ef3c18aecb0f9386b76efb9d9f1e90 100644
--- a/evoprompt/api/routers/optimization.py
+++ b/evoprompt/api/routers/optimization.py
@@ -1,6 +1,6 @@
 from fastapi import APIRouter, BackgroundTasks
 
-from api import backend
+from evoprompt.api import backend
 
 router = APIRouter()