diff --git a/api/optimization.py b/api/backend.py
similarity index 80%
rename from api/optimization.py
rename to api/backend.py
index 403c83611e2e2a4ace683d59a90b7f35db88c91f..df18776b696c2ea501246126a4870322fb78ac82 100644
--- a/api/optimization.py
+++ b/api/backend.py
@@ -1,8 +1,12 @@
+import asyncio
 from argparse import Namespace
-from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
-from functools import partial
+from concurrent.futures import ThreadPoolExecutor
+from contextlib import asynccontextmanager
 from typing import ClassVar
 
+from fastapi import FastAPI
+
+from api import config
 from evolution import GeneticAlgorithm
 from models import Llama2, LLMModel
 from task import SentimentAnalysis
@@ -11,7 +15,7 @@ from task import SentimentAnalysis
 class MultiProcessOptimizer:
     _instance: ClassVar["MultiProcessOptimizer"] = None
     _running: bool = False
-    model_exec: ProcessPoolExecutor = None
+    model_exec: ThreadPoolExecutor = None
     _evolution_model: LLMModel | None = None
     _evaluation_model: LLMModel | None = None
 
@@ -21,8 +25,10 @@ class MultiProcessOptimizer:
             cls._instance = super(MultiProcessOptimizer, cls).__new__(cls)
         return cls._instance
 
-    def __init__(self, *, debug: bool = False) -> None:
-        # a flag indicating whether optimizer is currently running
+    def __init__(
+        self, *, event_loop: asyncio.AbstractEventLoop, debug: bool = False
+    ) -> None:
+        self.event_loop = event_loop
         self.debug = debug
 
     def __enter__(self):
@@ -103,3 +109,23 @@ class MultiProcessOptimizer:
     def get_run_name(self):
         if self.optimizer.run_directory is not None:
             return self.optimizer.run_directory.name
+
+    async def evolve(self, prompt1: str, prompt2: str):
+        return await self.event_loop.run_in_executor(
+            self.model_exec, self.optimizer.evolve, prompt1, prompt2
+        )
+
+
+instance: MultiProcessOptimizer | None = None
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    global instance
+    # Load the instance (which runs models in a separate thread)
+    instance = MultiProcessOptimizer(
+        event_loop=asyncio.get_running_loop(),
+        debug=config.DEBUG,
+    )
+    with instance:
+        yield
diff --git a/api/main.py b/api/main.py
index a4532087e36d777ff84ded003e3fb6caf1949d74..cdb57b3eb1b6b51cb822821f068129bc0c4d8360 100644
--- a/api/main.py
+++ b/api/main.py
@@ -2,13 +2,13 @@ from fastapi import FastAPI, Request, Response
 from fastapi.staticfiles import StaticFiles
 from requests import request as make_request
 
-from api import config
+from api import backend, config
 from api.routers import optimization, runs
 
 app = FastAPI(
     debug=config.DEBUG,
     title="Prompt Optimization Backend",
-    lifespan=optimization.lifespan,
+    lifespan=backend.lifespan,
 )
 app.include_router(runs.router, prefix="/api/runs")
 app.include_router(optimization.router, prefix="/api/optimization")
diff --git a/api/routers/optimization.py b/api/routers/optimization.py
index c706fac360c94b57317c5bc2e9668bdc0c5df3d8..8366dc90b0d605a98001b2213461b7f75298a152 100644
--- a/api/routers/optimization.py
+++ b/api/routers/optimization.py
@@ -1,42 +1,32 @@
-from contextlib import asynccontextmanager
+from fastapi import APIRouter, BackgroundTasks
 
-from fastapi import APIRouter, BackgroundTasks, FastAPI
-
-from api import config
-from api.optimization import MultiProcessOptimizer  # start optimization
+from api import backend
 
 router = APIRouter()
-backend: MultiProcessOptimizer | None = None
-
-
-@asynccontextmanager
-async def lifespan(app: FastAPI):
-    global backend
-    # Load the backend (which runs models in a separate process)
-    backend = MultiProcessOptimizer(debug=config.DEBUG)
-    with backend:
-        actions = [("/api/action/evolve", backend.optimizer.evolve)]
-        for path, target in actions:
-            app.add_api_route(path, target)
-        app.openapi_schema = None
-        app.openapi()
-
-        yield
-    # TODO somehow not all ressources are released upon uvicorn reload, need to investigate further..
 
 
 @router.post("/run")
 def run(num_iterations: int, background_tasks: BackgroundTasks) -> str:
-    assert backend is not None
-    background_tasks.add_task(backend.run_optimization, num_iterations)
+    assert backend.instance is not None
+    background_tasks.add_task(backend.instance.run_optimization, num_iterations)
     return f"Running optimization with {num_iterations} iterations."
 
 
+@router.get("/actions/evolve")
+async def evolve(prompt1: str, prompt2: str) -> dict:
+    assert backend.instance is not None
+    result, usage = await backend.instance.evolve(prompt1, prompt2)
+    return {
+        "result": result,
+        "usage": usage,
+    }
+
+
 @router.get("/status")
 def get_status():
-    assert backend is not None
+    assert backend.instance is not None
     return {
-        "running": backend._running,
-        "progress": backend.get_progress(),
-        "name": backend.get_run_name(),
+        "running": backend.instance._running,
+        "progress": backend.instance.get_progress(),
+        "name": backend.instance.get_run_name(),
     }
diff --git a/frontend b/frontend
index 6f8310669f705264bc136b7d15a551bbfc2ebf08..01e209a3b80adf90f17b0697522d2ad644c0ba15 160000
--- a/frontend
+++ b/frontend
@@ -1 +1 @@
-Subproject commit 6f8310669f705264bc136b7d15a551bbfc2ebf08
+Subproject commit 01e209a3b80adf90f17b0697522d2ad644c0ba15