diff --git a/api/config.py b/api/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..92bb1db2de9f27027fc67f565e9cc2627204e489
--- /dev/null
+++ b/api/config.py
@@ -0,0 +1,3 @@
+import os
+
+DEBUG = os.getenv("DEBUG", "false").lower() == "true"
diff --git a/api/main.py b/api/main.py
index 47752dff83aafc190c90ee9c18e43b76bd38cd2e..a4532087e36d777ff84ded003e3fb6caf1949d74 100644
--- a/api/main.py
+++ b/api/main.py
@@ -1,67 +1,20 @@
-from contextlib import asynccontextmanager
-
-from fastapi import BackgroundTasks, FastAPI, Request, Response
+from fastapi import FastAPI, Request, Response
 from fastapi.staticfiles import StaticFiles
 from requests import request as make_request
 
-from api.optimization import MultiProcessOptimizer
-from api.routers import runs
-
-# see https://github.com/tiangolo/fastapi/issues/3091#issuecomment-821522932 and https://github.com/encode/starlette/issues/1094#issuecomment-730346075 for heavy-load computation
-
-DEBUG = True
-backend: MultiProcessOptimizer | None = None
-
+from api import config
+from api.routers import optimization, runs
 
-@asynccontextmanager
-async def lifespan(app: FastAPI):
-    global backend
-    # Load the backend (which runs models in a separate process)
-    backend = MultiProcessOptimizer(debug=DEBUG)
-    with backend:
-        # add endpoints from backend
-        # TODO allow to get dynamically
-        actions = [("/api/action/evolve", backend.optimizer.evolve)]
-        for path, target in actions:
-            app.add_api_route(path, target)
-        app.openapi_schema = None
-        app.openapi()
-
-        yield
-    # TODO somehow not all ressources are released upon uvicorn reload, need to investigate further..
-
-
-app = FastAPI(debug=DEBUG, title="Prompt Optimization Backend", lifespan=lifespan)
+app = FastAPI(
+    debug=config.DEBUG,
+    title="Prompt Optimization Backend",
+    lifespan=optimization.lifespan,
+)
 app.include_router(runs.router, prefix="/api/runs")
+app.include_router(optimization.router, prefix="/api/optimization")
 
 
-# start optimization
-@app.get("/api/run")
-def run(num_iterations: int, background_tasks: BackgroundTasks) -> str:
-    background_tasks.add_task(backend.run_optimization, num_iterations)
-    return f"Running optimization with {num_iterations} iterations."
-
-
-# get progress
-@app.get("/api/progress")
-def get_run_progress() -> str:
-    result = backend.get_progress()
-    return result
-
-
-# get run state
-@app.get("/api/status")
-async def get_run_status() -> bool:
-    return backend._running
-
-
-# get current genealogy of prompts
-@app.get("/api/get_tree")
-async def get_family() -> dict:
-    return backend.optimizer.family_tree
-
-
-if DEBUG:
+if config.DEBUG:
 
     @app.get("/")
     @app.get("/static/{_:path}")
diff --git a/api/optimization.py b/api/optimization.py
index 2241ebc7c0f64bf231797603200474e9fd059f8c..403c83611e2e2a4ace683d59a90b7f35db88c91f 100644
--- a/api/optimization.py
+++ b/api/optimization.py
@@ -88,15 +88,18 @@ class MultiProcessOptimizer:
     def _call_evaluation_model(self, *args, **kwargs):
         return self._evaluation_model(*args, **kwargs)
 
-    def run_optimization(self, num_iterations: int) -> str:
+    def run_optimization(self, num_iterations: int):
         self._running = True
-        self.optimizer
         self.optimizer.run(num_iterations, debug=self.debug)
         self._running = False
 
     def get_progress(self):
-        if hasattr(self.optimizer, "iterations_pbar"):
-            result = str(self.optimizer.iterations_pbar)
-        else:
-            result = "Optimization has not run yet."
-        return result
+        if self.optimizer.iterations_pbar is not None:
+            return {
+                "current": self.optimizer.iterations_pbar.n,
+                "total": self.optimizer.iterations_pbar.total,
+            }
+
+    def get_run_name(self):
+        if self.optimizer.run_directory is not None:
+            return self.optimizer.run_directory.name
diff --git a/api/routers/optimization.py b/api/routers/optimization.py
new file mode 100644
index 0000000000000000000000000000000000000000..c706fac360c94b57317c5bc2e9668bdc0c5df3d8
--- /dev/null
+++ b/api/routers/optimization.py
@@ -0,0 +1,42 @@
+from contextlib import asynccontextmanager
+
+from fastapi import APIRouter, BackgroundTasks, FastAPI
+
+from api import config
+from api.optimization import MultiProcessOptimizer
+
+router = APIRouter()
+backend: MultiProcessOptimizer | None = None
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    global backend
+    # Load the backend (which runs models in a separate process)
+    backend = MultiProcessOptimizer(debug=config.DEBUG)
+    with backend:
+        actions = [("/api/action/evolve", backend.optimizer.evolve)]
+        for path, target in actions:
+            app.add_api_route(path, target)
+        app.openapi_schema = None
+        app.openapi()
+
+        yield
+    # TODO somehow not all resources are released upon uvicorn reload, need to investigate further...
+
+
+@router.post("/run")
+def run(num_iterations: int, background_tasks: BackgroundTasks) -> str:
+    assert backend is not None
+    background_tasks.add_task(backend.run_optimization, num_iterations)
+    return f"Running optimization with {num_iterations} iterations."
+
+
+@router.get("/status")
+def get_status():
+    assert backend is not None
+    return {
+        "running": backend._running,
+        "progress": backend.get_progress(),
+        "name": backend.get_run_name(),
+    }
diff --git a/optimization.py b/optimization.py
index 608a31628c9097ee33e47d5cea1133ddf5dc6f81..bbd6074c3112cceff7bdf98be0f24ce2e097a2cf 100644
--- a/optimization.py
+++ b/optimization.py
@@ -2,7 +2,7 @@ import json
 from abc import abstractmethod
 from itertools import zip_longest
 from pathlib import Path
-from typing import Any
+from typing import Any, Optional
 
 from tqdm import trange
 
@@ -43,15 +43,16 @@ def paraphrase_prompts(
 
 
 class PromptOptimization:
-    total_evaluation_usage: ModelUsage
-    total_evolution_usage: ModelUsage
-    run_directory: Path
+    total_evaluation_usage: ModelUsage | None = None
+    total_evolution_usage: ModelUsage | None = None
+    run_directory: Path | None = None
     # P contains the list of prompts at each generation
-    P: list[list[Prompt]]
+    P: list[list[Prompt]] | None = None
     # family_tree contains the relation of prompts to its parents
-    family_tree: dict[str, tuple[str, ...] | None]
+    family_tree: dict[str, tuple[str, ...] | None] | None = None
     # all_prompts contains a list of Prompt objects that took part in the optimization
-    all_prompts: dict[str, Prompt]
+    all_prompts: dict[str, Prompt] | None = None
+    iterations_pbar: Optional[trange] = None
 
     def __init__(
         self,