Skip to content
Snippets Groups Projects
Commit f73cc390 authored by Grießhaber Daniel's avatar Grießhaber Daniel :squid:
Browse files

add manual prompt evolution endpoints

parent 57f9c9ac
Branches integrate-frontend
No related tags found
No related merge requests found
import asyncio
from argparse import Namespace
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from functools import partial
from concurrent.futures import ThreadPoolExecutor
from contextlib import asynccontextmanager
from typing import ClassVar
from fastapi import FastAPI
from api import config
from evolution import GeneticAlgorithm
from models import Llama2, LLMModel
from task import SentimentAnalysis
......@@ -11,7 +15,7 @@ from task import SentimentAnalysis
class MultiProcessOptimizer:
    """Singleton facade that owns the optimizer and runs model calls on a worker executor."""

    # Singleton instance; None until the first construction (set in __new__).
    _instance: ClassVar["MultiProcessOptimizer | None"] = None
    # True while an optimization run is in progress.
    _running: bool = False
    # Executor used to run blocking model calls off the event-loop thread.
    # FIX: the diff residue declared this twice (old ProcessPoolExecutor line
    # and new ThreadPoolExecutor line); keep the ThreadPoolExecutor version.
    model_exec: ThreadPoolExecutor | None = None
    _evolution_model: LLMModel | None = None
    _evaluation_model: LLMModel | None = None
......@@ -21,8 +25,10 @@ class MultiProcessOptimizer:
cls._instance = super(MultiProcessOptimizer, cls).__new__(cls)
return cls._instance
def __init__(
    self, *, event_loop: asyncio.AbstractEventLoop, debug: bool = False
) -> None:
    """Initialize the optimizer facade.

    FIX: the diff residue kept both the old ``__init__(self, *, debug)``
    header and the new one; this resolves to the new signature. Also uses
    ``asyncio.AbstractEventLoop`` — ``BaseEventLoop`` is a non-public,
    deprecated alias.

    Args:
        event_loop: loop used later to schedule executor work from async code.
        debug: flag enabling debug behavior elsewhere in the class.
    """
    self.event_loop = event_loop
    self.debug = debug
def __enter__(self):
......@@ -103,3 +109,23 @@ class MultiProcessOptimizer:
def get_run_name(self):
    """Return the name of the optimizer's run directory, or None when unset."""
    run_dir = self.optimizer.run_directory
    return run_dir.name if run_dir is not None else None
async def evolve(self, prompt1: str, prompt2: str):
    """Run one prompt-evolution step on the worker executor and await its result.

    Offloads the blocking ``optimizer.evolve`` call so the event loop stays
    responsive; the return value is whatever ``optimizer.evolve`` produces.
    """
    step = partial(self.optimizer.evolve, prompt1, prompt2)
    return await self.event_loop.run_in_executor(self.model_exec, step)
# Module-level handle to the optimizer singleton; populated during app startup.
instance: MultiProcessOptimizer | None = None


@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: build the optimizer on startup, release it on shutdown."""
    global instance
    # Bind the running loop so the optimizer can schedule executor work later.
    loop = asyncio.get_running_loop()
    instance = MultiProcessOptimizer(event_loop=loop, debug=config.DEBUG)
    # __enter__/__exit__ hold the optimizer's resources for the app's lifetime.
    with instance:
        yield
......@@ -2,13 +2,13 @@ from fastapi import FastAPI, Request, Response
from fastapi.staticfiles import StaticFiles
from requests import request as make_request
from api import config
from api import backend, config
from api.routers import optimization, runs
# FIX: the diff residue passed ``lifespan=`` twice (old ``optimization.lifespan``
# and new ``backend.lifespan``), which is a SyntaxError (duplicate keyword
# argument); keep the new value — api.backend owns the optimizer singleton.
app = FastAPI(
    debug=config.DEBUG,
    title="Prompt Optimization Backend",
    lifespan=backend.lifespan,
)
# Mount the API routers under their URL prefixes.
app.include_router(runs.router, prefix="/api/runs")
app.include_router(optimization.router, prefix="/api/optimization")
......
from contextlib import asynccontextmanager
from fastapi import APIRouter, BackgroundTasks
from fastapi import APIRouter, BackgroundTasks, FastAPI
from api import config
from api.optimization import MultiProcessOptimizer # start optimization
from api import backend
# Router for the optimization endpoints; the shared optimizer singleton lives
# in the api.backend module (see ``backend.instance`` in the handlers below).
# FIX: dropped the stale ``backend: MultiProcessOptimizer | None = None`` line —
# it shadowed the imported ``api.backend`` module with None, so every
# ``backend.instance`` access below would raise AttributeError.
router = APIRouter()
@asynccontextmanager
async def lifespan(app: FastAPI):
    # Lifespan hook: construct the shared optimizer on startup and tear it
    # down on shutdown, registering its model-backed actions as routes.
    global backend
    # Load the backend (which runs models in a separate process)
    backend = MultiProcessOptimizer(debug=config.DEBUG)
    with backend:
        # Expose optimizer actions as additional API routes that share the
        # optimizer instance created above.
        actions = [("/api/action/evolve", backend.optimizer.evolve)]
        for path, target in actions:
            app.add_api_route(path, target)
        # Invalidate the cached OpenAPI schema and rebuild it so the routes
        # added at runtime appear in /docs.
        app.openapi_schema = None
        app.openapi()
        yield


# TODO: not all resources are released on uvicorn reload — needs further investigation.
@router.post("/run")
def run(num_iterations: int, background_tasks: BackgroundTasks) -> str:
    """Start an optimization run of ``num_iterations`` iterations in the background.

    FIX: the diff residue kept both the old body (``backend.run_optimization``)
    and the new one (``backend.instance.run_optimization``); resolved to the
    new version, where ``backend`` is the api.backend module.
    """
    # The optimizer singleton is created by the application's lifespan hook.
    assert backend.instance is not None
    background_tasks.add_task(backend.instance.run_optimization, num_iterations)
    return f"Running optimization with {num_iterations} iterations."
@router.get("/actions/evolve")
async def evolve(prompt1: str, prompt2: str, background_tasks: BackgroundTasks) -> dict:
    """Evolve two prompts into a new one and report the model's token usage.

    FIX: the return annotation said ``str`` but the handler returns a JSON
    object; FastAPI validates responses against the annotation, so ``-> dict``
    is required for the response to serialize.
    """
    # NOTE(review): ``background_tasks`` is unused here; kept to preserve the
    # endpoint signature — confirm whether it can be removed.
    assert backend.instance is not None
    result, usage = await backend.instance.evolve(prompt1, prompt2)
    return {
        "result": result,
        "usage": usage,
    }
@router.get("/status")
def get_status():
    """Report whether a run is active, plus its progress and run name.

    FIX: the diff residue produced a dict literal with every key duplicated
    (old ``backend.*`` and new ``backend.instance.*`` entries) and a stale
    ``assert backend is not None``; resolved to the new version.
    """
    assert backend.instance is not None
    return {
        "running": backend.instance._running,
        "progress": backend.instance.get_progress(),
        "name": backend.instance.get_run_name(),
    }
Subproject commit 6f8310669f705264bc136b7d15a551bbfc2ebf08
Subproject commit 01e209a3b80adf90f17b0697522d2ad644c0ba15
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment