From 582562b5bd06c13f95473e5719ff611de4f6eb41 Mon Sep 17 00:00:00 2001
From: Daniel Grießhaber <griesshaber@hdm-stuttgart.de>
Date: Mon, 29 Jul 2024 10:26:23 +0200
Subject: [PATCH] update API package imports and build run options via argument parser
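
Switch the API modules to absolute evoprompt.* package imports so they
resolve correctly when evoprompt is used as an installed package. The
backend also stops constructing ad-hoc argparse Namespace objects for the
task and the Llama models and instead parses its run options through the
shared CLI argument_parser, keeping the API consistent with the
command-line entry point.

A minimal usage sketch of the changed constructor (hypothetical caller
code, not part of this patch; it only assumes the keyword-only signature
shown below and that the backend schedules coroutines onto the given loop,
as the .result() call in __enter__ suggests):

    import asyncio
    import threading

    from evoprompt.api.backend import MultiProcessOptimizer

    # run the event loop in a background thread so that futures scheduled
    # by the optimizer can complete while __enter__ blocks on them
    loop = asyncio.new_event_loop()
    threading.Thread(target=loop.run_forever, daemon=True).start()

    # run options (task, debug flag) are now parsed from argument_parser
    # inside __init__ instead of being passed in as Namespace objects
    with MultiProcessOptimizer(event_loop=loop, debug=True):
        ...  # models, task and the optimizer are set up in __enter__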

---
 evoprompt/api/backend.py              | 28 ++++++++++++++-------------
 evoprompt/api/main.py                 |  4 ++--
 evoprompt/api/routers/optimization.py |  2 +-
 3 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/evoprompt/api/backend.py b/evoprompt/api/backend.py
index 95a99bb..0595f7a 100644
--- a/evoprompt/api/backend.py
+++ b/evoprompt/api/backend.py
@@ -6,10 +6,11 @@ from typing import ClassVar
 
 from fastapi import FastAPI
 
-from api import config
-from evolution import GeneticAlgorithm
-from models import Llama, LLMModel
-from task import SentimentAnalysis
+from evoprompt.api import config
+from evoprompt.cli import argument_parser
+from evoprompt.evolution import GeneticAlgorithm
+from evoprompt.models import Llama, LLMModel
+from evoprompt.task import SentimentAnalysis
 
 
 class MultiProcessOptimizer:
@@ -29,7 +30,13 @@ class MultiProcessOptimizer:
         self, *, event_loop: asyncio.BaseEventLoop, debug: bool = False
     ) -> None:
         self.event_loop = event_loop
-        self.debug = debug
+        self.options = argument_parser.parse_args(
+            [
+                "--debug" if debug else "",
+                "--task",
+                SentimentAnalysis.shorthand,
+            ]
+        )
 
     def __enter__(self):
         # TODO allow to customize optimizer
@@ -48,8 +55,7 @@ class MultiProcessOptimizer:
         ).result()
 
         # currently fix task
-        options = Namespace(use_grammar=False, debug=self.debug)
-        task = SentimentAnalysis(evaluation_model, options)
+        task = SentimentAnalysis(evaluation_model, self.options)
 
         optimizer_class = GeneticAlgorithm
         # optimizer_class = DifferentialEvolution
@@ -59,7 +65,7 @@ class MultiProcessOptimizer:
             task=task,
             evolution_model=evolution_model,
             evaluation_model=evaluation_model,
-            run_options=options.__dict__,
+            run_options=self.options.__dict__,
         )
 
     def __exit__(self, exc_type, exc_value, exc_tb):
@@ -72,11 +78,7 @@ class MultiProcessOptimizer:
         if self._evolution_model is not None:
             raise Exception("Evolution model has already been initialized.")
 
-        # currently fix model
-        options = Namespace(
-            llama_path="./models/llama-2-13b-chat.Q5_K_M.gguf", chat=True
-        )
-        self._evolution_model = Llama(options)
+        self._evolution_model = Llama(self.options)
 
         if self._evaluation_model is not None:
             raise Exception("Evaluation model has already been initialized.")
diff --git a/evoprompt/api/main.py b/evoprompt/api/main.py
index cdb57b3..c9ca0a3 100644
--- a/evoprompt/api/main.py
+++ b/evoprompt/api/main.py
@@ -2,8 +2,8 @@ from fastapi import FastAPI, Request, Response
 from fastapi.staticfiles import StaticFiles
 from requests import request as make_request
 
-from api import backend, config
-from api.routers import optimization, runs
+from evoprompt.api import backend, config
+from evoprompt.api.routers import optimization, runs
 
 app = FastAPI(
     debug=config.DEBUG,
diff --git a/evoprompt/api/routers/optimization.py b/evoprompt/api/routers/optimization.py
index 8366dc9..247601e 100644
--- a/evoprompt/api/routers/optimization.py
+++ b/evoprompt/api/routers/optimization.py
@@ -1,6 +1,6 @@
 from fastapi import APIRouter, BackgroundTasks
 
-from api import backend
+from evoprompt.api import backend
 
 router = APIRouter()
 
-- 
GitLab