Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions docker-compose-ai-eng/solution-files/.dockerignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
.env
__pycache__
10 changes: 10 additions & 0 deletions docker-compose-ai-eng/solution-files/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Minimal Python base image for the FastAPI service.
FROM python:3.10-slim

WORKDIR /app

# Copy the dependency manifest first so the pip install layer stays cached
# until requirements.txt actually changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application source (.dockerignore excludes .env and __pycache__).
COPY . .

# Bind to 0.0.0.0 so the server is reachable from outside the container.
CMD ["uvicorn", "main:app", "--port", "8000", "--host", "0.0.0.0"]
30 changes: 30 additions & 0 deletions docker-compose-ai-eng/solution-files/compose.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
services:
  # FastAPI prompt-optimizer application.
  app:
    build: .
    container_name: optimizer
    ports:
      - "8000:8000"
    # Secrets (e.g. OPENAI_API_KEY) come from the untracked .env file.
    env_file:
      - .env
    environment:
      DB_HOST: db
      POSTGRES_DB: optimized_prompts
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
    depends_on:
      - db

  # PostgreSQL backing store for optimization logs.
  db:
    image: postgres:15
    container_name: optimizer-db
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_DB: optimized_prompts
    ports:
      - "5432:5432"
    volumes:
      # Named volume so data survives container recreation.
      - pgdata:/var/lib/postgresql/data

volumes:
  pgdata:
66 changes: 66 additions & 0 deletions docker-compose-ai-eng/solution-files/database.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
import psycopg2
import os
import time
import logging
from datetime import datetime, timezone

logger = logging.getLogger(__name__)


def get_db_connection():
    """Open a new psycopg2 connection from environment configuration.

    Each variable falls back to the compose-file default when unset.
    """
    params = {
        "host": os.getenv("DB_HOST", "db"),
        "port": int(os.getenv("DB_PORT", "5432")),
        "dbname": os.getenv("POSTGRES_DB", "optimized_prompts"),
        "user": os.getenv("POSTGRES_USER", "postgres"),
        "password": os.getenv("POSTGRES_PASSWORD", "postgres"),
    }
    return psycopg2.connect(**params)


def init_db(max_retries=5, retry_delay=2):
    """Create the optimization_logs table if it doesn't exist.

    The database container may still be starting when the app boots, so
    connection attempts are retried up to ``max_retries`` times with
    ``retry_delay`` seconds between attempts. The last OperationalError is
    re-raised when all retries are exhausted.
    """
    for attempt in range(1, max_retries + 1):
        conn = None
        try:
            conn = get_db_connection()
            # ``with conn`` commits on success and rolls back on error;
            # ``with ... cursor()`` closes the cursor either way.
            with conn, conn.cursor() as cur:
                cur.execute("""
                    CREATE TABLE IF NOT EXISTS optimization_logs (
                        id SERIAL PRIMARY KEY,
                        original_prompt TEXT NOT NULL,
                        optimized_prompt TEXT NOT NULL,
                        changes TEXT NOT NULL,
                        created_at TIMESTAMP NOT NULL DEFAULT NOW()
                    );
                """)
            logger.info("Database initialized successfully.")
            return
        except psycopg2.OperationalError:
            if attempt >= max_retries:
                raise
            logger.warning(
                f"Database not ready (attempt {attempt}/{max_retries}). "
                f"Retrying in {retry_delay}s..."
            )
            time.sleep(retry_delay)
        finally:
            # Close the connection even when table creation raised after a
            # successful connect — the original leaked it on that path.
            if conn is not None:
                conn.close()


def log_optimization(original_prompt, optimized_prompt, changes):
    """Write an optimization result to the database.

    Uses a try/finally so the connection is closed even when the INSERT
    raises (the original leaked it on failure); ``with conn`` commits on
    success and rolls back on error.
    """
    conn = get_db_connection()
    try:
        with conn, conn.cursor() as cur:
            cur.execute(
                """
                INSERT INTO optimization_logs
                (original_prompt, optimized_prompt, changes, created_at)
                VALUES (%s, %s, %s, %s);
                """,
                (original_prompt, optimized_prompt, changes, datetime.now(timezone.utc)),
            )
    finally:
        conn.close()
60 changes: 60 additions & 0 deletions docker-compose-ai-eng/solution-files/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel, Field
from service import optimize_prompt
import logging
from database import init_db, log_optimization

logger = logging.getLogger(__name__)

app = FastAPI()

# Create the logs table at import time; init_db retries internally while
# the database container finishes starting.
init_db()

@app.get("/health")
def health_check():
    """Liveness probe; always reports the service as up."""
    return dict(status="ok")

class PromptRequest(BaseModel):
    """Request body for POST /optimize."""

    prompt: str = Field(
        description="The original prompt to optimize"
    )
    goal: str = Field(
        description="What the prompt should accomplish"
    )
    # Reject unknown fields so client typos fail validation instead of
    # being silently ignored.
    model_config = {"extra": "forbid"}

class PromptResponse(BaseModel):
    """Response body for POST /optimize."""

    original_prompt: str = Field(
        description="The original prompt that was submitted"
    )
    optimized_prompt: str = Field(
        description="The improved version of the prompt"
    )
    changes: str = Field(
        description="Explanation of what was improved and why"
    )

@app.post("/optimize", response_model=PromptResponse)
def optimize_prompt_endpoint(request: PromptRequest):
    """Optimize a prompt via the LLM and persist the result.

    Maps malformed upstream output (ValueError) to 502 and any other
    failure to 500. A logging failure never fails the request.
    """
    try:
        result = optimize_prompt(request.prompt, request.goal)
    except ValueError as e:
        # logger.exception records the full traceback, which the original
        # f-string logger.error call discarded.
        logger.exception("Optimization failed")
        raise HTTPException(
            status_code=502,
            detail="Invalid upstream LLM response. Please retry.",
        ) from e
    except Exception as e:
        logger.exception("Unexpected error")
        raise HTTPException(
            status_code=500,
            detail="Internal server error. Please try again."
        ) from e

    # Best-effort persistence: a database hiccup should not fail the call.
    try:
        log_optimization(
            result["original_prompt"],
            result["optimized_prompt"],
            result["changes"],
        )
    except Exception:
        logger.exception("Failed to log optimization")

    return PromptResponse(**result)
6 changes: 6 additions & 0 deletions docker-compose-ai-eng/solution-files/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
fastapi==0.133.0
uvicorn==0.40.0
openai==2.26.0
pydantic==2.11.7
python-dotenv==1.1.0
psycopg2-binary==2.9.10
60 changes: 60 additions & 0 deletions docker-compose-ai-eng/solution-files/service.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
from openai import OpenAI
from dotenv import load_dotenv
import json
import os

# Pull OPENAI_API_KEY (and any other secrets) from a local .env file if present.
load_dotenv()

# Module-level client so one connection pool is reused across requests.
client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
    base_url="https://api.openai.com/v1"
)

def optimize_prompt(prompt: str, goal: str) -> dict:
    """Send a prompt to the LLM for optimization and return structured results.

    Returns a dict with "original_prompt", "optimized_prompt", and
    "changes" keys. Raises ValueError when the model reply is empty, is
    not valid JSON, or lacks a required field, so the API layer can map
    it to an upstream (502) error.
    """

    system_message = """You are a prompt engineering expert. Your job is to improve
prompts so they produce better results from language models.

You will receive an original prompt and a goal describing what the prompt should accomplish.
Return your response as a JSON object with exactly these fields:
- "optimized_prompt": the improved version of the prompt
- "changes": a brief explanation of what you improved and why

Return ONLY the JSON object. No markdown formatting, no extra text."""

    user_message = f"""Original prompt: {prompt}

Goal: {goal}

Optimize this prompt to better achieve the stated goal."""

    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": system_message},
            {"role": "user", "content": user_message}
        ],
        response_format={"type": "json_object"},
        temperature=0.7
    )

    result_text = response.choices[0].message.content

    # message.content can be None (e.g. a refusal or truncated response);
    # json.loads(None) would raise TypeError and surface as a 500 instead
    # of the intended ValueError -> 502 path.
    if not result_text:
        raise ValueError("LLM returned an empty response")

    try:
        result = json.loads(result_text)
    except json.JSONDecodeError as e:
        raise ValueError(
            f"LLM returned invalid JSON: {result_text[:200]}"
        ) from e

    if "optimized_prompt" not in result or "changes" not in result:
        raise ValueError(
            f"LLM response missing required fields. Got: {list(result.keys())}"
        )

    return {
        "original_prompt": prompt,
        "optimized_prompt": result["optimized_prompt"],
        "changes": result["changes"]
    }
10 changes: 10 additions & 0 deletions docker-compose-ai-eng/starter-files/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Minimal Python base image for the FastAPI service.
FROM python:3.10-slim

WORKDIR /app

# Copy the dependency manifest first so the pip install layer stays cached
# until requirements.txt actually changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Only the two application modules are needed at this stage.
COPY main.py service.py ./

# Bind to 0.0.0.0 so the server is reachable from outside the container.
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
48 changes: 48 additions & 0 deletions docker-compose-ai-eng/starter-files/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel, Field
from service import optimize_prompt
import logging

# Module-level logger; handler/level configuration is left to the runtime.
logger = logging.getLogger(__name__)

app = FastAPI()

@app.get("/health")
def health_check():
    """Report service liveness."""
    payload = {"status": "ok"}
    return payload

class PromptRequest(BaseModel):
    """Request body for POST /optimize."""

    prompt: str = Field(
        description="The original prompt to optimize"
    )
    goal: str = Field(
        description="What the prompt should accomplish"
    )
    # Reject unknown fields so client typos fail validation instead of
    # being silently ignored.
    model_config = {"extra": "forbid"}

class PromptResponse(BaseModel):
    """Response body for POST /optimize."""

    original_prompt: str = Field(
        description="The original prompt that was submitted"
    )
    optimized_prompt: str = Field(
        description="The improved version of the prompt"
    )
    changes: str = Field(
        description="Explanation of what was improved and why"
    )

@app.post("/optimize", response_model=PromptResponse)
def optimize_prompt_endpoint(request: PromptRequest):
    """Optimize a prompt via the LLM.

    Maps malformed upstream output (ValueError) to 502 and any other
    failure to 500.
    """
    try:
        result = optimize_prompt(request.prompt, request.goal)
    except ValueError as e:
        # logger.exception records the full traceback, which the original
        # f-string logger.error call discarded.
        logger.exception("Optimization failed")
        raise HTTPException(
            status_code=502,
            detail="Invalid upstream LLM response. Please retry.",
        ) from e
    except Exception as e:
        logger.exception("Unexpected error")
        raise HTTPException(
            status_code=500,
            detail="Internal server error. Please try again."
        ) from e

    return PromptResponse(**result)
5 changes: 5 additions & 0 deletions docker-compose-ai-eng/starter-files/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
fastapi==0.133.0
uvicorn==0.40.0
openai==2.26.0
pydantic==2.11.7
python-dotenv==1.1.0
60 changes: 60 additions & 0 deletions docker-compose-ai-eng/starter-files/service.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
from openai import OpenAI
from dotenv import load_dotenv
import json
import os

# Pull OPENAI_API_KEY (and any other secrets) from a local .env file if present.
load_dotenv()

# Module-level client so one connection pool is reused across requests.
client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
    base_url="https://api.openai.com/v1"
)

def optimize_prompt(prompt: str, goal: str) -> dict:
    """Send a prompt to the LLM for optimization and return structured results.

    Returns a dict with "original_prompt", "optimized_prompt", and
    "changes" keys. Raises ValueError when the model reply is empty, is
    not valid JSON, or lacks a required field, so the API layer can map
    it to an upstream (502) error.
    """

    system_message = """You are a prompt engineering expert. Your job is to improve
prompts so they produce better results from language models.

You will receive an original prompt and a goal describing what the prompt should accomplish.
Return your response as a JSON object with exactly these fields:
- "optimized_prompt": the improved version of the prompt
- "changes": a brief explanation of what you improved and why

Return ONLY the JSON object. No markdown formatting, no extra text."""

    user_message = f"""Original prompt: {prompt}

Goal: {goal}

Optimize this prompt to better achieve the stated goal."""

    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": system_message},
            {"role": "user", "content": user_message}
        ],
        response_format={"type": "json_object"},
        temperature=0.7
    )

    result_text = response.choices[0].message.content

    # message.content can be None (e.g. a refusal or truncated response);
    # json.loads(None) would raise TypeError and surface as a 500 instead
    # of the intended ValueError -> 502 path.
    if not result_text:
        raise ValueError("LLM returned an empty response")

    try:
        result = json.loads(result_text)
    except json.JSONDecodeError as e:
        raise ValueError(
            f"LLM returned invalid JSON: {result_text[:200]}"
        ) from e

    if "optimized_prompt" not in result or "changes" not in result:
        raise ValueError(
            f"LLM response missing required fields. Got: {list(result.keys())}"
        )

    return {
        "original_prompt": prompt,
        "optimized_prompt": result["optimized_prompt"],
        "changes": result["changes"]
    }