| """ | |
| FastAPI backend for LionGuard moderation | |
| """ | |
| from fastapi import FastAPI, HTTPException | |
| from fastapi.staticfiles import StaticFiles | |
| from fastapi.responses import FileResponse | |
| from fastapi.middleware.cors import CORSMiddleware | |
| import os | |
| from typing import List | |
| from models import ( | |
| ModerateRequest, | |
| ModerateResponse, | |
| FeedbackRequest, | |
| FeedbackResponse, | |
| ChatRequest, | |
| ChatResponse, | |
| CategoryScore, | |
| ChatHistories, | |
| ) | |
| from services import ( | |
| analyze_text, | |
| submit_feedback, | |
| process_chat_message, | |
| ) | |
app = FastAPI(
    title="LionGuard API",
    description="Multilingual moderation and guardrail comparison",
    version="2.0.0",
)
# Enable CORS for development; restrict allow_origins before deploying to production
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Path to the frontend directory, relative to this file
FRONTEND_DIR = os.path.join(os.path.dirname(__file__), "../frontend")
@app.post("/api/moderate", response_model=ModerateResponse)  # route path assumed
async def moderate_text(request: ModerateRequest):
    """
    Analyze text for moderation risks using the LionGuard models.
    """
    try:
        result = analyze_text(request.text, request.model)
        return ModerateResponse(**result)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error analyzing text: {str(e)}")
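# A sketch of how a client might call this endpoint, assuming the
# /api/moderate path above and the `text` and `model` fields visible
# in the handler ("lionguard-2" is a placeholder model id):
#
#   import requests
#   resp = requests.post(
#       "http://localhost:8000/api/moderate",
#       json={"text": "hello", "model": "lionguard-2"},
#   )
#   resp.raise_for_status()
#   print(resp.json())  # a ModerateResponse payload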
@app.post("/api/feedback", response_model=FeedbackResponse)  # route path assumed
async def send_feedback(request: FeedbackRequest):
    """
    Submit user feedback on a moderation result.
    """
    try:
        result = submit_feedback(request.text_id, request.agree)
        return FeedbackResponse(**result)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error submitting feedback: {str(e)}")
@app.post("/api/chat", response_model=ChatResponse)  # route path assumed
async def chat_comparison(request: ChatRequest):
    """
    Compare guardrails across three approaches:
    - No moderation
    - OpenAI moderation
    - LionGuard moderation
    """
    try:
        # Convert request histories to lists of dicts for the service layer
        # (.dict() is the Pydantic v1 API; on v2 this would be model_dump())
        history_no_mod = [msg.dict() for msg in request.histories.no_moderation]
        history_openai = [msg.dict() for msg in request.histories.openai_moderation]
        history_lg = [msg.dict() for msg in request.histories.lionguard]

        # Run the message through all three pipelines
        updated_no_mod, updated_openai, updated_lg, lg_score = await process_chat_message(
            request.message,
            request.model,
            history_no_mod,
            history_openai,
            history_lg,
        )

        # Convert back to the response format
        return ChatResponse(
            histories=ChatHistories(
                no_moderation=updated_no_mod,
                openai_moderation=updated_openai,
                lionguard=updated_lg,
            ),
            lionguard_score=lg_score,
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error processing chat: {str(e)}")
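# For reference, a request body for this endpoint would look roughly
# like the sketch below; the history keys come from ChatHistories above,
# while the per-message fields (e.g. role/content) are defined in
# models.py and are assumed here:
#
#   {
#       "message": "hello",
#       "model": "lionguard-2",          # placeholder model id
#       "histories": {
#           "no_moderation": [],
#           "openai_moderation": [],
#           "lionguard": []
#       }
#   }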
# Serve static frontend files
app.mount("/static", StaticFiles(directory=FRONTEND_DIR), name="static")

@app.get("/")  # root route assumed
async def serve_frontend():
    """
    Serve the main HTML page.
    """
    index_path = os.path.join(FRONTEND_DIR, "index.html")
    if os.path.exists(index_path):
        return FileResponse(index_path)
    raise HTTPException(status_code=404, detail="Frontend not found")
@app.get("/health")  # route path assumed
async def health_check():
    """
    Health check endpoint.
    """
    return {"status": "healthy", "service": "lionguard-api"}
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)
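# Minimal local-development sketch, assuming this file is named main.py
# and the routes reconstructed above:
#
#   $ uvicorn main:app --reload        # or: python main.py
#   $ curl http://localhost:8000/health
#   {"status": "healthy", "service": "lionguard-api"}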