from fastapi import FastAPI, WebSocket
# FIX: AutoTokenizer / AutoModelForCausalLM were used in setup_models() but never
# imported, so DevonAI.__init__ raised NameError before the service could start.
from transformers import AutoTokenizer, AutoModelForCausalLM

from devon_consciousness import Devon, DevonConsciousness, DevonSubconscious
import asyncio
import torch


class DevonAI:
    """FastAPI wrapper around the Devon consciousness system.

    On construction it loads the local language models, wires them into the
    consciousness subsystem, and registers the HTTP/WebSocket routes.
    The ASGI application is exposed as ``self.app``.
    """

    def __init__(self):
        self.app = FastAPI()
        self.devon = Devon()  # Initialize consciousness system
        self.setup_models()
        self.setup_routes()

    def setup_models(self):
        """Load the base models/tokenizer from local disk and attach them.

        Raises:
            Exception: re-raised after logging if any model fails to load
                (e.g. missing files under /opt/models).
        """
        try:
            # Tokenizer and primary model (GPT-NeoX 20B), fp16 to halve memory.
            self.neox_tokenizer = AutoTokenizer.from_pretrained("/opt/models/gpt-neox-20B")
            self.neox_model = AutoModelForCausalLM.from_pretrained(
                "/opt/models/gpt-neox-20B",
                torch_dtype=torch.float16,
                local_files_only=True,  # never hit the network for weights
            )
            # Secondary model (Mistral 7B); note it shares the NeoX tokenizer
            # below — presumably intentional, but worth confirming upstream.
            self.mistral_model = AutoModelForCausalLM.from_pretrained(
                "/opt/models/Mistral-7B-v0.1",
                torch_dtype=torch.float16,
                local_files_only=True,
            )
            # Connect models to consciousness
            self.devon.conscious.legal_analysis.set_models(
                self.neox_model,
                self.mistral_model,
                self.neox_tokenizer,
            )
            print("✅ Models Loaded Successfully")
        except Exception as e:
            print(f"🚨 Error loading models: {e}")
            raise

    def setup_routes(self):
        """Register the HTTP and WebSocket endpoints on self.app."""

        @self.app.get("/health")
        async def health_check():
            # Liveness probe; also reports whether consciousness has a focus.
            return {
                "status": "Devon AI is operational",
                "consciousness_active": self.devon.conscious.active_focus is not None,
            }

        @self.app.websocket("/ws")
        async def websocket_endpoint(websocket: WebSocket):
            await self.handle_websocket(websocket)

        @self.app.post("/think")
        async def process_thought(data: dict):
            # Route a single request through the consciousness system.
            response = await self.devon.interact(data)
            return response

    async def handle_websocket(self, websocket: WebSocket):
        """Serve a WebSocket session: echo each JSON message through Devon.

        Loops until the client disconnects; any error (including normal
        disconnects, which raise) ends the session after being logged.
        """
        await websocket.accept()
        try:
            while True:
                data = await websocket.receive_json()
                response = await self.devon.interact(data)
                await websocket.send_json(response)
        except Exception as e:
            # Best-effort logging; WebSocketDisconnect also lands here.
            print(f"WebSocket error: {e}")


# Create Devon instance at import time so ASGI servers can find ``app``.
devon_ai = DevonAI()
app = devon_ai.app

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)