from fastapi import FastAPI
from pydantic import BaseModel
from model_loader import load_model
import torch
import logging
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
app = FastAPI()
# Global variables for model and tokenizer
tokenizer = None
model = None

@app.on_event("startup")
async def startup_event():
    global tokenizer, model
    logger.info("Loading model and tokenizer...")
    try:
        tokenizer, model = load_model()
        model.eval()  # inference mode: disables dropout and other training-only behavior
        logger.info("Model and tokenizer loaded successfully!")
        logger.info("FastAPI application is ready to serve requests")
    except Exception as e:
        logger.error(f"Failed to load model: {e}")
        raise
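
# For reference: `model_loader.load_model` lives in a separate module that is
# not shown in this file. It is expected to return a (tokenizer, model) pair.
# A minimal sketch of what it might look like, assuming the fine-tuned weights
# are a Hugging Face causal-LM checkpoint (the checkpoint name below is a
# placeholder, not the real repo id):
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#
#     def load_model():
#         checkpoint = "Nishath2025/qwen-finetuned"  # placeholder checkpoint name
#         tokenizer = AutoTokenizer.from_pretrained(checkpoint)
#         model = AutoModelForCausalLM.from_pretrained(
#             checkpoint, torch_dtype="auto", device_map="auto"
#         )
#         return tokenizer, model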

class PromptRequest(BaseModel):
    """Request body for /generate, e.g. {"prompt": "Hello"}."""

    prompt: str
@app.get("/")
async def root():
return {"message": "Qwen Finetuned Model API is running!"}
@app.get("/health")
async def health_check():
if model is None or tokenizer is None:
return {"status": "unhealthy", "message": "Model not loaded"}
return {"status": "healthy", "message": "Model is ready"}

@app.post("/generate")
async def generate_text(request: PromptRequest):
    if model is None or tokenizer is None:
        return {"error": "Model not loaded yet"}
    prompt = request.prompt
    if not prompt:
        return {"error": "Prompt is missing"}
    try:
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        with torch.no_grad():
            output = model.generate(
                **inputs,
                max_new_tokens=400,
                do_sample=True,  # without this, temperature is ignored (greedy decoding)
                temperature=0.3,
                repetition_penalty=1.1,
            )
        # Decode only the newly generated tokens. Slicing by token count is more
        # robust than slicing the decoded string by len(prompt), since the
        # tokenizer does not always round-trip text character-for-character.
        prompt_length = inputs["input_ids"].shape[1]
        generated_text = tokenizer.decode(output[0][prompt_length:], skip_special_tokens=True).strip()
        return {"response": generated_text}
    except Exception as e:
        logger.error(f"Error during text generation: {e}")
        return {"error": f"Generation failed: {str(e)}"}

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)
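
# Example usage once the server is running (the port matches the uvicorn call
# above; the prompt text is arbitrary):
#
#   curl http://localhost:7860/health
#   curl -X POST http://localhost:7860/generate \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "Explain what a tokenizer does."}'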