import os

# Redirect the Hugging Face caches to /tmp, the only writable path on Cloud Run.
# (transformers reads TRANSFORMERS_CACHE; HF_TRANSFORMERS_CACHE is not a recognized variable.)
os.environ["HF_HOME"] = "/tmp"
os.environ["TRANSFORMERS_CACHE"] = "/tmp/huggingface"
import torch
from fastapi import FastAPI, Request
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

app = FastAPI()

# Base model and LoRA adapter to pull from the Hugging Face Hub.
base_model_id = "unsloth/gemma-3n-E2B"
lora_adapter_id = "Rudra1729/lora-adapters"

# Load the base model in half precision, its tokenizer, and attach the adapter.
base_model = AutoModelForCausalLM.from_pretrained(base_model_id, torch_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained(base_model_id)
model = PeftModel.from_pretrained(base_model, lora_adapter_id)
model.eval()  # disable dropout for inference
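# Note: if the adapter never needs to be swapped out at runtime, the LoRA
# weights could be folded into the base model for slightly lower per-token
# latency. A sketch using PEFT's merge_and_unload():
#
#     model = model.merge_and_unload()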
@app.get("/")
def read_root():
    return {"message": "Nidaan-AI FastAPI backend is running. Use /generate to interact with the model."}
@app.post("/generate")
async def generate_text(request: Request):
    body = await request.json()
    prompt = body.get("prompt", "")
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=256)
    # Decode only the newly generated tokens, not the echoed prompt.
    result = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
    return {"response": result}
if __name__ == "__main__":
    import uvicorn

    # Cloud Run injects PORT; default to 8080 when running locally.
    port = int(os.environ.get("PORT", 8080))
    uvicorn.run(app, host="0.0.0.0", port=port)
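# Example request against a locally running server (assumes port 8080 and a
# "prompt" field in the JSON body, matching the handler above):
#
#     curl -X POST http://localhost:8080/generate \
#          -H "Content-Type: application/json" \
#          -d '{"prompt": "What is Nidaan-AI?"}'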