import os

# Point the Hugging Face caches at /tmp, which is writable on most container
# hosts. These must be set before transformers is imported.
os.environ["HF_HOME"] = "/tmp"
os.environ["TRANSFORMERS_CACHE"] = "/tmp/huggingface"

from fastapi import FastAPI, Request
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
import torch

app = FastAPI()

# Base model and the LoRA adapter trained on top of it.
base_model_id = "unsloth/gemma-3n-E2B"
lora_adapter_id = "Rudra1729/lora-adapters"

# Load the base model in half precision, attach the LoRA adapter, and put the
# combined model in eval mode for inference.
base_model = AutoModelForCausalLM.from_pretrained(base_model_id, torch_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained(base_model_id)
model = PeftModel.from_pretrained(base_model, lora_adapter_id)
model.eval()
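
# Optional sketch (not part of the original service): PEFT can fold the adapter
# weights into the base model so each forward pass skips the adapter
# indirection. merge_and_unload() returns a plain transformers model.
# model = model.merge_and_unload()
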
@app.get("/") |
|
def read_root(): |
|
return {"message": "Nidaan-AI FastAPI backend is running. Use /generate to interact with the model."} |
|

@app.post("/generate")
async def generate_text(request: Request):
    body = await request.json()
    prompt = body.get("prompt", "")
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # Note: generate() is compute-bound and blocks the event loop while it runs.
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=256)
    result = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return {"response": result}


if __name__ == "__main__":
    import uvicorn

    # Honor the PORT env var set by most container platforms; default to 8080.
    port = int(os.environ.get("PORT", 8080))
    uvicorn.run(app, host="0.0.0.0", port=port)