from fastapi import FastAPI
from pydantic import BaseModel
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

app = FastAPI(title="DeepSeek R1 Distill Llama API")

# Load the tokenizer and model once at import time so every request reuses them.
# Model id assumes the Hugging Face 8B distilled checkpoint.
model_name = "deepseek-ai/DeepSeek-R1-Distill-Llama-8B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # half precision halves weight memory vs. float32
    device_map="auto",          # place weights on available GPU(s) automatically
)
model.eval()  # inference only: disables dropout and other training behavior
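
# Note: device_map="auto" requires the `accelerate` package, and the 8B
# checkpoint in float16 needs on the order of 16 GB of GPU memory for the
# weights alone.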

class GenerateRequest(BaseModel):
    prompt: str
    max_new_tokens: int = 100  # cap on newly generated tokens, not total length

@app.post("/generate")
def generate_text(request: GenerateRequest):
    inputs = tokenizer(request.prompt, return_tensors="pt").to(model.device)
    with torch.inference_mode():  # no gradients needed for generation
        outputs = model.generate(
            **inputs,
            max_new_tokens=request.max_new_tokens,  # max_length would count the prompt too
        )
    # Decode only the newly generated tokens so the prompt is not echoed back.
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    generated_text = tokenizer.decode(new_tokens, skip_special_tokens=True)
    return {"generated_text": generated_text}

@app.get("/")
def home():
    return {"message": "DeepSeek R1 API is running!"}