igmeMarcial committed
Commit a6b8ede · 1 Parent(s): 4ff9d20
Files changed (1)
  1. app.py +5 -3
app.py CHANGED
@@ -1,6 +1,6 @@
 import torch
 from PIL import Image
-from fastapi import FastAPI, HTTPException
+from fastapi import FastAPI, HTTPException,Request
 from fastapi.middleware.cors import CORSMiddleware
 from transformers import AutoProcessor, AutoModelForVision2Seq
 from pydantic import BaseModel
@@ -33,7 +33,7 @@ class PredictRequest(BaseModel):
     imageBase64URL: str


-@app.post("/v1/chat/completions")
+@app.post("/predict")
 async def predict(request: PredictRequest):
     try:
         header, base64_string = request.imageBase64URL.split(',', 1)
@@ -62,5 +62,7 @@ async def predict(request: PredictRequest):


 @app.get("/")
-async def read_root():
+async def read_root(request: Request):
+    current_path = request.url.path
+    print(f"Received GET request at path: {current_path}")
     return {"message": "SmolVLM-500M API is running!"}
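For context, the commit renames the prediction route from /v1/chat/completions to /predict and keeps the imageBase64URL field on PredictRequest (the server splits the data URL on its first comma). A minimal client sketch against the updated routes might look like the following; the API_URL, the image file name, and the use of the requests library are assumptions, not part of the commit.

import base64
import requests  # assumed HTTP client; any client library would do

# Hypothetical base URL; the actual host of this Space is not shown in the diff.
API_URL = "http://localhost:7860"

# Build a data-URL string matching the imageBase64URL field of PredictRequest.
# The server splits on the first comma, so the "data:...;base64," header is expected.
with open("example.jpg", "rb") as f:  # placeholder image file
    b64 = base64.b64encode(f.read()).decode("utf-8")
payload = {"imageBase64URL": f"data:image/jpeg;base64,{b64}"}

# POST to the renamed /predict route (previously /v1/chat/completions).
resp = requests.post(f"{API_URL}/predict", json=payload)
print(resp.status_code, resp.json())

# Health check against the root route, whose handler now logs the request path.
print(requests.get(f"{API_URL}/").json())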