Instructions to use halimb/sam-vit-base with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Transformers
How to use halimb/sam-vit-base with Transformers:
# Use a pipeline as a high-level helper:
from transformers import pipeline
pipe = pipeline("mask-generation", model="halimb/sam-vit-base")

# Or load the model directly:
from transformers import AutoProcessor, AutoModelForMaskGeneration
processor = AutoProcessor.from_pretrained("halimb/sam-vit-base")
model = AutoModelForMaskGeneration.from_pretrained("halimb/sam-vit-base")

- Notebooks
- Google Colab
- Kaggle
File size: 974 Bytes
"""Custom Inference Endpoints handler module for SAM mask generation.

Module-level setup: imports plus two startup diagnostics confirming the
handler loaded and which transformers version the endpoint is running.
"""
from typing import Dict, List, Any
import base64
from io import BytesIO

import requests
import transformers
from PIL import Image
from transformers import pipeline

# Startup diagnostics (visible in the endpoint's container logs).
print('IN HANDLER...')
print(transformers.__version__)
class EndpointHandler():
    """Inference Endpoints handler running SAM mask generation on a
    base64-encoded input image."""

    def __init__(self, path=""):
        """Build the mask-generation pipeline.

        Args:
            path: local model directory provided by the endpoint runtime;
                  falls back to the upstream checkpoint when empty.
        """
        # Bug fix: the original omitted the model argument entirely, so the
        # pipeline fell back to the task's default model instead of the one
        # deployed at `path`.
        self.pipe = pipeline(
            "mask-generation",
            model=path or "halimb/sam-vit-base",
            device=0,
            points_per_batch=256,
        )

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Decode the base64 image in ``data["inputs"]`` and return the
        generated masks.

        Raises:
            ValueError: if no image payload is present.
        """
        base64_image = data.pop("inputs", data)
        if base64_image is None:
            raise ValueError("No image provided")
        # Strip any data-URI prefix (e.g. "data:image/png;base64,"); the
        # original only handled the JPEG variant.
        if base64_image.startswith('data:'):
            base64_image = base64_image.split(',', 1)[-1]
        image_bytes = base64.b64decode(base64_image)
        image = Image.open(BytesIO(image_bytes))
        masks = self.pipe(image, points_per_batch=256)
        # Bug fix: the original returned the undefined name ``depth``,
        # which raised NameError on every request.
        return masks