Instructions to use halimb/sam-vit-base with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Transformers
How to use halimb/sam-vit-base with Transformers:
# Use a pipeline as a high-level helper
from transformers import pipeline
pipe = pipeline("mask-generation", model="halimb/sam-vit-base")

# Load model directly
from transformers import AutoProcessor, AutoModelForMaskGeneration
processor = AutoProcessor.from_pretrained("halimb/sam-vit-base")
model = AutoModelForMaskGeneration.from_pretrained("halimb/sam-vit-base")
- Notebooks
- Google Colab
- Kaggle
# Inference-endpoint handler module setup: imports plus startup diagnostics.
# Deduplicated (the original imported `pipeline` and `Image` twice) and
# grouped stdlib / third-party per convention.
import base64
from io import BytesIO
from typing import Any, Dict, List

import requests
import transformers
from PIL import Image
from transformers import pipeline

# Emitted at import time so endpoint logs confirm the handler module loaded
# and which transformers version is installed.
print('IN HANDLER...')
print(transformers.__version__)
class EndpointHandler():
    """Custom inference handler that runs SAM mask generation on a
    base64-encoded input image and returns the generated masks."""

    def __init__(self, path=""):
        # Load the mask-generation pipeline from the model weights deployed
        # at `path`; fall back to the public repo when `path` is empty.
        # (The original ignored `path` entirely, so the deployed weights
        # were never used.)
        # NOTE(review): device=0 assumes a GPU is present — confirm for
        # CPU-only deployments.
        self.pipe = pipeline(
            "mask-generation",
            model=path or "halimb/sam-vit-base",
            device=0,
            points_per_batch=256,
        )

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Decode the base64 image payload in ``data["inputs"]`` and run
        mask generation on it.

        Args:
            data: request body; the image is read from the "inputs" key
                (falling back to ``data`` itself when the key is absent,
                matching the original behavior).

        Returns:
            The pipeline's mask-generation output for the image.

        Raises:
            ValueError: if no image payload is provided.
        """
        base64_image = data.pop("inputs", data)
        if base64_image is None:
            raise ValueError("No image provided")
        # Strip an optional data-URI prefix. Generalized from the original,
        # which only handled the exact "data:image/jpeg;base64," prefix.
        if isinstance(base64_image, str) and base64_image.startswith('data:image/'):
            base64_image = base64_image.split('base64,', 1)[-1]
        image_bytes = base64.b64decode(base64_image)
        image = Image.open(BytesIO(image_bytes))
        masks = self.pipe(image, points_per_batch=256)
        # Bug fix: the original returned the undefined name `depth`,
        # raising NameError on every request.
        return masks