input (string, length 33–5k) | output (string, length 32–5k)
---|---
"""
This script translates the queries in the MS MARCO dataset to the defined target languages.
For machine translation, we use EasyNMT: https://github.com/UKPLab/EasyNMT
You can install it via: pip install easynmt
Usage:
python translate_queries.py [target_language]
"""
import logging
import os
import sys
import tarfile
from easynmt import EasyNMT
from sentence_transformers import LoggingHandler, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
target_lang = sys.argv[1]
output_folder = "multilingual-data"
data_folder = "../msmarco-data"
output_filename = os.path.join(output_folder, "train_queries.en-{}.tsv".format(target_lang))
os.makedirs(output_folder, exist_ok=True)
## Does the output file exist? If yes, read it so we can continue the translation
translated_qids = set()
if os.path.exists(output_filename):
with open(output_filename, "r", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
translated_qids.add(splits[0])
### Now we read the MS Marco dataset
os.makedirs(data_folder, exist_ok=True)
# Read qrels file for relevant positives per query
train_queries = {}
qrels_train = os.path.join(data_folder, "qrels.train.tsv")
if not os.path.exists(qrels_train):
util.http_get("https://msmarco.z22.web.core.windows.net/msmarcoranking/qrels.train.tsv", qrels_train)
with open(qrels_train) as fIn:
for line in fIn:
qid, _, pid, _ = line.strip().split()
if qid not in translated_qids:
train_queries[qid] = None
# Read all queries
queries_filepath = os.path.join(data_folder, "queries.train.tsv")
if not os.path.exists(queries_filepath):
tar_filepath = os.path.join(data_folder, "queries.tar.gz")
if not os.path.exists(tar_filepath):
logging.info("Download queries.tar.gz")
util.http_get("https://msmarco.z22.web.core.windows.net/msmarcoranking/queries.tar.gz", tar_filepath)
with tarfile.open(tar_filepath, "r:gz") as tar:
tar.extractall(path=data_folder)
with open(queries_filepath, "r", encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
if qid in train_queries:
train_queries[qid] = query.strip()
qids = [qid for qid in train_queries if train_queries[qid] is not None]
queries = [train_queries[qid] for qid in qids]
# Define our translation model
translation_model = EasyNMT("opus-mt")
print("Start translation of {} queries.".format(len(queries)))
print("This can take a while. But you can stop this script at any point")
with open(output_filename, "a" if os.path.exists(output_filename) else "w", encoding="utf8") as fOut:
for qid, query, translated_query in zip(
qids,
queries,
translation_model.translate_stream(
queries,
source_lang="en",
target_lang=target_lang,
beam_size=2,
perform_sentence_splitting=False,
chunk_size=256,
batch_size=64,
),
):
fOut.write("{}\t{}\n".format(qid, translated_query.replace("\t", " ")))
fOut.flush()
|
"""
This script translates the queries in the MS MARCO dataset to the defined target languages.
For machine translation, we use EasyNMT: https://github.com/UKPLab/EasyNMT
You can install it via: pip install easynmt
Usage:
python translate_queries.py [target_language]
"""
import os
from sentence_transformers import LoggingHandler, util
import logging
import tarfile
from easynmt import EasyNMT
import sys
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
target_lang = sys.argv[1]
output_folder = "multilingual-data"
data_folder = "../msmarco-data"
output_filename = os.path.join(output_folder, "train_queries.en-{}.tsv".format(target_lang))
os.makedirs(output_folder, exist_ok=True)
## Does the output file exist? If yes, read it so we can continue the translation
translated_qids = set()
if os.path.exists(output_filename):
with open(output_filename, "r", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
translated_qids.add(splits[0])
### Now we read the MS Marco dataset
os.makedirs(data_folder, exist_ok=True)
# Read qrels file for relevant positives per query
train_queries = {}
qrels_train = os.path.join(data_folder, "qrels.train.tsv")
if not os.path.exists(qrels_train):
util.http_get("https://msmarco.z22.web.core.windows.net/msmarcoranking/qrels.train.tsv", qrels_train)
with open(qrels_train) as fIn:
for line in fIn:
qid, _, pid, _ = line.strip().split()
if qid not in translated_qids:
train_queries[qid] = None
# Read all queries
queries_filepath = os.path.join(data_folder, "queries.train.tsv")
if not os.path.exists(queries_filepath):
tar_filepath = os.path.join(data_folder, "queries.tar.gz")
if not os.path.exists(tar_filepath):
logging.info("Download queries.tar.gz")
util.http_get("https://msmarco.z22.web.core.windows.net/msmarcoranking/queries.tar.gz", tar_filepath)
with tarfile.open(tar_filepath, "r:gz") as tar:
tar.extractall(path=data_folder)
with open(queries_filepath, "r", encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
if qid in train_queries:
train_queries[qid] = query.strip()
qids = [qid for qid in train_queries if train_queries[qid] is not None]
queries = [train_queries[qid] for qid in qids]
# Define our translation model
translation_model = EasyNMT("opus-mt")
print("Start translation of {} queries.".format(len(queries)))
print("This can take a while. But you can stop this script at any point")
with open(output_filename, "a" if os.path.exists(output_filename) else "w", encoding="utf8") as fOut:
for qid, query, translated_query in zip(
qids,
queries,
translation_model.translate_stream(
queries,
source_lang="en",
target_lang=target_lang,
beam_size=2,
perform_sentence_splitting=False,
chunk_size=256,
batch_size=64,
),
):
fOut.write("{}\t{}\n".format(qid, translated_query.replace("\t", " ")))
fOut.flush()
|
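The two columns of this row differ only in import order; the behaviour is identical. The script is resumable: it records every already-translated query id and reopens the output file in append mode, so a restart skips work that was already done. A minimal sketch of that resume pattern, independent of EasyNMT and using a hypothetical `translate` callable, could look like this:

```python
import os


def resume_translate(input_tsv: str, output_tsv: str, translate) -> None:
    """Translate tab-separated qid/query rows, skipping qids already in output_tsv."""
    done = set()
    if os.path.exists(output_tsv):
        with open(output_tsv, encoding="utf8") as f_in:
            for line in f_in:
                done.add(line.split("\t", 1)[0])
    with open(output_tsv, "a", encoding="utf8") as f_out, open(input_tsv, encoding="utf8") as f_in:
        for line in f_in:
            qid, query = line.rstrip("\n").split("\t", 1)
            if qid in done:
                continue  # translated in a previous run
            # `translate` is a placeholder for any text-to-text translation function
            f_out.write("{}\t{}\n".format(qid, translate(query).replace("\t", " ")))
            f_out.flush()  # keep the file usable if the script is interrupted
```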
"""Wordpress reader."""
import warnings
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class WordpressReader(BaseReader):
"""
Wordpress reader. Reads data from a WordPress site.
Args:
url (str): Base URL of the WordPress site.
username (Optional[str]): WordPress username for authentication.
password (Optional[str]): WordPress password for authentication.
get_pages (bool): Retrieve static WordPress 'pages'. Default True.
get_posts (bool): Retrieve WordPress 'posts' (blog entries). Default True.
additional_post_types (Optional[str]): Comma-separated list of additional post types to retrieve
(e.g., 'my-custom-page,webinars'). Default is None.
"""
def __init__(
self,
url: str,
username: Optional[str] = None,
password: Optional[str] = None,
get_pages: bool = True,
get_posts: bool = True,
additional_post_types: Optional[str] = None,
) -> None:
"""Initialize Wordpress reader."""
self.url = url
self.username = username
self.password = password
# Use a set to prevent duplicates
self.post_types = set()
# Add default types based on backward-compatible options
if get_pages:
self.post_types.add("pages")
if get_posts:
self.post_types.add("posts")
# Add any additional post types specified
if additional_post_types:
self.post_types.update(
post_type.strip() for post_type in additional_post_types.split(",")
)
# Convert post_types back to a list
self.post_types = list(self.post_types)
def load_data(self) -> List[Document]:
"""
Load data from the specified post types.
Returns:
List[Document]: List of documents.
"""
from bs4 import BeautifulSoup, GuessedAtParserWarning
# Suppressing this warning because guessing at the parser is the
# desired behavior -- we don't want to force lxml on packages
# where it's not installed.
warnings.filterwarnings("ignore", category=GuessedAtParserWarning)
results = []
articles = []
# Fetch articles for each specified post type
for post_type in self.post_types:
articles.extend(self.get_all_posts(post_type))
# Process each article to extract content and metadata
for article in articles:
body = article.get("content", {}).get("rendered", None)
if body is None:
body = article.get("content")
soup = BeautifulSoup(body)
body = soup.get_text()
title = article.get("title", {}).get("rendered", None) or article.get(
"title"
)
extra_info = {
"id": article["id"],
"title": title,
"url": article["link"],
"updated_at": article["modified"],
}
results.append(
Document(
text=body,
extra_info=extra_info,
)
)
return results
def get_all_posts(self, post_type: str) -> List[dict]:
"""Retrieve all posts of a specific type, handling pagination."""
posts = []
next_page = 1
while True:
response = self.get_posts_page(post_type, next_page)
posts.extend(response["articles"])
next_page = response["next_page"]
if next_page is None:
break
return posts
def get_posts_page(self, post_type: str, current_page: int = 1) -> dict:
"""Retrieve a single page of posts for a given post type."""
import requests
url = f"{self.url}/wp-json/wp/v2/{post_type}?per_page=100&page={current_page}"
# Handle authentication if username and password are provided
auth = (
(self.username, self.password) if self.username and self.password else None
)
response = requests.get(url, auth=auth)
response.raise_for_status() # Raise an error for bad responses
headers = response.headers
num_pages = int(headers.get("X-WP-TotalPages", 1))
next_page = current_page + 1 if num_pages > current_page else None
articles = response.json()
return {"articles": articles, "next_page": next_page}
|
"""Wordpress reader."""
import warnings
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class WordpressReader(BaseReader):
"""
Wordpress reader. Reads data from a WordPress site.
Args:
url (str): Base URL of the WordPress site.
username (Optional[str]): WordPress username for authentication.
password (Optional[str]): WordPress password for authentication.
get_pages (bool): Retrieve static WordPress 'pages'. Default True.
get_posts (bool): Retrieve WordPress 'posts' (blog entries). Default True.
additional_post_types (Optional[str]): Comma-separated list of additional post types to retrieve
(e.g., 'my-custom-page,webinars'). Default is None.
"""
def __init__(
self,
url: str,
username: Optional[str] = None,
password: Optional[str] = None,
get_pages: bool = True,
get_posts: bool = True,
additional_post_types: Optional[str] = None,
) -> None:
"""Initialize Wordpress reader."""
self.url = url
self.username = username
self.password = password
# Use a set to prevent duplicates
self.post_types = set()
# Add default types based on backward-compatible options
if get_pages:
self.post_types.add("pages")
if get_posts:
self.post_types.add("posts")
# Add any additional post types specified
if additional_post_types:
self.post_types.update(
post_type.strip() for post_type in additional_post_types.split(",")
)
# Convert post_types back to a list
self.post_types = list(self.post_types)
def load_data(self) -> List[Document]:
"""
Load data from the specified post types.
Returns:
List[Document]: List of documents.
"""
from bs4 import BeautifulSoup, GuessedAtParserWarning
# Suppressing this warning because guessing at the parser is the
# desired behavior -- we don't want to force lxml on packages
# where it's not installed.
warnings.filterwarnings("ignore", category=GuessedAtParserWarning)
results = []
articles = []
# Fetch articles for each specified post type
for post_type in self.post_types:
articles.extend(self.get_all_posts(post_type))
# Process each article to extract content and metadata
for article in articles:
body = article.get("content", {}).get("rendered", None)
if body is None:
body = article.get("content")
soup = BeautifulSoup(body)
body = soup.get_text()
title = article.get("title", {}).get("rendered", None) or article.get(
"title"
)
extra_info = {
"id": article["id"],
"title": title,
"url": article["link"],
"updated_at": article["modified"],
}
results.append(
Document(
text=body,
extra_info=extra_info,
)
)
return results
def get_all_posts(self, post_type: str) -> List[dict]:
"""Retrieve all posts of a specific type, handling pagination."""
posts = []
next_page = 1
while True:
response = self.get_posts_page(post_type, next_page)
posts.extend(response["articles"])
next_page = response["next_page"]
if next_page is None:
break
return posts
def get_posts_page(self, post_type: str, current_page: int = 1) -> dict:
"""Retrieve a single page of posts for a given post type."""
import requests
url = f"{self.url}/wp-json/wp/v2/{post_type}?per_page=100&page={current_page}"
# Handle authentication if username and password are provided
auth = (
(self.username, self.password) if self.username and self.password else None
)
response = requests.get(url, auth=auth)
response.raise_for_status() # Raise an error for bad responses
headers = response.headers
num_pages = int(headers.get("X-WP-TotalPages", 1))
next_page = current_page + 1 if num_pages > current_page else None
articles = response.json()
return {"articles": articles, "next_page": next_page}
|
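Both versions of the reader page through the WordPress REST API by following the `X-WP-TotalPages` response header. A self-contained sketch of that pagination loop, assuming a public site and the standard `/wp-json/wp/v2/` routes, is shown below:

```python
import requests


def fetch_all(base_url: str, post_type: str = "posts", per_page: int = 100) -> list:
    """Collect every item of a post type by following the X-WP-TotalPages header."""
    items, page = [], 1
    while True:
        resp = requests.get(
            f"{base_url}/wp-json/wp/v2/{post_type}",
            params={"per_page": per_page, "page": page},
            timeout=30,
        )
        resp.raise_for_status()
        items.extend(resp.json())
        total_pages = int(resp.headers.get("X-WP-TotalPages", 1))
        if page >= total_pages:
            return items
        page += 1
```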
_base_ = './mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[20, 23],
gamma=0.1)
]
|
_base_ = './mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py'
# learning policy
lr_config = dict(step=[20, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
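The two configs above express the same learning-rate policy in different MMDetection generations: the first uses the 3.x `param_scheduler`/`train_cfg` style (with the 500-iteration linear warmup written out explicitly), the second the 2.x `lr_config`/`runner` style, both training for 24 epochs and dropping the LR by 10x after epochs 20 and 23. As a rough, framework-independent illustration of the epoch-level part only (warmup omitted, LR value assumed), this corresponds to a plain PyTorch `MultiStepLR`:

```python
import torch
from torch.optim.lr_scheduler import MultiStepLR

# Illustration only: a dummy parameter and optimizer stand in for the detector;
# lr=0.02 is an assumed base learning rate, not taken from the config.
params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.SGD(params, lr=0.02)
scheduler = MultiStepLR(optimizer, milestones=[20, 23], gamma=0.1)

for epoch in range(24):
    optimizer.step()   # stands in for one epoch of training
    scheduler.step()   # LR drops by 10x after 20 and again after 23 epochs
```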
from utils import foo
from jina import Executor
class DummyHubExecutorAbs(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
foo()
|
from helper import foo
from jina import Executor
class DummyHubExecutorAbs(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
foo()
|
_base_ = './detic_centernet2_r50_fpn_4x_lvis_boxsup.py'
dataset_type = ['LVISV1Dataset', 'ImageNetLVISV1Dataset']
image_size_det = (640, 640)
image_size_cls = (320, 320)
# backend = 'pillow'
backend_args = None
train_pipeline_det = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size_det,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size_det,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_pipeline_cls = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=False, with_label=True),
dict(
type='RandomResize',
scale=image_size_cls,
ratio_range=(0.5, 1.5),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size_cls,
recompute_bbox=False,
bbox_clip_border=False,
allow_negative_crop=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
dataset_det = dict(
type='ClassBalancedDataset',
oversample_thr=1e-3,
dataset=dict(
type='LVISV1Dataset',
data_root='data/lvis/',
ann_file='annotations/lvis_v1_train.json',
data_prefix=dict(img=''),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline_det,
backend_args=backend_args))
dataset_cls = dict(
type='ImageNetLVISV1Dataset',
data_root='data/imagenet',
ann_file='annotations/imagenet_lvis_image_info.json',
data_prefix=dict(img='ImageNet-LVIS/'),
pipeline=train_pipeline_cls,
backend_args=backend_args)
train_dataloader = dict(
_delete_=True,
batch_size=[8, 32],
num_workers=2,
persistent_workers=True,
sampler=dict(type='MultiDataSampler', dataset_ratio=[1, 4]),
batch_sampler=dict(
type='MultiDataAspectRatioBatchSampler', num_datasets=2),
dataset=dict(type='ConcatDataset', datasets=[dataset_det, dataset_cls]))
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='CosineAnnealingLR',
begin=0,
by_epoch=False,
T_max=90000,
)
]
load_from = './first_stage/detic_centernet2_r50_fpn_4x_lvis_boxsup.pth'
find_unused_parameters = True
|
_base_ = './detic_centernet2_r50_fpn_4x_lvis_boxsup.py'
image_size_det = (640, 640)
image_size_cls = (320, 320)
# backend = 'pillow'
backend_args = None
train_pipeline_det = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size_det,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size_det,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_pipeline_cls = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=False, with_label=True),
dict(
type='RandomResize',
scale=image_size_cls,
ratio_range=(0.5, 1.5),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size_cls,
recompute_bbox=False,
bbox_clip_border=False,
allow_negative_crop=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
dataset_det = dict(
type='ClassBalancedDataset',
oversample_thr=1e-3,
dataset=dict(
type='LVISV1Dataset',
data_root='data/lvis/',
ann_file='annotations/lvis_v1_train.json',
data_prefix=dict(img=''),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline_det,
backend_args=backend_args))
dataset_cls = dict(
type='ImageNetLVISV1Dataset',
data_root='data/imagenet',
ann_file='annotations/imagenet_lvis_image_info.json',
data_prefix=dict(img='ImageNet-LVIS/'),
pipeline=train_pipeline_cls,
backend_args=backend_args)
train_dataloader = dict(
_delete_=True,
batch_size=[8, 32],
num_workers=2,
persistent_workers=True,
sampler=dict(type='MultiDataSampler', dataset_ratio=[1, 4]),
batch_sampler=dict(
type='MultiDataAspectRatioBatchSampler', num_datasets=2),
dataset=dict(type='ConcatDataset', datasets=[dataset_det, dataset_cls]))
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=1000)
]
load_from = './first_stage/detic_centernet2_r50_fpn_4x_lvis_boxsup.pth'
find_unused_parameters = True
|
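Both Detic configs train on two sources at once, LVIS detection data and ImageNet-LVIS classification data, with `dataset_ratio=[1, 4]` and per-dataset batch sizes `[8, 32]`; they differ only in the `param_scheduler` (cosine decay vs. warmup only). As a toy sketch of what a 1:4 sampling ratio means for the interleaving order (not MMDetection's actual sampler), consider:

```python
from itertools import islice


def interleave_by_ratio(source_a, source_b, ratio=(1, 4)):
    """Yield items alternating between two finite sources in a fixed ratio."""
    it_a, it_b = iter(source_a), iter(source_b)
    while True:
        chunk_a = list(islice(it_a, ratio[0]))
        chunk_b = list(islice(it_b, ratio[1]))
        if not chunk_a and not chunk_b:
            return
        yield from chunk_a  # e.g. 1 detection batch ...
        yield from chunk_b  # ... then 4 classification batches


print(list(interleave_by_ratio(["det"] * 3, ["cls"] * 12)))
# ['det', 'cls', 'cls', 'cls', 'cls', 'det', ...]
```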
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, Iterable, Optional
import spacy
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
_EXCLUDE_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter',
'lemmatizer',
'attribute_ruler',
]
class SpacyTextEncoder(Executor):
"""
:class:`SpacyTextEncoder` encodes ``Document`` objects using models offered by spaCy
"""
def __init__(
self,
model_name: str = 'en_core_web_sm',
download_data: bool = True,
default_batch_size: int = 32,
default_traversal_paths: Iterable[str] = ('r',),
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param model_name: pre-trained spaCy language pipeline name
:param download_data: whether to download the spaCy pipeline with ``spacy download`` on initialization
:param default_batch_size: fallback batch size in case no batch size is sent in the request
:param default_traversal_paths: fallback traversal paths in case no traversal paths are sent in the request
:param device: device to use for encoding ['cuda', 'cpu', 'cuda:2']
"""
super().__init__(*args, **kwargs)
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
self.device = device
if device.startswith('cuda'):
spacy.require_gpu()
if download_data:
subprocess.run(
['python3', '-m', 'spacy', 'download', model_name], check=True
)
self.spacy_model = spacy.load(model_name, exclude=_EXCLUDE_COMPONENTS)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Encode all docs with text and store the encodings in the embedding
attribute of the docs.
:param docs: documents sent to the encoder. The docs must have the
``text`` attribute.
:param parameters: dictionary to define the ``traversal_path`` and the
``batch_size``. For example,
``parameters={'traversal_paths': ['r'], 'batch_size': 10}``
"""
if self.device.startswith('cuda'):
from cupy import asnumpy
if docs:
batch_size = parameters.get('batch_size', self.default_batch_size)
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=batch_size,
needs_attr='text',
)
for document_batch in document_batches_generator:
texts = [doc.text for doc in document_batch]
for doc, spacy_doc in zip(
document_batch, self.spacy_model.pipe(texts, batch_size=batch_size)
):
if self.device.startswith('cuda'):
doc.embedding = asnumpy(spacy_doc.vector)
else:
doc.embedding = spacy_doc.vector
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, List, Optional
import spacy
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
_EXCLUDE_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter',
'lemmatizer',
'attribute_ruler',
]
class SpacyTextEncoder(Executor):
"""
:class:`SpacyTextEncoder` encodes ``Document`` objects using models offered by spaCy
"""
def __init__(
self,
model_name: str = 'en_core_web_sm',
require_gpu: bool = False,
download_data: bool = True,
default_batch_size: int = 32,
default_traversal_paths: List[str] = ['r'],
*args,
**kwargs,
):
"""
:param model_name: pre-trained spaCy language pipeline name
:param require_gpu: whether to run spaCy on the GPU via ``spacy.require_gpu()``; if False, the CPU is used
:param download_data: whether to download the spaCy pipeline with ``spacy download`` on initialization
:param default_batch_size: Default batch size, used if ``batch_size`` is not
provided as a parameter in the request
:param default_traversal_paths: Default traversal paths, used if ``traversal_paths``
are not provided as a parameter in the request.
"""
super().__init__(*args, **kwargs)
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
if require_gpu:
spacy.require_gpu()
if download_data:
subprocess.run(
['python', '-m', 'spacy', 'download', model_name], check=True
)
self.spacy_model = spacy.load(model_name, exclude=_EXCLUDE_COMPONENTS)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Encode all docs with text and store the encodings in the embedding
attribute of the docs.
:param docs: documents sent to the encoder. The docs must have the
``text`` attribute.
:param parameters: dictionary to define the ``traversal_path`` and the
``batch_size``. For example,
``parameters={'traversal_paths': ['r'], 'batch_size': 10}``
"""
if docs:
batch_size = parameters.get('batch_size', self.default_batch_size)
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=batch_size,
needs_attr='text',
)
for document_batch in document_batches_generator:
texts = [doc.text for doc in document_batch]
for doc, spacy_doc in zip(
document_batch, self.spacy_model.pipe(texts, batch_size=batch_size)
):
doc.embedding = spacy_doc.vector
|
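Stripped of the Jina plumbing, both versions of the executor reduce to the same spaCy batch-encoding pattern: load a pipeline with the components that are not needed for vectors excluded, run `nlp.pipe` over the texts, and take each document's `vector`. A minimal standalone sketch, assuming `en_core_web_sm` is already installed, could be:

```python
import spacy

# Exclude components that are not needed to obtain document vectors.
nlp = spacy.load(
    "en_core_web_sm",
    exclude=["tagger", "parser", "ner", "senter", "lemmatizer", "attribute_ruler"],
)

texts = ["hello world", "spacy makes batching easy"]
embeddings = [doc.vector for doc in nlp.pipe(texts, batch_size=32)]
print(len(embeddings), embeddings[0].shape)  # e.g. 2 (96,) for en_core_web_sm
```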
"""Simple file node parser."""
from typing import Any, Dict, List, Optional, Sequence, Type
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.node_parser.node_utils import build_nodes_from_splits
from llama_index.core.node_parser.file.html import HTMLNodeParser
from llama_index.core.node_parser.file.json import JSONNodeParser
from llama_index.core.node_parser.file.markdown import MarkdownNodeParser
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.schema import BaseNode, MetadataMode
from llama_index.core.utils import get_tqdm_iterable
FILE_NODE_PARSERS: Dict[str, Type[NodeParser]] = {
".md": MarkdownNodeParser,
".html": HTMLNodeParser,
".json": JSONNodeParser,
}
class SimpleFileNodeParser(NodeParser):
"""
Simple file node parser.
Splits a document loaded from a file into Nodes, automatically detecting the NodeParser to use based on the file type.
Args:
include_metadata (bool): whether to include metadata in nodes
include_prev_next_rel (bool): whether to include prev/next relationships
"""
@classmethod
def from_defaults(
cls,
include_metadata: bool = True,
include_prev_next_rel: bool = True,
callback_manager: Optional[CallbackManager] = None,
) -> "SimpleFileNodeParser":
callback_manager = callback_manager or CallbackManager([])
return cls(
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
callback_manager=callback_manager,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "SimpleFileNodeParser"
def _parse_nodes(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
**kwargs: Any,
) -> List[BaseNode]:
"""
Parse document into nodes.
Args:
nodes (Sequence[BaseNode]): nodes to parse
"""
all_nodes: List[BaseNode] = []
documents_with_progress = get_tqdm_iterable(
nodes, show_progress, "Parsing documents into nodes"
)
for document in documents_with_progress:
ext = document.metadata.get("extension", "None")
if ext in FILE_NODE_PARSERS:
parser = FILE_NODE_PARSERS[ext](
include_metadata=self.include_metadata,
include_prev_next_rel=self.include_prev_next_rel,
callback_manager=self.callback_manager,
)
nodes = parser.get_nodes_from_documents([document], show_progress)
all_nodes.extend(nodes)
else:
# What to do when file type isn't supported yet?
all_nodes.extend(
# build node from document
build_nodes_from_splits(
[document.get_content(metadata_mode=MetadataMode.NONE)],
document,
id_func=self.id_func,
)
)
return all_nodes
|
"""Simple file node parser."""
from typing import Any, Dict, List, Optional, Sequence, Type
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.node_parser.node_utils import build_nodes_from_splits
from llama_index.core.node_parser.file.html import HTMLNodeParser
from llama_index.core.node_parser.file.json import JSONNodeParser
from llama_index.core.node_parser.file.markdown import MarkdownNodeParser
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.schema import BaseNode, MetadataMode
from llama_index.core.utils import get_tqdm_iterable
FILE_NODE_PARSERS: Dict[str, Type[NodeParser]] = {
".md": MarkdownNodeParser,
".html": HTMLNodeParser,
".json": JSONNodeParser,
}
class SimpleFileNodeParser(NodeParser):
"""Simple file node parser.
Splits a document loaded from a file into Nodes, automatically detecting the NodeParser to use based on the file type.
Args:
include_metadata (bool): whether to include metadata in nodes
include_prev_next_rel (bool): whether to include prev/next relationships
"""
@classmethod
def from_defaults(
cls,
include_metadata: bool = True,
include_prev_next_rel: bool = True,
callback_manager: Optional[CallbackManager] = None,
) -> "SimpleFileNodeParser":
callback_manager = callback_manager or CallbackManager([])
return cls(
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
callback_manager=callback_manager,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "SimpleFileNodeParser"
def _parse_nodes(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
**kwargs: Any,
) -> List[BaseNode]:
"""Parse document into nodes.
Args:
nodes (Sequence[BaseNode]): nodes to parse
"""
all_nodes: List[BaseNode] = []
documents_with_progress = get_tqdm_iterable(
nodes, show_progress, "Parsing documents into nodes"
)
for document in documents_with_progress:
ext = document.metadata.get("extension", "None")
if ext in FILE_NODE_PARSERS:
parser = FILE_NODE_PARSERS[ext](
include_metadata=self.include_metadata,
include_prev_next_rel=self.include_prev_next_rel,
callback_manager=self.callback_manager,
)
nodes = parser.get_nodes_from_documents([document], show_progress)
all_nodes.extend(nodes)
else:
# What to do when file type isn't supported yet?
all_nodes.extend(
# build node from document
build_nodes_from_splits(
[document.get_content(metadata_mode=MetadataMode.NONE)],
document,
id_func=self.id_func,
)
)
return all_nodes
|
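The parser dispatches on the `extension` entry in each document's metadata and falls back to wrapping the raw content in a single node for unknown types. A short usage sketch under that assumption (the public import path, the metadata value `.md`, and the sample Markdown text are illustrative):

```python
from llama_index.core.node_parser import SimpleFileNodeParser
from llama_index.core.schema import Document

parser = SimpleFileNodeParser.from_defaults()
doc = Document(
    text="# Title\n\nSome markdown body.",
    metadata={"extension": ".md"},  # routes the document to MarkdownNodeParser
)
nodes = parser.get_nodes_from_documents([doc])
print(len(nodes), nodes[0].get_content()[:40])
```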
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
ImageBlock,
LLMMetadata,
MessageRole,
TextBlock,
AudioBlock,
DocumentBlock,
CachePoint,
CacheControl,
CitableBlock,
CitationBlock,
)
from llama_index.core.llms.custom import CustomLLM
from llama_index.core.llms.llm import LLM
from llama_index.core.llms.mock import MockLLM
__all__ = [
"CustomLLM",
"LLM",
"ChatMessage",
"ChatResponse",
"ChatResponseAsyncGen",
"ChatResponseGen",
"CompletionResponse",
"CompletionResponseAsyncGen",
"CompletionResponseGen",
"LLMMetadata",
"MessageRole",
"MockLLM",
"ImageBlock",
"TextBlock",
"AudioBlock",
"DocumentBlock",
"CachePoint",
"CacheControl",
"CitableBlock",
"CitationBlock",
]
|
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
ImageBlock,
LLMMetadata,
MessageRole,
TextBlock,
AudioBlock,
DocumentBlock,
CachePoint,
CacheControl,
)
from llama_index.core.llms.custom import CustomLLM
from llama_index.core.llms.llm import LLM
from llama_index.core.llms.mock import MockLLM
__all__ = [
"CustomLLM",
"LLM",
"ChatMessage",
"ChatResponse",
"ChatResponseAsyncGen",
"ChatResponseGen",
"CompletionResponse",
"CompletionResponseAsyncGen",
"CompletionResponseGen",
"LLMMetadata",
"MessageRole",
"MockLLM",
"ImageBlock",
"TextBlock",
"AudioBlock",
"DocumentBlock",
"CachePoint",
"CacheControl",
]
|
from pydantic import BaseModel
from inspect import Signature, Parameter
from typing import Any, Dict, Optional, List, Callable
from llama_index.core.llms import ChatMessage, AudioBlock, TextBlock, MessageRole
from llama_index.core.tools import BaseTool
def make_function_from_tool_model(
model_cls: type[BaseModel], tool: BaseTool
) -> Callable:
fields = model_cls.model_fields
parameters = [
Parameter(name, Parameter.POSITIONAL_OR_KEYWORD, annotation=field.annotation)
for name, field in fields.items()
]
sig = Signature(parameters)
def func_template(*args, **kwargs):
bound = func_template.__signature__.bind(*args, **kwargs)
bound.apply_defaults()
return tool(**bound.arguments).raw_output
func_template.__signature__ = sig
return func_template
def callback_user_message(
messages: Dict[int, List[ChatMessage]],
message_id: int,
text: Optional[str] = None,
audio: Optional[Any] = None,
) -> None:
if messages.get(message_id) is None:
if text:
messages[message_id] = []
messages[message_id].append(
ChatMessage(role=MessageRole.USER, blocks=[TextBlock(text=text)])
)
else:
messages[message_id] = []
messages[message_id].append(
ChatMessage(role=MessageRole.USER, blocks=[AudioBlock(audio=audio)])
)
else:
last_user_messages = [
message
for message in messages[message_id]
if message.role == MessageRole.USER
]
if len(last_user_messages) > 0:
last_user_message = last_user_messages[-1]
else:
messages[message_id].append(ChatMessage(role=MessageRole.USER, blocks=[]))
last_user_message = [
message
for message in messages[message_id]
if message.role == MessageRole.USER
][-1]
if text:
last_user_message.blocks.append(TextBlock(text=text))
else:
last_user_message.blocks.append(AudioBlock(audio=audio))
def callback_agent_message(
messages: Dict[int, List[ChatMessage]],
message_id: int,
text: Optional[str] = None,
audio: Optional[Any] = None,
) -> None:
if messages.get(message_id) is None:
if text:
messages[message_id] = []
messages[message_id].append(
ChatMessage(role=MessageRole.ASSISTANT, blocks=[TextBlock(text=text)])
)
else:
messages[message_id] = []
messages[message_id].append(
ChatMessage(
role=MessageRole.ASSISTANT, blocks=[AudioBlock(audio=audio)]
)
)
else:
last_agent_messages = [
message
for message in messages[message_id]
if message.role == MessageRole.ASSISTANT
]
if len(last_agent_messages) > 0:
last_agent_message = last_agent_messages[-1]
else:
messages[message_id].append(
ChatMessage(role=MessageRole.ASSISTANT, blocks=[])
)
last_agent_message = [
message
for message in messages[message_id]
if message.role == MessageRole.ASSISTANT
][-1]
if text:
last_agent_message.blocks.append(TextBlock(text=text))
else:
last_agent_message.blocks.append(AudioBlock(audio=audio))
def callback_agent_message_correction(
messages: Dict[int, List[ChatMessage]], message_id: int, text: str
) -> None:
last_agent_message = [
message
for message in messages[message_id]
if message.role == MessageRole.ASSISTANT
][-1]
last_block = [
block for block in last_agent_message.blocks if block.block_type == "text"
][-1]
last_block.text = text
def callback_latency_measurement(latencies: List[int], latency: int) -> None:
latencies.append(latency)
|
from typing import Any, Dict, Optional, List
from llama_index.core.llms import ChatMessage, AudioBlock, TextBlock, MessageRole
def callback_user_message(
messages: Dict[int, List[ChatMessage]],
message_id: int,
text: Optional[str] = None,
audio: Optional[Any] = None,
) -> None:
if messages.get(message_id) is None:
if text:
messages[message_id] = []
messages[message_id].append(
ChatMessage(role=MessageRole.USER, blocks=[TextBlock(text=text)])
)
else:
messages[message_id] = []
messages[message_id].append(
ChatMessage(role=MessageRole.USER, blocks=[AudioBlock(audio=audio)])
)
else:
last_user_messages = [
message
for message in messages[message_id]
if message.role == MessageRole.USER
]
if len(last_user_messages) > 0:
last_user_message = last_user_messages[-1]
else:
messages[message_id].append(ChatMessage(role=MessageRole.USER, blocks=[]))
last_user_message = [
message
for message in messages[message_id]
if message.role == MessageRole.USER
][-1]
if text:
last_user_message.blocks.append(TextBlock(text=text))
else:
last_user_message.blocks.append(AudioBlock(audio=audio))
def callback_agent_message(
messages: Dict[int, List[ChatMessage]],
message_id: int,
text: Optional[str] = None,
audio: Optional[Any] = None,
) -> None:
if messages.get(message_id) is None:
if text:
messages[message_id] = []
messages[message_id].append(
ChatMessage(role=MessageRole.ASSISTANT, blocks=[TextBlock(text=text)])
)
else:
messages[message_id] = []
messages[message_id].append(
ChatMessage(
role=MessageRole.ASSISTANT, blocks=[AudioBlock(audio=audio)]
)
)
else:
last_agent_messages = [
message
for message in messages[message_id]
if message.role == MessageRole.ASSISTANT
]
if len(last_agent_messages) > 0:
last_agent_message = last_agent_messages[-1]
else:
messages[message_id].append(
ChatMessage(role=MessageRole.ASSISTANT, blocks=[])
)
last_agent_message = [
message
for message in messages[message_id]
if message.role == MessageRole.ASSISTANT
][-1]
if text:
last_agent_message.blocks.append(TextBlock(text=text))
else:
last_agent_message.blocks.append(AudioBlock(audio=audio))
def callback_agent_message_correction(
messages: Dict[int, List[ChatMessage]], message_id: int, text: str
) -> None:
last_agent_message = [
message
for message in messages[message_id]
if message.role == MessageRole.ASSISTANT
][-1]
last_block = [
block for block in last_agent_message.blocks if block.block_type == "text"
][-1]
last_block.text = text
def callback_latency_measurement(latencies: List[int], latency: int) -> None:
latencies.append(latency)
|
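Both versions keep a per-`message_id` transcript: the first call for an id creates a new `ChatMessage`, and later calls append `TextBlock`/`AudioBlock` entries to the most recent message with the matching role. A small usage sketch, assuming the helpers are importable from a module named `callbacks` (a hypothetical path):

```python
from callbacks import callback_agent_message, callback_user_message  # hypothetical module name

messages = {}
callback_user_message(messages, 1, text="What's the weather?")
callback_user_message(messages, 1, text="...in Berlin?")   # appended to the same user message
callback_agent_message(messages, 1, text="It is sunny.")

print(len(messages[1]))            # 2 messages: one user, one assistant
print(len(messages[1][0].blocks))  # 2 text blocks on the user message
```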
from __future__ import annotations
import json
import logging
import re
from re import Pattern
from typing import Optional, Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from pydantic import Field
from langchain.agents.agent import AgentOutputParser
from langchain.agents.structured_chat.prompt import FORMAT_INSTRUCTIONS
from langchain.output_parsers import OutputFixingParser
logger = logging.getLogger(__name__)
class StructuredChatOutputParser(AgentOutputParser):
"""Output parser for the structured chat agent."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
pattern: Pattern = re.compile(r"```(?:json\s+)?(\W.*?)```", re.DOTALL)
"""Regex pattern to parse the output."""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
try:
action_match = self.pattern.search(text)
if action_match is not None:
response = json.loads(action_match.group(1).strip(), strict=False)
if isinstance(response, list):
# gpt turbo frequently ignores the directive to emit a single action
logger.warning("Got multiple action responses: %s", response)
response = response[0]
if response["action"] == "Final Answer":
return AgentFinish({"output": response["action_input"]}, text)
else:
return AgentAction(
response["action"], response.get("action_input", {}), text
)
else:
return AgentFinish({"output": text}, text)
except Exception as e:
msg = f"Could not parse LLM output: {text}"
raise OutputParserException(msg) from e
@property
def _type(self) -> str:
return "structured_chat"
class StructuredChatOutputParserWithRetries(AgentOutputParser):
"""Output parser with retries for the structured chat agent."""
base_parser: AgentOutputParser = Field(default_factory=StructuredChatOutputParser)
"""The base parser to use."""
output_fixing_parser: Optional[OutputFixingParser] = None
"""The output fixing parser to use."""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
try:
if self.output_fixing_parser is not None:
parsed_obj: Union[AgentAction, AgentFinish] = (
self.output_fixing_parser.parse(text)
)
else:
parsed_obj = self.base_parser.parse(text)
return parsed_obj
except Exception as e:
msg = f"Could not parse LLM output: {text}"
raise OutputParserException(msg) from e
@classmethod
def from_llm(
cls,
llm: Optional[BaseLanguageModel] = None,
base_parser: Optional[StructuredChatOutputParser] = None,
) -> StructuredChatOutputParserWithRetries:
if llm is not None:
base_parser = base_parser or StructuredChatOutputParser()
output_fixing_parser: OutputFixingParser = OutputFixingParser.from_llm(
llm=llm, parser=base_parser
)
return cls(output_fixing_parser=output_fixing_parser)
elif base_parser is not None:
return cls(base_parser=base_parser)
else:
return cls()
@property
def _type(self) -> str:
return "structured_chat_with_retries"
|
from __future__ import annotations
import json
import logging
import re
from re import Pattern
from typing import Optional, Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from pydantic import Field
from langchain.agents.agent import AgentOutputParser
from langchain.agents.structured_chat.prompt import FORMAT_INSTRUCTIONS
from langchain.output_parsers import OutputFixingParser
logger = logging.getLogger(__name__)
class StructuredChatOutputParser(AgentOutputParser):
"""Output parser for the structured chat agent."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
pattern: Pattern = re.compile(r"```(?:json\s+)?(\W.*?)```", re.DOTALL)
"""Regex pattern to parse the output."""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
try:
action_match = self.pattern.search(text)
if action_match is not None:
response = json.loads(action_match.group(1).strip(), strict=False)
if isinstance(response, list):
# gpt turbo frequently ignores the directive to emit a single action
logger.warning("Got multiple action responses: %s", response)
response = response[0]
if response["action"] == "Final Answer":
return AgentFinish({"output": response["action_input"]}, text)
else:
return AgentAction(
response["action"], response.get("action_input", {}), text
)
else:
return AgentFinish({"output": text}, text)
except Exception as e:
raise OutputParserException(f"Could not parse LLM output: {text}") from e
@property
def _type(self) -> str:
return "structured_chat"
class StructuredChatOutputParserWithRetries(AgentOutputParser):
"""Output parser with retries for the structured chat agent."""
base_parser: AgentOutputParser = Field(default_factory=StructuredChatOutputParser)
"""The base parser to use."""
output_fixing_parser: Optional[OutputFixingParser] = None
"""The output fixing parser to use."""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
try:
if self.output_fixing_parser is not None:
parsed_obj: Union[AgentAction, AgentFinish] = (
self.output_fixing_parser.parse(text)
)
else:
parsed_obj = self.base_parser.parse(text)
return parsed_obj
except Exception as e:
raise OutputParserException(f"Could not parse LLM output: {text}") from e
@classmethod
def from_llm(
cls,
llm: Optional[BaseLanguageModel] = None,
base_parser: Optional[StructuredChatOutputParser] = None,
) -> StructuredChatOutputParserWithRetries:
if llm is not None:
base_parser = base_parser or StructuredChatOutputParser()
output_fixing_parser: OutputFixingParser = OutputFixingParser.from_llm(
llm=llm, parser=base_parser
)
return cls(output_fixing_parser=output_fixing_parser)
elif base_parser is not None:
return cls(base_parser=base_parser)
else:
return cls()
@property
def _type(self) -> str:
return "structured_chat_with_retries"
|
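The parser's behaviour hinges on the fenced-JSON regex shown above. A self-contained check of that extraction logic on a sample completion, using only `re` and `json` outside of LangChain, looks like this:

```python
import json
import re

# Same pattern as StructuredChatOutputParser.pattern
pattern = re.compile(r"```(?:json\s+)?(\W.*?)```", re.DOTALL)

text = 'Thought: done.\n```json\n{"action": "Final Answer", "action_input": "42"}\n```'
match = pattern.search(text)
response = json.loads(match.group(1).strip(), strict=False)
print(response["action"], response["action_input"])  # Final Answer 42
```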
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
)
@pytest.fixture
def ci_hub_config(monkeypatch):
monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def ci_hub_token_path(monkeypatch):
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
HfFolder.save_token(CI_HUB_USER_TOKEN)
yield
HfFolder.delete_token()
@pytest.fixture(scope="session")
def hf_api():
return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
previous_token = HfFolder.get_token()
HfFolder.save_token(CI_HUB_USER_TOKEN)
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(previous_token)
@pytest.fixture
def cleanup_repo(hf_api):
def _cleanup_repo(repo_id):
hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
@contextmanager
def _temporary_repo(repo_id):
try:
yield repo_id
finally:
cleanup_repo(repo_id)
return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(text_file),
path_in_repo="data/text_data.txt",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_csv_with_dir_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_image_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_img_data_
|
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
from datasets.utils._hf_hub_fixes import create_repo, delete_repo
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
)
@pytest.fixture
def ci_hub_config(monkeypatch):
monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def ci_hub_token_path(monkeypatch):
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
HfFolder.save_token(CI_HUB_USER_TOKEN)
yield
HfFolder.delete_token()
@pytest.fixture(scope="session")
def hf_api():
return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
previous_token = HfFolder.get_token()
HfFolder.save_token(CI_HUB_USER_TOKEN)
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(previous_token)
@pytest.fixture
def cleanup_repo(hf_api):
def _cleanup_repo(repo_id):
delete_repo(hf_api, repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
@contextmanager
def _temporary_repo(repo_id):
try:
yield repo_id
finally:
cleanup_repo(repo_id)
return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
create_repo(hf_api, repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(text_file),
path_in_repo="data/text_data.txt",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
delete_repo(hf_api, repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
create_repo(hf_api, repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_csv_with_dir_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
delete_repo(hf_api, repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
create_repo(hf_api, repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_image_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
delete_repo(hf_api, repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_img_data_
|
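The two conftest versions differ only in how they call the Hub: the first uses `HfApi.create_repo`/`HfApi.delete_repo` directly, the second routes through the `datasets.utils._hf_hub_fixes` compatibility wrappers. The underlying lifecycle (create a private dataset repo, upload a file, delete the repo) can be sketched with `huggingface_hub` alone; the repo id is a placeholder and a recent `huggingface_hub` version is assumed:

```python
from huggingface_hub import HfApi

api = HfApi()  # defaults to https://huggingface.co; pass endpoint=... for a CI hub
repo_id = "my-user/tmp-dataset"  # placeholder repo id

api.create_repo(repo_id, repo_type="dataset", private=True)
api.upload_file(
    path_or_fileobj=b"some text data\n",
    path_in_repo="data/text_data.txt",
    repo_id=repo_id,
    repo_type="dataset",
)
api.delete_repo(repo_id, repo_type="dataset")
```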
import csv
import os
from pathlib import Path
from typing import Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "wavs",
"url": "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2",
"checksum": "be1a30453f28eb8dd26af4101ae40cbf2c50413b1bb21936cbcdc6fae3de8aa5",
}
}
class LJSPEECH(Dataset):
"""Create a Dataset for *LJSpeech-1.1* [:footcite:`ljspeech17`].
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"wavs"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
basename = os.path.basename(url)
archive = root / basename
basename = Path(basename.split(".tar.bz2")[0])
folder_in_archive = basename / folder_in_archive
self._path = root / folder_in_archive
self._metadata_path = root / basename / "metadata.csv"
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
extract_archive(archive)
else:
if not os.path.exists(self._path):
raise RuntimeError(
f"The path {self._path} doesn't exist. "
"Please check the ``root`` path or set `download=True` to download it"
)
with open(self._metadata_path, "r", newline="") as metadata:
flist = csv.reader(metadata, delimiter="|", quoting=csv.QUOTE_NONE)
self._flist = list(flist)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, str):
``(waveform, sample_rate, transcript, normalized_transcript)``
"""
line = self._flist[n]
fileid, transcript, normalized_transcript = line
fileid_audio = self._path / (fileid + ".wav")
# Load audio
waveform, sample_rate = torchaudio.load(fileid_audio)
return (
waveform,
sample_rate,
transcript,
normalized_transcript,
)
def __len__(self) -> int:
return len(self._flist)
|
import csv
import os
from pathlib import Path
from typing import Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "wavs",
"url": "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2",
"checksum": "be1a30453f28eb8dd26af4101ae40cbf2c50413b1bb21936cbcdc6fae3de8aa5",
}
}
class LJSPEECH(Dataset):
"""Create a Dataset for *LJSpeech-1.1* [:footcite:`ljspeech17`].
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"wavs"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
basename = os.path.basename(url)
archive = root / basename
basename = Path(basename.split(".tar.bz2")[0])
folder_in_archive = basename / folder_in_archive
self._path = root / folder_in_archive
self._metadata_path = root / basename / "metadata.csv"
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
extract_archive(archive)
with open(self._metadata_path, "r", newline="") as metadata:
flist = csv.reader(metadata, delimiter="|", quoting=csv.QUOTE_NONE)
self._flist = list(flist)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, str):
``(waveform, sample_rate, transcript, normalized_transcript)``
"""
line = self._flist[n]
fileid, transcript, normalized_transcript = line
fileid_audio = self._path / (fileid + ".wav")
# Load audio
waveform, sample_rate = torchaudio.load(fileid_audio)
return (
waveform,
sample_rate,
transcript,
normalized_transcript,
)
def __len__(self) -> int:
return len(self._flist)
|
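The two LJSPEECH implementations differ only in the non-download branch: the first raises a RuntimeError when the extracted folder is missing, the second silently assumes it is present. Typical usage is the same for both; the sketch below assumes the packaged `torchaudio.datasets.LJSPEECH`, which exposes the same interface:

```python
from torchaudio.datasets import LJSPEECH

dataset = LJSPEECH(root="./data", download=True)  # downloads and extracts the archive on first use
waveform, sample_rate, transcript, normalized_transcript = dataset[0]
print(waveform.shape, sample_rate, transcript[:40])
```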
"""This file only exists to be lazy-imported and avoid V2-related import warnings when just using V1."""
import torch
from torchvision import datapoints
from torchvision.transforms import v2
class PadIfSmaller(v2.Transform):
def __init__(self, size, fill=0):
super().__init__()
self.size = size
self.fill = v2._utils._setup_fill_arg(fill)
def _get_params(self, sample):
_, height, width = v2._utils.query_chw(sample)
padding = [0, 0, max(self.size - width, 0), max(self.size - height, 0)]
needs_padding = any(padding)
return dict(padding=padding, needs_padding=needs_padding)
def _transform(self, inpt, params):
if not params["needs_padding"]:
return inpt
fill = v2._utils._get_fill(self.fill, type(inpt))
fill = v2._utils._convert_fill_arg(fill)
return v2.functional.pad(inpt, padding=params["padding"], fill=fill)
class CocoDetectionToVOCSegmentation(v2.Transform):
"""Turn samples from datasets.CocoDetection into the same format as VOCSegmentation.
This is achieved in two steps:
1. COCO differentiates between 91 categories while VOC only supports 21, including background for both. Fortunately,
the COCO categories are a superset of the VOC ones and thus can be mapped. Instances of the 70 categories not
present in VOC are dropped and replaced by background.
2. COCO only offers detection masks, i.e. a (N, H, W) bool-ish tensor, where the truthy values in each individual
mask denote the instance. However, a segmentation mask is a (H, W) integer tensor (typically torch.uint8), where
the value of each pixel denotes the category it belongs to. The detection masks are merged into one segmentation
mask while pixels that belong to multiple detection masks are marked as invalid.
"""
COCO_TO_VOC_LABEL_MAP = dict(
zip(
[0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4, 1, 64, 20, 63, 7, 72],
range(21),
)
)
INVALID_VALUE = 255
def _coco_detection_masks_to_voc_segmentation_mask(self, target):
if "masks" not in target:
return None
instance_masks, instance_labels_coco = target["masks"], target["labels"]
valid_labels_voc = [
(idx, label_voc)
for idx, label_coco in enumerate(instance_labels_coco.tolist())
if (label_voc := self.COCO_TO_VOC_LABEL_MAP.get(label_coco)) is not None
]
if not valid_labels_voc:
return None
valid_voc_category_idcs, instance_labels_voc = zip(*valid_labels_voc)
instance_masks = instance_masks[list(valid_voc_category_idcs)].to(torch.uint8)
instance_labels_voc = torch.tensor(instance_labels_voc, dtype=torch.uint8)
# Calling `.max()` on the stacked detection masks works fine to separate background from foreground as long as
# there is at most a single instance per pixel. Overlapping instances will be filtered out in the next step.
segmentation_mask, _ = (instance_masks * instance_labels_voc.reshape(-1, 1, 1)).max(dim=0)
segmentation_mask[instance_masks.sum(dim=0) > 1] = self.INVALID_VALUE
return segmentation_mask
def forward(self, image, target):
segmentation_mask = self._coco_detection_masks_to_voc_segmentation_mask(target)
if segmentation_mask is None:
segmentation_mask = torch.zeros(v2.functional.get_size(image), dtype=torch.uint8)
return image, datapoints.Mask(segmentation_mask)
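# Toy check (illustrative only, not part of the original file): how the max/sum merge in
# `_coco_detection_masks_to_voc_segmentation_mask` behaves when two instances overlap.
# The two tiny masks and the VOC labels 3 and 5 are made up for demonstration.
if __name__ == "__main__":
    masks = torch.tensor([[[1, 1], [0, 0]], [[0, 1], [0, 1]]], dtype=torch.uint8)
    labels = torch.tensor([3, 5], dtype=torch.uint8)  # hypothetical VOC labels
    merged, _ = (masks * labels.reshape(-1, 1, 1)).max(dim=0)
    merged[masks.sum(dim=0) > 1] = 255  # pixels covered by more than one instance become invalid
    print(merged)  # tensor([[  3, 255], [  0,   5]], dtype=torch.uint8)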
|
"""This file only exists to be lazy-imported and avoid V2-related import warnings when just using V1."""
import torch
from torchvision import datapoints
from torchvision.transforms import v2
class PadIfSmaller(v2.Transform):
def __init__(self, size, fill=0):
super().__init__()
self.size = size
self.fill = v2._utils._setup_fill_arg(fill)
def _get_params(self, sample):
_, height, width = v2._utils.query_chw(sample)
padding = [0, 0, max(self.size - width, 0), max(self.size - height, 0)]
needs_padding = any(padding)
return dict(padding=padding, needs_padding=needs_padding)
def _transform(self, inpt, params):
if not params["needs_padding"]:
return inpt
fill = v2._utils._get_fill(self.fill, type(inpt))
fill = v2._utils._convert_fill_arg(fill)
return v2.functional.pad(inpt, padding=params["padding"], fill=fill)
class CocoDetectionToVOCSegmentation(v2.Transform):
"""Turn samples from datasets.CocoDetection into the same format as VOCSegmentation.
This is achieved in two steps:
1. COCO differentiates between 91 categories while VOC only supports 21, including background for both. Fortunately,
the COCO categories are a superset of the VOC ones and thus can be mapped. Instances of the 70 categories not
present in VOC are dropped and replaced by background.
2. COCO only offers detection masks, i.e. a (N, H, W) bool-ish tensor, where the truthy values in each individual
mask denote the instance. However, a segmentation mask is a (H, W) integer tensor (typically torch.uint8), where
the value of each pixel denotes the category it belongs to. The detection masks are merged into one segmentation
mask while pixels that belong to multiple detection masks are marked as invalid.
"""
COCO_TO_VOC_LABEL_MAP = dict(
zip(
[0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4, 1, 64, 20, 63, 7, 72],
range(21),
)
)
INVALID_VALUE = 255
def _coco_detection_masks_to_voc_segmentation_mask(self, target):
if "masks" not in target:
return None
instance_masks, instance_labels_coco = target["masks"], target["labels"]
valid_labels_voc = [
(idx, label_voc)
for idx, label_coco in enumerate(instance_labels_coco.tolist())
if (label_voc := self.COCO_TO_VOC_LABEL_MAP.get(label_coco)) is not None
]
if not valid_labels_voc:
return None
valid_voc_category_idcs, instance_labels_voc = zip(*valid_labels_voc)
instance_masks = instance_masks[list(valid_voc_category_idcs)].to(torch.uint8)
instance_labels_voc = torch.tensor(instance_labels_voc, dtype=torch.uint8)
# Calling `.max()` on the stacked detection masks works fine to separate background from foreground as long as
# there is at most a single instance per pixel. Overlapping instances will be filtered out in the next step.
segmentation_mask, _ = (instance_masks * instance_labels_voc.reshape(-1, 1, 1)).max(dim=0)
segmentation_mask[instance_masks.sum(dim=0) > 1] = self.INVALID_VALUE
return segmentation_mask
def forward(self, image, target):
segmentation_mask = self._coco_detection_masks_to_voc_segmentation_mask(target)
if segmentation_mask is None:
segmentation_mask = torch.zeros(v2.functional.get_spatial_size(image), dtype=torch.uint8)
return image, datapoints.Mask(segmentation_mask)
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseDiffusersCLICommand(ABC):
@staticmethod
@abstractmethod
def register_subcommand(parser: ArgumentParser):
raise NotImplementedError()
@abstractmethod
def run(self):
raise NotImplementedError()
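# Hedged subclass sketch (illustrative, not shipped with diffusers): a concrete command
# only needs to implement the two abstract methods above. The command name "example"
# and the --name flag are made up for demonstration.
class ExampleCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # In a real CLI, argument registration for this command would happen here.
        parser.add_argument("--name", type=str, default="diffusers")
    def run(self):
        print("Hello from ExampleCommand")
if __name__ == "__main__":
    parser = ArgumentParser("example")
    ExampleCommand.register_subcommand(parser)
    args = parser.parse_args([])
    print(args.name)
    ExampleCommand().run()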
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseDiffusersCLICommand(ABC):
@staticmethod
@abstractmethod
def register_subcommand(parser: ArgumentParser):
raise NotImplementedError()
@abstractmethod
def run(self):
raise NotImplementedError()
|
"""Question-answering with sources over a vector database."""
import warnings
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.vectorstores import VectorStore
from pydantic import Field, model_validator
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain):
"""Question-answering with sources over a vector database."""
vectorstore: VectorStore = Field(exclude=True)
"""Vector Database to connect to."""
k: int = 4
"""Number of results to return from store"""
reduce_k_below_max_tokens: bool = False
"""Reduce the number of results to return from store based on tokens limit"""
max_tokens_limit: int = 3375
"""Restrict the docs to return from store based on tokens,
    enforced only for StuffDocumentsChain and if reduce_k_below_max_tokens is set to true"""
search_kwargs: dict[str, Any] = Field(default_factory=dict)
"""Extra search args."""
def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
num_docs = len(docs)
if self.reduce_k_below_max_tokens and isinstance(
self.combine_documents_chain,
StuffDocumentsChain,
):
tokens = [
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(
self,
inputs: dict[str, Any],
*,
run_manager: CallbackManagerForChainRun,
) -> list[Document]:
question = inputs[self.question_key]
docs = self.vectorstore.similarity_search(
question,
k=self.k,
**self.search_kwargs,
)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(
self,
inputs: dict[str, Any],
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> list[Document]:
msg = "VectorDBQAWithSourcesChain does not support async"
raise NotImplementedError(msg)
@model_validator(mode="before")
@classmethod
def raise_deprecation(cls, values: dict) -> Any:
warnings.warn(
"`VectorDBQAWithSourcesChain` is deprecated - "
"please use `from langchain.chains import RetrievalQAWithSourcesChain`",
)
return values
@property
def _chain_type(self) -> str:
return "vector_db_qa_with_sources_chain"
|
"""Question-answering with sources over a vector database."""
import warnings
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.vectorstores import VectorStore
from pydantic import Field, model_validator
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain):
"""Question-answering with sources over a vector database."""
vectorstore: VectorStore = Field(exclude=True)
"""Vector Database to connect to."""
k: int = 4
"""Number of results to return from store"""
reduce_k_below_max_tokens: bool = False
"""Reduce the number of results to return from store based on tokens limit"""
max_tokens_limit: int = 3375
"""Restrict the docs to return from store based on tokens,
    enforced only for StuffDocumentsChain and if reduce_k_below_max_tokens is set to true"""
search_kwargs: dict[str, Any] = Field(default_factory=dict)
"""Extra search args."""
def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
num_docs = len(docs)
if self.reduce_k_below_max_tokens and isinstance(
self.combine_documents_chain, StuffDocumentsChain
):
tokens = [
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(
self, inputs: dict[str, Any], *, run_manager: CallbackManagerForChainRun
) -> list[Document]:
question = inputs[self.question_key]
docs = self.vectorstore.similarity_search(
question, k=self.k, **self.search_kwargs
)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(
self, inputs: dict[str, Any], *, run_manager: AsyncCallbackManagerForChainRun
) -> list[Document]:
msg = "VectorDBQAWithSourcesChain does not support async"
raise NotImplementedError(msg)
@model_validator(mode="before")
@classmethod
def raise_deprecation(cls, values: dict) -> Any:
warnings.warn(
"`VectorDBQAWithSourcesChain` is deprecated - "
"please use `from langchain.chains import RetrievalQAWithSourcesChain`"
)
return values
@property
def _chain_type(self) -> str:
return "vector_db_qa_with_sources_chain"
|
from langchain_exa import (
    ExaSearchResults,  # type: ignore[import-not-found]
)
def test_search_tool() -> None:
tool = ExaSearchResults()
res = tool.invoke({"query": "best time to visit japan", "num_results": 5})
print(res) # noqa: T201
    assert not isinstance(res, str)  # str means error for this tool
def test_search_tool_advanced_features() -> None:
tool = ExaSearchResults()
res = tool.invoke(
{
"query": "best time to visit japan",
"num_results": 3,
"text_contents_options": {"max_characters": 1000},
"summary": True,
"type": "auto",
}
)
print(res) # noqa: T201
assert not isinstance(res, str) # str means error for this tool
assert len(res.results) == 3
# Verify summary exists
assert hasattr(res.results[0], "summary")
# Verify text was limited
assert len(res.results[0].text) <= 1000
|
from langchain_exa import (
    ExaSearchResults,  # type: ignore[import-not-found]
)
def test_search_tool() -> None:
tool = ExaSearchResults()
res = tool.invoke({"query": "best time to visit japan", "num_results": 5})
print(res) # noqa: T201
    assert not isinstance(res, str)  # str means error for this tool
|
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.ollama import OllamaEmbedding
def test_embedding_class():
emb = OllamaEmbedding(
model_name="", client_kwargs={"headers": {"Authorization": "Bearer token"}}
)
assert isinstance(emb, BaseEmbedding)
|
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.ollama import OllamaEmbedding
def test_embedding_class():
emb = OllamaEmbedding(model_name="")
assert isinstance(emb, BaseEmbedding)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .image import imrenormalize
from .make_divisible import make_divisible
# Disable yapf because it conflicts with isort.
# yapf: disable
from .misc import (align_tensor, aligned_bilinear, center_of_mass,
empty_instances, filter_gt_instances,
filter_scores_and_topk, flip_tensor, generate_coordinate,
images_to_levels, interpolate_as, levels_to_images,
mask2ndarray, multi_apply, relative_coordinate_maps,
rename_loss_dict, reweight_loss_dict,
samplelist_boxtype2tensor, select_single_mlvl,
sigmoid_geometric_mean, unfold_wo_center, unmap,
unpack_gt_instances)
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
from .vlfuse_helper import BertEncoderLayer, VLFuse, permute_and_flatten
__all__ = [
'gaussian_radius', 'gen_gaussian_target', 'make_divisible',
'get_local_maximum', 'get_topk_from_heatmap', 'transpose_and_gather_feat',
'interpolate_as', 'sigmoid_geometric_mean', 'gather_feat',
'preprocess_panoptic_gt', 'get_uncertain_point_coords_with_randomness',
'get_uncertainty', 'unpack_gt_instances', 'empty_instances',
'center_of_mass', 'filter_scores_and_topk', 'flip_tensor',
'generate_coordinate', 'levels_to_images', 'mask2ndarray', 'multi_apply',
'select_single_mlvl', 'unmap', 'images_to_levels',
'samplelist_boxtype2tensor', 'filter_gt_instances', 'rename_loss_dict',
'reweight_loss_dict', 'relative_coordinate_maps', 'aligned_bilinear',
'unfold_wo_center', 'imrenormalize', 'VLFuse', 'permute_and_flatten',
'BertEncoderLayer', 'align_tensor'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .image import imrenormalize
from .make_divisible import make_divisible
from .misc import (aligned_bilinear, center_of_mass, empty_instances,
filter_gt_instances, filter_scores_and_topk, flip_tensor,
generate_coordinate, images_to_levels, interpolate_as,
levels_to_images, mask2ndarray, multi_apply,
relative_coordinate_maps, rename_loss_dict,
reweight_loss_dict, samplelist_boxtype2tensor,
select_single_mlvl, sigmoid_geometric_mean,
unfold_wo_center, unmap, unpack_gt_instances)
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
from .vlfuse_helper import BertEncoderLayer, VLFuse, permute_and_flatten
__all__ = [
'gaussian_radius', 'gen_gaussian_target', 'make_divisible',
'get_local_maximum', 'get_topk_from_heatmap', 'transpose_and_gather_feat',
'interpolate_as', 'sigmoid_geometric_mean', 'gather_feat',
'preprocess_panoptic_gt', 'get_uncertain_point_coords_with_randomness',
'get_uncertainty', 'unpack_gt_instances', 'empty_instances',
'center_of_mass', 'filter_scores_and_topk', 'flip_tensor',
'generate_coordinate', 'levels_to_images', 'mask2ndarray', 'multi_apply',
'select_single_mlvl', 'unmap', 'images_to_levels',
'samplelist_boxtype2tensor', 'filter_gt_instances', 'rename_loss_dict',
'reweight_loss_dict', 'relative_coordinate_maps', 'aligned_bilinear',
'unfold_wo_center', 'imrenormalize', 'VLFuse', 'permute_and_flatten',
'BertEncoderLayer'
]
|
import logging
import sys
import traceback
from datasets import Dataset, load_dataset
from peft import LoraConfig, TaskType
from sentence_transformers import (
SentenceTransformer,
SentenceTransformerModelCardData,
SentenceTransformerTrainer,
SentenceTransformerTrainingArguments,
)
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.losses import CachedMultipleNegativesRankingLoss
from sentence_transformers.training_args import BatchSamplers
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Hugging Face pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
model_name_only = model_name.split("/")[-1]
# 1. Load a model to finetune with 2. (Optional) model card data
model = SentenceTransformer(
model_name,
model_card_data=SentenceTransformerModelCardData(
language="en",
license="apache-2.0",
model_name=f"{model_name_only} adapter finetuned on GooAQ pairs",
),
)
# Create a LoRA adapter for the model
peft_config = LoraConfig(
task_type=TaskType.FEATURE_EXTRACTION,
inference_mode=False,
r=64,
lora_alpha=128,
lora_dropout=0.1,
)
model.add_adapter(peft_config)
# 3. Load a dataset to finetune on
dataset = load_dataset("sentence-transformers/gooaq", split="train")
dataset_dict = dataset.train_test_split(test_size=10_000, seed=12)
train_dataset: Dataset = dataset_dict["train"].select(range(1_000_000))
eval_dataset: Dataset = dataset_dict["test"]
# 4. Define a loss function
loss = CachedMultipleNegativesRankingLoss(model, mini_batch_size=32)
# 5. (Optional) Specify training arguments
run_name = f"{model_name_only}-gooaq-peft"
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=f"models/{run_name}",
# Optional training parameters:
num_train_epochs=1,
per_device_train_batch_size=1024,
per_device_eval_batch_size=1024,
learning_rate=2e-5,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=25,
logging_first_step=True,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. (Optional) Create an evaluator & evaluate the base model
# The full corpus, but only the evaluation queries
dev_evaluator = NanoBEIREvaluator()
dev_evaluator(model)
# 7. Create a trainer & train
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
trainer.train()
# (Optional) Evaluate the trained model on the evaluator after training
dev_evaluator(model)
# 8. Save the trained model
final_output_dir = f"models/{run_name}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
|
import logging
import sys
import traceback
from datasets import Dataset, load_dataset
from peft import LoraConfig, TaskType
from sentence_transformers import (
SentenceTransformer,
SentenceTransformerModelCardData,
SentenceTransformerTrainer,
SentenceTransformerTrainingArguments,
)
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.losses import MultipleNegativesRankingLoss
from sentence_transformers.training_args import BatchSamplers
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Hugging Face pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
model_name_only = model_name.split("/")[-1]
# 1. Load a model to finetune with 2. (Optional) model card data
model = SentenceTransformer(
model_name,
model_card_data=SentenceTransformerModelCardData(
language="en",
license="apache-2.0",
model_name=f"{model_name_only} adapter finetuned on GooAQ pairs",
),
)
# Create a LoRA adapter for the model
peft_config = LoraConfig(
task_type=TaskType.FEATURE_EXTRACTION,
inference_mode=False,
r=64,
lora_alpha=128,
lora_dropout=0.1,
)
model.add_adapter(peft_config)
# 3. Load a dataset to finetune on
dataset = load_dataset("sentence-transformers/gooaq", split="train")
dataset_dict = dataset.train_test_split(test_size=10_000, seed=12)
train_dataset: Dataset = dataset_dict["train"].select(range(1_000_000))
eval_dataset: Dataset = dataset_dict["test"]
# 4. Define a loss function
loss = MultipleNegativesRankingLoss(model)
# 5. (Optional) Specify training arguments
run_name = f"{model_name_only}-gooaq-peft"
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=f"models/{run_name}",
# Optional training parameters:
num_train_epochs=1,
per_device_train_batch_size=1024,
per_device_eval_batch_size=1024,
learning_rate=2e-5,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=25,
logging_first_step=True,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. (Optional) Create an evaluator & evaluate the base model
# The full corpus, but only the evaluation queries
dev_evaluator = NanoBEIREvaluator(batch_size=1024)
dev_evaluator(model)
# 7. Create a trainer & train
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
trainer.train()
# (Optional) Evaluate the trained model on the evaluator after training
dev_evaluator(model)
# 8. Save the trained model
final_output_dir = f"models/{run_name}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
|
from ..utils import is_torch_available
if is_torch_available():
from .faster_cache import FasterCacheConfig, apply_faster_cache
from .group_offloading import apply_group_offloading
from .hooks import HookRegistry, ModelHook
from .layerwise_casting import apply_layerwise_casting, apply_layerwise_casting_hook
from .pyramid_attention_broadcast import PyramidAttentionBroadcastConfig, apply_pyramid_attention_broadcast
|
from ..utils import is_torch_available
if is_torch_available():
from .group_offloading import apply_group_offloading
from .hooks import HookRegistry, ModelHook
from .layerwise_casting import apply_layerwise_casting, apply_layerwise_casting_hook
from .pyramid_attention_broadcast import PyramidAttentionBroadcastConfig, apply_pyramid_attention_broadcast
|
"""
This script trains a Masked Language Model (MLM). You provide a training file. Each line is interpreted as a sentence / paragraph.
Optionally, you can also provide a dev file.
The fine-tuned model is stored in the output/model_name folder.
Usage:
python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt]
"""
import gzip
import sys
from datetime import datetime
from transformers import (
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForWholeWordMask,
Trainer,
TrainingArguments,
)
if len(sys.argv) < 3:
print("Usage: python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt]")
exit()
model_name = sys.argv[1]
per_device_train_batch_size = 64
save_steps = 1000 # Save model every 1k steps
num_train_epochs = 3 # Number of epochs
use_fp16 = False # Set to True, if your GPU supports FP16 operations
max_length = 100 # Max length for a text input
do_whole_word_mask = True # If set to true, whole words are masked
mlm_prob = 0.15 # Probability that a word is replaced by a [MASK] token
# Load the model
model = AutoModelForMaskedLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
output_dir = "output/{}-{}".format(model_name.replace("/", "_"), datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
print("Save checkpoints to:", output_dir)
##### Load our training datasets
train_sentences = []
train_path = sys.argv[2]
with (
gzip.open(train_path, "rt", encoding="utf8")
if train_path.endswith(".gz")
else open(train_path, encoding="utf8") as fIn
):
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
print("Train sentences:", len(train_sentences))
dev_sentences = []
if len(sys.argv) >= 4:
dev_path = sys.argv[3]
with (
gzip.open(dev_path, "rt", encoding="utf8")
if dev_path.endswith(".gz")
else open(dev_path, encoding="utf8") as fIn
):
for line in fIn:
line = line.strip()
if len(line) >= 10:
dev_sentences.append(line)
print("Dev sentences:", len(dev_sentences))
# A dataset wrapper that tokenizes our data on the fly
class TokenizedSentencesDataset:
def __init__(self, sentences, tokenizer, max_length, cache_tokenization=False):
self.tokenizer = tokenizer
self.sentences = sentences
self.max_length = max_length
self.cache_tokenization = cache_tokenization
def __getitem__(self, item):
if not self.cache_tokenization:
return self.tokenizer(
self.sentences[item],
add_special_tokens=True,
truncation=True,
max_length=self.max_length,
return_special_tokens_mask=True,
)
if isinstance(self.sentences[item], str):
self.sentences[item] = self.tokenizer(
self.sentences[item],
add_special_tokens=True,
truncation=True,
max_length=self.max_length,
return_special_tokens_mask=True,
)
return self.sentences[item]
def __len__(self):
return len(self.sentences)
train_dataset = TokenizedSentencesDataset(train_sentences, tokenizer, max_length)
dev_dataset = (
TokenizedSentencesDataset(dev_sentences, tokenizer, max_length, cache_tokenization=True)
if len(dev_sentences) > 0
else None
)
##### Training arguments
if do_whole_word_mask:
data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm=True, mlm_probability=mlm_prob)
else:
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=mlm_prob)
training_args = TrainingArguments(
output_dir=output_dir,
overwrite_output_dir=True,
num_train_epochs=num_train_epochs,
evaluation_strategy="steps" if dev_dataset is not None else "no",
per_device_train_batch_size=per_device_train_batch_size,
eval_steps=save_steps,
save_steps=save_steps,
logging_steps=save_steps,
save_total_limit=1,
prediction_loss_only=True,
fp16=use_fp16,
)
trainer = Trainer(
model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=dev_dataset
)
print("Save tokenizer to:", output_dir)
tokenizer.save_pretrained(output_dir)
trainer.train()
print("Save model to:", output_dir)
model.save_pretrained(output_dir)
print("Training done")
|
"""
This script trains a Masked Language Model (MLM). You provide a training file. Each line is interpreted as a sentence / paragraph.
Optionally, you can also provide a dev file.
The fine-tuned model is stored in the output/model_name folder.
Usage:
python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt]
"""
import gzip
import sys
from datetime import datetime
from transformers import (
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForWholeWordMask,
Trainer,
TrainingArguments,
)
if len(sys.argv) < 3:
print("Usage: python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt]")
exit()
model_name = sys.argv[1]
per_device_train_batch_size = 64
save_steps = 1000 # Save model every 1k steps
num_train_epochs = 3 # Number of epochs
use_fp16 = False # Set to True, if your GPU supports FP16 operations
max_length = 100 # Max length for a text input
do_whole_word_mask = True # If set to true, whole words are masked
mlm_prob = 0.15 # Probability that a word is replaced by a [MASK] token
# Load the model
model = AutoModelForMaskedLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
output_dir = "output/{}-{}".format(model_name.replace("/", "_"), datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
print("Save checkpoints to:", output_dir)
##### Load our training datasets
train_sentences = []
train_path = sys.argv[2]
with gzip.open(train_path, "rt", encoding="utf8") if train_path.endswith(".gz") else open(
train_path, encoding="utf8"
) as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
print("Train sentences:", len(train_sentences))
dev_sentences = []
if len(sys.argv) >= 4:
dev_path = sys.argv[3]
with gzip.open(dev_path, "rt", encoding="utf8") if dev_path.endswith(".gz") else open(
dev_path, encoding="utf8"
) as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
dev_sentences.append(line)
print("Dev sentences:", len(dev_sentences))
# A dataset wrapper that tokenizes our data on the fly
class TokenizedSentencesDataset:
def __init__(self, sentences, tokenizer, max_length, cache_tokenization=False):
self.tokenizer = tokenizer
self.sentences = sentences
self.max_length = max_length
self.cache_tokenization = cache_tokenization
def __getitem__(self, item):
if not self.cache_tokenization:
return self.tokenizer(
self.sentences[item],
add_special_tokens=True,
truncation=True,
max_length=self.max_length,
return_special_tokens_mask=True,
)
if isinstance(self.sentences[item], str):
self.sentences[item] = self.tokenizer(
self.sentences[item],
add_special_tokens=True,
truncation=True,
max_length=self.max_length,
return_special_tokens_mask=True,
)
return self.sentences[item]
def __len__(self):
return len(self.sentences)
train_dataset = TokenizedSentencesDataset(train_sentences, tokenizer, max_length)
dev_dataset = (
TokenizedSentencesDataset(dev_sentences, tokenizer, max_length, cache_tokenization=True)
if len(dev_sentences) > 0
else None
)
##### Training arguments
if do_whole_word_mask:
data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm=True, mlm_probability=mlm_prob)
else:
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=mlm_prob)
training_args = TrainingArguments(
output_dir=output_dir,
overwrite_output_dir=True,
num_train_epochs=num_train_epochs,
evaluation_strategy="steps" if dev_dataset is not None else "no",
per_device_train_batch_size=per_device_train_batch_size,
eval_steps=save_steps,
save_steps=save_steps,
logging_steps=save_steps,
save_total_limit=1,
prediction_loss_only=True,
fp16=use_fp16,
)
trainer = Trainer(
model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=dev_dataset
)
print("Save tokenizer to:", output_dir)
tokenizer.save_pretrained(output_dir)
trainer.train()
print("Save model to:", output_dir)
model.save_pretrained(output_dir)
print("Training done")
|
"""
This script loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
CT will be trained using these sentences. Checkpoints are stored every 500 steps in the output folder.
Usage:
python train_ct_from_file.py path/to/sentences.txt
"""
import math
from sentence_transformers import models, losses
from sentence_transformers import LoggingHandler, SentenceTransformer
import logging
from datetime import datetime
import gzip
import sys
import tqdm
from torch.utils.data import DataLoader
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 128
num_epochs = 1
max_seq_length = 75
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print("Run this script with: python {} path/to/sentences.txt".format(sys.argv[0]))
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ""
if len(sys.argv) >= 3:
output_name = "-" + sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = "output/train_ct-improved{}-{}".format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling to get one fixed-sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Read the train corpus #################
train_sentences = []
with gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(
filepath, encoding="utf8"
) as fIn:
for line in tqdm.tqdm(fIn, desc="Read file"):
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
logging.info("Train sentences: {}".format(len(train_sentences)))
# We use a regular torch DataLoader and losses.ContrastiveTensionLossInBatchNegatives as the loss
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.ContrastiveTensionLossInBatchNegatives(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=warmup_steps,
optimizer_params={"lr": 5e-5},
checkpoint_path=model_output_path,
show_progress_bar=True,
use_amp=False, # Set to True, if your GPU supports FP16 cores
)
|
"""
This script loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
CT will be trained using these sentences. Checkpoints are stored every 500 steps in the output folder.
Usage:
python train_ct_from_file.py path/to/sentences.txt
"""
import math
from sentence_transformers import models, losses
from sentence_transformers import LoggingHandler, SentenceTransformer
import logging
from datetime import datetime
import gzip
import sys
import tqdm
from torch.utils.data import DataLoader
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 128
num_epochs = 1
max_seq_length = 75
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print("Run this script with: python {} path/to/sentences.txt".format(sys.argv[0]))
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ""
if len(sys.argv) >= 3:
output_name = "-" + sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = "output/train_ct-improved{}-{}".format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling to get one fixed-sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Read the train corpus #################
train_sentences = []
with gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(
filepath, encoding="utf8"
) as fIn:
for line in tqdm.tqdm(fIn, desc="Read file"):
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
logging.info("Train sentences: {}".format(len(train_sentences)))
# We use a regular torch DataLoader and losses.ContrastiveTensionLossInBatchNegatives as the loss
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.ContrastiveTensionLossInBatchNegatives(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=warmup_steps,
optimizer_params={"lr": 5e-5},
checkpoint_path=model_output_path,
show_progress_bar=True,
use_amp=False, # Set to True, if your GPU supports FP16 cores
)
|
from typing import List
from pydantic import BaseModel
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class ContentRetrievalSettings(BaseModel):
text: dict = SchemaField(
description="Text content settings",
default={"maxCharacters": 1000, "includeHtmlTags": False},
advanced=True,
)
highlights: dict = SchemaField(
description="Highlight settings",
default={
"numSentences": 3,
"highlightsPerUrl": 3,
"query": "",
},
advanced=True,
)
summary: dict = SchemaField(
description="Summary settings",
default={"query": ""},
advanced=True,
)
class ExaContentsBlock(Block):
class Input(BlockSchema):
credentials: ExaCredentialsInput = ExaCredentialsField()
ids: List[str] = SchemaField(
description="Array of document IDs obtained from searches",
)
contents: ContentRetrievalSettings = SchemaField(
description="Content retrieval settings",
default=ContentRetrievalSettings(),
advanced=True,
)
class Output(BlockSchema):
results: list = SchemaField(
description="List of document contents",
default=[],
)
def __init__(self):
super().__init__(
id="c52be83f-f8cd-4180-b243-af35f986b461",
description="Retrieves document contents using Exa's contents API",
categories={BlockCategory.SEARCH},
input_schema=ExaContentsBlock.Input,
output_schema=ExaContentsBlock.Output,
)
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/contents"
headers = {
"Content-Type": "application/json",
"x-api-key": credentials.api_key.get_secret_value(),
}
payload = {
"ids": input_data.ids,
"text": input_data.contents.text,
"highlights": input_data.contents.highlights,
"summary": input_data.contents.summary,
}
try:
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
yield "results", []
|
from typing import List, Optional
from pydantic import BaseModel
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class ContentRetrievalSettings(BaseModel):
text: dict = SchemaField(
description="Text content settings",
default={"maxCharacters": 1000, "includeHtmlTags": False},
advanced=True,
)
highlights: dict = SchemaField(
description="Highlight settings",
default={
"numSentences": 3,
"highlightsPerUrl": 3,
"query": "",
},
advanced=True,
)
summary: dict = SchemaField(
description="Summary settings",
default={"query": ""},
advanced=True,
)
class ExaContentsBlock(Block):
class Input(BlockSchema):
credentials: ExaCredentialsInput = ExaCredentialsField()
ids: List[str] = SchemaField(
description="Array of document IDs obtained from searches",
)
contents: ContentRetrievalSettings = SchemaField(
description="Content retrieval settings",
default=ContentRetrievalSettings(),
advanced=True,
)
class Output(BlockSchema):
results: list = SchemaField(
description="List of document contents",
default=[],
)
def __init__(self):
super().__init__(
id="c52be83f-f8cd-4180-b243-af35f986b461",
description="Retrieves document contents using Exa's contents API",
categories={BlockCategory.SEARCH},
input_schema=ExaContentsBlock.Input,
output_schema=ExaContentsBlock.Output,
)
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/contents"
headers = {
"Content-Type": "application/json",
"x-api-key": credentials.api_key.get_secret_value(),
}
payload = {
"ids": input_data.ids,
"text": input_data.contents.text,
"highlights": input_data.contents.highlights,
"summary": input_data.contents.summary,
}
try:
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
yield "results", []
|
_base_ = './htc_x101_32x4d_fpn_16x1_20e_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
groups=64,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = './htc_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
data = dict(samples_per_gpu=1, workers_per_gpu=1)
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
|
from torchaudio._internal import module_utils as _mod_utils
from . import ffmpeg_utils, sox_utils
from .download import download_asset
if _mod_utils.is_sox_available():
sox_utils.set_verbosity(0)
__all__ = [
"download_asset",
"sox_utils",
"ffmpeg_utils",
]
|
from torchaudio._internal import module_utils as _mod_utils
from . import sox_utils
from .download import download_asset
if _mod_utils.is_sox_available():
sox_utils.set_verbosity(0)
__all__ = [
"download_asset",
"sox_utils",
]
|
import os
from pathlib import Path
from typing import List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "waves_yesno",
"url": "http://www.openslr.org/resources/1/waves_yesno.tar.gz",
"checksum": "c3f49e0cca421f96b75b41640749167b52118f232498667ca7a5f9416aef8e73",
}
}
class YESNO(Dataset):
"""*YesNo* :cite:`YesNo` dataset.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"http://www.openslr.org/resources/1/waves_yesno.tar.gz"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"waves_yesno"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
archive = os.path.basename(url)
archive = root / archive
self._path = root / folder_in_archive
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
extract_archive(archive)
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found. Please use `download=True` to download it.")
self._walker = sorted(str(p.stem) for p in Path(self._path).glob("*.wav"))
def _load_item(self, fileid: str, path: str):
labels = [int(c) for c in fileid.split("_")]
file_audio = os.path.join(path, fileid + ".wav")
waveform, sample_rate = torchaudio.load(file_audio)
return waveform, sample_rate, labels
def __getitem__(self, n: int) -> Tuple[Tensor, int, List[int]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
Tuple of the following items;
Tensor:
Waveform
int:
Sample rate
List[int]:
labels
"""
fileid = self._walker[n]
item = self._load_item(fileid, self._path)
return item
def __len__(self) -> int:
return len(self._walker)
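# Minimal usage sketch (illustrative, not part of the original file). The YesNo corpus is
# a small download of 60 recordings, so this is cheap to run end to end.
if __name__ == "__main__":
    os.makedirs("./data", exist_ok=True)
    dataset = YESNO(root="./data", download=True)
    waveform, sample_rate, labels = dataset[0]
    print(waveform.shape, sample_rate, labels)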
|
import os
from pathlib import Path
from typing import List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "waves_yesno",
"url": "http://www.openslr.org/resources/1/waves_yesno.tar.gz",
"checksum": "c3f49e0cca421f96b75b41640749167b52118f232498667ca7a5f9416aef8e73",
}
}
class YESNO(Dataset):
"""Create a Dataset for *YesNo* :cite:`YesNo`.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"http://www.openslr.org/resources/1/waves_yesno.tar.gz"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"waves_yesno"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
archive = os.path.basename(url)
archive = root / archive
self._path = root / folder_in_archive
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
extract_archive(archive)
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found. Please use `download=True` to download it.")
self._walker = sorted(str(p.stem) for p in Path(self._path).glob("*.wav"))
def _load_item(self, fileid: str, path: str):
labels = [int(c) for c in fileid.split("_")]
file_audio = os.path.join(path, fileid + ".wav")
waveform, sample_rate = torchaudio.load(file_audio)
return waveform, sample_rate, labels
def __getitem__(self, n: int) -> Tuple[Tensor, int, List[int]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, List[int]): ``(waveform, sample_rate, labels)``
"""
fileid = self._walker[n]
item = self._load_item(fileid, self._path)
return item
def __len__(self) -> int:
return len(self._walker)
|
import os
class Settings:
def __init__(self):
self.JWT_SECRET_KEY: str = os.getenv("SUPABASE_JWT_SECRET", "")
self.ENABLE_AUTH: bool = os.getenv("ENABLE_AUTH", "false").lower() == "true"
self.JWT_ALGORITHM: str = "HS256"
@property
def is_configured(self) -> bool:
return bool(self.JWT_SECRET_KEY)
settings = Settings()
|
import os
from dotenv import load_dotenv
load_dotenv()
class Settings:
JWT_SECRET_KEY: str = os.getenv("SUPABASE_JWT_SECRET", "")
ENABLE_AUTH: bool = os.getenv("ENABLE_AUTH", "false").lower() == "true"
JWT_ALGORITHM: str = "HS256"
@property
def is_configured(self) -> bool:
return bool(self.JWT_SECRET_KEY)
settings = Settings()
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput, SchedulerMixin, UNet2DModel
class CustomLocalPipeline(DiffusionPipeline):
r"""
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Parameters:
unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
[`DDPMScheduler`], or [`DDIMScheduler`].
"""
def __init__(self, unet: UNet2DModel, scheduler: SchedulerMixin):
super().__init__()
self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__(
self,
batch_size: int = 1,
generator: Optional[torch.Generator] = None,
num_inference_steps: int = 50,
output_type: Optional[str] = "pil",
return_dict: bool = True,
**kwargs,
) -> Union[ImagePipelineOutput, Tuple]:
r"""
Args:
batch_size (`int`, *optional*, defaults to 1):
The number of images to generate.
generator (`torch.Generator`, *optional*):
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
deterministic.
eta (`float`, *optional*, defaults to 0.0):
The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
generated images.
"""
# Sample gaussian noise to begin loop
image = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
generator=generator,
)
image = image.to(self.device)
# set step values
self.scheduler.set_timesteps(num_inference_steps)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
model_output = self.unet(image, t).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
image = self.scheduler.step(model_output, t, image).prev_sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=image), "This is a local test"
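# Hedged usage sketch (illustrative, not part of the original file): run the pipeline with
# a tiny, randomly initialized UNet and a DDPM scheduler just to exercise the call
# signature; the generated image is meaningless without trained weights.
if __name__ == "__main__":
    from diffusers import DDPMScheduler
    unet = UNet2DModel(
        sample_size=32,
        in_channels=3,
        out_channels=3,
        layers_per_block=1,
        block_out_channels=(32, 64),
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    scheduler = DDPMScheduler(num_train_timesteps=1000)
    pipeline = CustomLocalPipeline(unet=unet, scheduler=scheduler)
    output, message = pipeline(batch_size=1, num_inference_steps=2)
    print(len(output.images), message)  # -> 1 "This is a local test"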
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput, SchedulerMixin, UNet2DModel
class CustomLocalPipeline(DiffusionPipeline):
r"""
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Parameters:
unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
[`DDPMScheduler`], or [`DDIMScheduler`].
"""
def __init__(self, unet: UNet2DModel, scheduler: SchedulerMixin):
super().__init__()
self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__(
self,
batch_size: int = 1,
generator: Optional[torch.Generator] = None,
num_inference_steps: int = 50,
output_type: Optional[str] = "pil",
return_dict: bool = True,
**kwargs,
) -> Union[ImagePipelineOutput, Tuple]:
r"""
Args:
batch_size (`int`, *optional*, defaults to 1):
The number of images to generate.
generator (`torch.Generator`, *optional*):
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
deterministic.
eta (`float`, *optional*, defaults to 0.0):
The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
generated images.
"""
# Sample gaussian noise to begin loop
image = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
generator=generator,
)
image = image.to(self.device)
# set step values
self.scheduler.set_timesteps(num_inference_steps)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
model_output = self.unet(image, t).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
image = self.scheduler.step(model_output, t, image).prev_sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=image), "This is a local test"
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .atss_vlfusion_head import ATSSVLFusionHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .conditional_detr_head import ConditionalDETRHead
from .corner_head import CornerHead
from .dab_detr_head import DABDETRHead
from .ddod_head import DDODHead
from .ddq_detr_head import DDQDETRHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .dino_head import DINOHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .grounding_dino_head import GroundingDINOHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead',
'BoxInstBboxHead', 'BoxInstMaskHead', 'ConditionalDETRHead', 'DINOHead',
'ATSSVLFusionHead', 'DABDETRHead', 'DDQDETRHead', 'GroundingDINOHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .atss_vlfusion_head import ATSSVLFusionHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .conditional_detr_head import ConditionalDETRHead
from .corner_head import CornerHead
from .dab_detr_head import DABDETRHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .dino_head import DINOHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead',
'BoxInstBboxHead', 'BoxInstMaskHead', 'ConditionalDETRHead', 'DINOHead',
'ATSSVLFusionHead', 'DABDETRHead'
]
|
from setuptools import setup, find_packages
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="2.6.0.dev0",
author="Nils Reimers",
author_email="info@nils-reimers.de",
description="Multilingual text embeddings",
long_description=readme,
long_description_content_type="text/markdown",
license="Apache License 2.0",
url="https://www.SBERT.net",
download_url="https://github.com/UKPLab/sentence-transformers/",
packages=find_packages(),
python_requires=">=3.8.0",
install_requires=[
"transformers>=4.32.0,<5.0.0",
"tqdm",
"torch>=1.11.0",
"numpy",
"scikit-learn",
"scipy",
"huggingface-hub>=0.15.1",
"Pillow",
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning",
)
|
from setuptools import setup, find_packages
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="2.5.0.dev0",
author="Nils Reimers",
author_email="info@nils-reimers.de",
description="Multilingual text embeddings",
long_description=readme,
long_description_content_type="text/markdown",
license="Apache License 2.0",
url="https://www.SBERT.net",
download_url="https://github.com/UKPLab/sentence-transformers/",
packages=find_packages(),
python_requires=">=3.8.0",
install_requires=[
"transformers>=4.32.0,<5.0.0",
"tqdm",
"torch>=1.11.0",
"numpy",
"scikit-learn",
"scipy",
"huggingface-hub>=0.15.1",
"Pillow",
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning",
)
|
from dataclasses import dataclass, field
from typing import Any, Dict, Type
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index.abstract import BaseDocIndex
from docarray.typing import NdArray
pytestmark = pytest.mark.index
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dim=1000)
class FakeQueryBuilder:
...
@dataclass
class DBConfig(BaseDocIndex.DBConfig):
work_dir: str = '.'
other: int = 5
default_column_config: Dict[Type, Dict[str, Any]] = field(
default_factory=lambda: {
str: {
'dim': 128,
'space': 'l2',
},
}
)
default_ef: int = 50
@dataclass
class RuntimeConfig(BaseDocIndex.RuntimeConfig):
pass
def _identity(*x, **y):
return x, y
class DummyDocIndex(BaseDocIndex):
DBConfig = DBConfig
RuntimeConfig = RuntimeConfig
def python_type_to_db_type(self, x):
return str
_index = _identity
num_docs = _identity
_del_items = _identity
_get_items = _identity
execute_query = _identity
_find = _identity
_find_batched = _identity
_filter = _identity
_filter_batched = _identity
_text_search = _identity
_text_search_batched = _identity
def test_defaults():
index = DummyDocIndex[SimpleDoc]()
assert index._db_config.other == 5
assert index._db_config.work_dir == '.'
assert index._runtime_config.default_column_config[str] == {
'dim': 128,
'space': 'l2',
}
def test_set_by_class():
# change all settings
index = DummyDocIndex[SimpleDoc](DBConfig(work_dir='hi', other=10))
assert index._db_config.other == 10
assert index._db_config.work_dir == 'hi'
index.configure(RuntimeConfig(default_column_config={}, default_ef=10))
assert index._runtime_config.default_column_config == {}
# change only some settings
index = DummyDocIndex[SimpleDoc](DBConfig(work_dir='hi'))
assert index._db_config.other == 5
assert index._db_config.work_dir == 'hi'
index.configure(RuntimeConfig(default_column_config={}))
assert index._runtime_config.default_column_config == {}
def test_set_by_kwargs():
# change all settings
index = DummyDocIndex[SimpleDoc](work_dir='hi', other=10)
assert index._db_config.other == 10
assert index._db_config.work_dir == 'hi'
index.configure(default_column_config={}, default_ef=10)
assert index._runtime_config.default_column_config == {}
# change only some settings
index = DummyDocIndex[SimpleDoc](work_dir='hi')
assert index._db_config.other == 5
assert index._db_config.work_dir == 'hi'
index.configure(default_column_config={})
assert index._runtime_config.default_column_config == {}
def test_default_column_config():
index = DummyDocIndex[SimpleDoc]()
assert index._runtime_config.default_column_config == {
str: {
'dim': 128,
'space': 'l2',
},
}
|
from dataclasses import dataclass, field
from typing import Any, Dict, Type
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index.abstract import BaseDocIndex
from docarray.typing import NdArray
pytestmark = pytest.mark.index
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dim=1000)
class FakeQueryBuilder:
...
@dataclass
class DBConfig(BaseDocIndex.DBConfig):
work_dir: str = '.'
other: int = 5
@dataclass
class RuntimeConfig(BaseDocIndex.RuntimeConfig):
default_column_config: Dict[Type, Dict[str, Any]] = field(
default_factory=lambda: {
str: {
'dim': 128,
'space': 'l2',
},
}
)
default_ef: int = 50
def _identity(*x, **y):
return x, y
class DummyDocIndex(BaseDocIndex):
DBConfig = DBConfig
RuntimeConfig = RuntimeConfig
def python_type_to_db_type(self, x):
return str
_index = _identity
num_docs = _identity
_del_items = _identity
_get_items = _identity
execute_query = _identity
_find = _identity
_find_batched = _identity
_filter = _identity
_filter_batched = _identity
_text_search = _identity
_text_search_batched = _identity
def test_defaults():
index = DummyDocIndex[SimpleDoc]()
assert index._db_config.other == 5
assert index._db_config.work_dir == '.'
assert index._runtime_config.default_column_config[str] == {
'dim': 128,
'space': 'l2',
}
def test_set_by_class():
# change all settings
index = DummyDocIndex[SimpleDoc](DBConfig(work_dir='hi', other=10))
assert index._db_config.other == 10
assert index._db_config.work_dir == 'hi'
index.configure(RuntimeConfig(default_column_config={}, default_ef=10))
assert index._runtime_config.default_column_config == {}
# change only some settings
index = DummyDocIndex[SimpleDoc](DBConfig(work_dir='hi'))
assert index._db_config.other == 5
assert index._db_config.work_dir == 'hi'
index.configure(RuntimeConfig(default_column_config={}))
assert index._runtime_config.default_column_config == {}
def test_set_by_kwargs():
# change all settings
index = DummyDocIndex[SimpleDoc](work_dir='hi', other=10)
assert index._db_config.other == 10
assert index._db_config.work_dir == 'hi'
index.configure(default_column_config={}, default_ef=10)
assert index._runtime_config.default_column_config == {}
# change only some settings
index = DummyDocIndex[SimpleDoc](work_dir='hi')
assert index._db_config.other == 5
assert index._db_config.work_dir == 'hi'
index.configure(default_column_config={})
assert index._runtime_config.default_column_config == {}
def test_default_column_config():
index = DummyDocIndex[SimpleDoc]()
assert index._runtime_config.default_column_config == {
str: {
'dim': 128,
'space': 'l2',
},
}
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from collections.abc import Sequence
from pathlib import Path
import mmcv
import numpy as np
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.builder import build_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--skip-type',
type=str,
nargs='+',
default=['DefaultFormatBundle', 'Normalize', 'Collect'],
help='skip some useless pipeline')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def retrieve_data_cfg(config_path, skip_type, cfg_options):
def skip_pipeline_steps(config):
config['pipeline'] = [
x for x in config.pipeline if x['type'] not in skip_type
]
cfg = Config.fromfile(config_path)
if cfg_options is not None:
cfg.merge_from_dict(cfg_options)
train_data_cfg = cfg.data.train
while 'dataset' in train_data_cfg and train_data_cfg[
'type'] != 'MultiImageMixDataset':
train_data_cfg = train_data_cfg['dataset']
if isinstance(train_data_cfg, Sequence):
[skip_pipeline_steps(c) for c in train_data_cfg]
else:
skip_pipeline_steps(train_data_cfg)
return cfg
def main():
args = parse_args()
cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options)
if 'gt_semantic_seg' in cfg.train_pipeline[-1]['keys']:
cfg.data.train.pipeline = [
p for p in cfg.data.train.pipeline if p['type'] != 'SegRescale'
]
dataset = build_dataset(cfg.data.train)
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
filename = os.path.join(args.output_dir,
Path(item['filename']).name
) if args.output_dir is not None else None
gt_bboxes = item['gt_bboxes']
gt_labels = item['gt_labels']
gt_masks = item.get('gt_masks', None)
if gt_masks is not None:
gt_masks = mask2ndarray(gt_masks)
gt_seg = item.get('gt_semantic_seg', None)
if gt_seg is not None:
pad_value = 255 # the padding value of gt_seg
sem_labels = np.unique(gt_seg)
all_labels = np.concatenate((gt_labels, sem_labels), axis=0)
all_labels, counts = np.unique(all_labels, return_counts=True)
stuff_labels = all_labels[np.logical_and(counts < 2,
all_labels != pad_value)]
stuff_masks = gt_seg[None] == stuff_labels[:, None, None]
gt_labels = np.concatenate((gt_labels, stuff_labels), axis=0)
gt_masks = np.concatenate((gt_masks, stuff_masks.astype(np.uint8)),
axis=0)
# If you need to show the bounding boxes,
# please comment the following line
gt_bboxes = None
imshow_det_bboxes(
item['img'],
gt_bboxes,
gt_labels,
gt_masks,
class_names=dataset.CLASSES,
show=not args.not_show,
wait_time=args.show_interval,
out_file=filename,
bbox_color=dataset.PALETTE,
text_color=(200, 200, 200),
mask_color=dataset.PALETTE)
progress_bar.update()
if __name__ == '__main__':
main()
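# Example invocation (illustrative config path and output directory only; adjust to
# your local setup):
#
#   python tools/browse_dataset.py configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
#       --output-dir work_dirs/browse_vis --not-show \
#       --cfg-options data.train.filter_empty_gt=False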
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from collections.abc import Sequence
from pathlib import Path
import mmcv
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.builder import build_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--skip-type',
type=str,
nargs='+',
default=['DefaultFormatBundle', 'Normalize', 'Collect'],
help='skip some useless pipeline')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def retrieve_data_cfg(config_path, skip_type, cfg_options):
def skip_pipeline_steps(config):
config['pipeline'] = [
x for x in config.pipeline if x['type'] not in skip_type
]
cfg = Config.fromfile(config_path)
if cfg_options is not None:
cfg.merge_from_dict(cfg_options)
train_data_cfg = cfg.data.train
while 'dataset' in train_data_cfg and train_data_cfg[
'type'] != 'MultiImageMixDataset':
train_data_cfg = train_data_cfg['dataset']
if isinstance(train_data_cfg, Sequence):
[skip_pipeline_steps(c) for c in train_data_cfg]
else:
skip_pipeline_steps(train_data_cfg)
return cfg
def main():
args = parse_args()
cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options)
dataset = build_dataset(cfg.data.train)
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
filename = os.path.join(args.output_dir,
Path(item['filename']).name
) if args.output_dir is not None else None
gt_masks = item.get('gt_masks', None)
if gt_masks is not None:
gt_masks = mask2ndarray(gt_masks)
imshow_det_bboxes(
item['img'],
item['gt_bboxes'],
item['gt_labels'],
gt_masks,
class_names=dataset.CLASSES,
show=not args.not_show,
wait_time=args.show_interval,
out_file=filename,
bbox_color=(255, 102, 61),
text_color=(255, 102, 61))
progress_bar.update()
if __name__ == '__main__':
main()
|
from .vggish_audio_encoder import VggishAudioEncoder
|
from .vggish_audio_encoder import VggishAudioEncoder
|
"""Question-answering with sources over a vector database."""
import warnings
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.vectorstores import VectorStore
from pydantic import Field, model_validator
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain):
"""Question-answering with sources over a vector database."""
vectorstore: VectorStore = Field(exclude=True)
"""Vector Database to connect to."""
k: int = 4
"""Number of results to return from store"""
reduce_k_below_max_tokens: bool = False
"""Reduce the number of results to return from store based on tokens limit"""
max_tokens_limit: int = 3375
"""Restrict the docs to return from store based on tokens,
    enforced only for StuffDocumentsChain and if reduce_k_below_max_tokens is set to true"""
search_kwargs: dict[str, Any] = Field(default_factory=dict)
"""Extra search args."""
def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
num_docs = len(docs)
if self.reduce_k_below_max_tokens and isinstance(
self.combine_documents_chain,
StuffDocumentsChain,
):
tokens = [
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(
self,
inputs: dict[str, Any],
*,
run_manager: CallbackManagerForChainRun,
) -> list[Document]:
question = inputs[self.question_key]
docs = self.vectorstore.similarity_search(
question,
k=self.k,
**self.search_kwargs,
)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(
self,
inputs: dict[str, Any],
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> list[Document]:
msg = "VectorDBQAWithSourcesChain does not support async"
raise NotImplementedError(msg)
@model_validator(mode="before")
@classmethod
def raise_deprecation(cls, values: dict) -> Any:
warnings.warn(
"`VectorDBQAWithSourcesChain` is deprecated - "
"please use `from langchain.chains import RetrievalQAWithSourcesChain`",
stacklevel=2,
)
return values
@property
def _chain_type(self) -> str:
return "vector_db_qa_with_sources_chain"
|
"""Question-answering with sources over a vector database."""
import warnings
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.vectorstores import VectorStore
from pydantic import Field, model_validator
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain):
"""Question-answering with sources over a vector database."""
vectorstore: VectorStore = Field(exclude=True)
"""Vector Database to connect to."""
k: int = 4
"""Number of results to return from store"""
reduce_k_below_max_tokens: bool = False
"""Reduce the number of results to return from store based on tokens limit"""
max_tokens_limit: int = 3375
"""Restrict the docs to return from store based on tokens,
    enforced only for StuffDocumentsChain and if reduce_k_below_max_tokens is set to true"""
search_kwargs: dict[str, Any] = Field(default_factory=dict)
"""Extra search args."""
def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
num_docs = len(docs)
if self.reduce_k_below_max_tokens and isinstance(
self.combine_documents_chain,
StuffDocumentsChain,
):
tokens = [
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(
self,
inputs: dict[str, Any],
*,
run_manager: CallbackManagerForChainRun,
) -> list[Document]:
question = inputs[self.question_key]
docs = self.vectorstore.similarity_search(
question,
k=self.k,
**self.search_kwargs,
)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(
self,
inputs: dict[str, Any],
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> list[Document]:
msg = "VectorDBQAWithSourcesChain does not support async"
raise NotImplementedError(msg)
@model_validator(mode="before")
@classmethod
def raise_deprecation(cls, values: dict) -> Any:
warnings.warn(
"`VectorDBQAWithSourcesChain` is deprecated - "
"please use `from langchain.chains import RetrievalQAWithSourcesChain`",
stacklevel=5,
)
return values
@property
def _chain_type(self) -> str:
return "vector_db_qa_with_sources_chain"
|
"""
Example of training with Dask on GPU
====================================
"""
import cupy as cp
import dask_cudf
from dask import array as da
from dask import dataframe as dd
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def using_dask_matrix(client: Client, X: da.Array, y: da.Array) -> da.Array:
    # DaskDMatrix acts like a normal DMatrix and works as a proxy for local DMatrices
    # scattered across the workers.
dtrain = DaskDMatrix(client, X, y)
# Use train method from xgboost.dask instead of xgboost. This distributed version
# of train returns a dictionary containing the resulting booster and evaluation
# history obtained from evaluation metrics.
output = dxgb.train(
client,
{
"verbosity": 2,
"tree_method": "hist",
# Golden line for GPU training
"device": "cuda",
},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
)
bst = output["booster"]
history = output["history"]
# you can pass output directly into `predict` too.
prediction = dxgb.predict(client, bst, dtrain)
print("Evaluation history:", history)
return prediction
def using_quantile_device_dmatrix(client: Client, X: da.Array, y: da.Array) -> da.Array:
"""`DaskQuantileDMatrix` is a data type specialized for `hist` tree methods for
reducing memory usage.
.. versionadded:: 1.2.0
"""
X = dask_cudf.from_dask_dataframe(dd.from_dask_array(X))
y = dask_cudf.from_dask_dataframe(dd.from_dask_array(y))
    # `DaskQuantileDMatrix` is used instead of `DaskDMatrix`; be careful that it cannot
    # be used for anything other than training unless a reference is specified. See
    # the `ref` argument of `DaskQuantileDMatrix`.
dtrain = dxgb.DaskQuantileDMatrix(client, X, y)
output = dxgb.train(
client,
{"verbosity": 2, "tree_method": "hist", "device": "cuda"},
dtrain,
num_boost_round=4,
)
prediction = dxgb.predict(client, output, X)
return prediction
if __name__ == "__main__":
# `LocalCUDACluster` is used for assigning GPU to XGBoost processes. Here
# `n_workers` represents the number of GPUs since we use one GPU per worker process.
with LocalCUDACluster(n_workers=2, threads_per_worker=4) as cluster:
with Client(cluster) as client:
# generate some random data for demonstration
rng = da.random.default_rng(1)
m = 100000
n = 100
X = rng.normal(size=(m, n))
y = X.sum(axis=1)
print("Using DaskQuantileDMatrix")
from_ddqdm = using_quantile_device_dmatrix(client, X, y)
print("Using DMatrix")
from_dmatrix = using_dask_matrix(client, X, y)
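# Sketch of the `ref` argument mentioned in `using_quantile_device_dmatrix`: if a
# validation split were available (`X_valid` / `y_valid` are hypothetical names, not
# defined in this example), a second DaskQuantileDMatrix could reuse the histogram
# bins of the training matrix so it can be passed to `evals`:
#
#     dvalid = dxgb.DaskQuantileDMatrix(client, X_valid, y_valid, ref=dtrain)
#     dxgb.train(client, params, dtrain, num_boost_round=4, evals=[(dvalid, "Valid")])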
|
"""
Example of training with Dask on GPU
====================================
"""
import dask_cudf
from dask import array as da
from dask import dataframe as dd
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def using_dask_matrix(client: Client, X: da.Array, y: da.Array) -> da.Array:
    # DaskDMatrix acts like a normal DMatrix and works as a proxy for local DMatrices
    # scattered across the workers.
dtrain = DaskDMatrix(client, X, y)
# Use train method from xgboost.dask instead of xgboost. This distributed version
# of train returns a dictionary containing the resulting booster and evaluation
# history obtained from evaluation metrics.
output = dxgb.train(
client,
{
"verbosity": 2,
"tree_method": "hist",
# Golden line for GPU training
"device": "cuda",
},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
)
bst = output["booster"]
history = output["history"]
# you can pass output directly into `predict` too.
prediction = dxgb.predict(client, bst, dtrain)
print("Evaluation history:", history)
return prediction
def using_quantile_device_dmatrix(client: Client, X: da.Array, y: da.Array) -> da.Array:
"""`DaskQuantileDMatrix` is a data type specialized for `hist` tree methods for
reducing memory usage.
.. versionadded:: 1.2.0
"""
X = dask_cudf.from_dask_dataframe(dd.from_dask_array(X))
y = dask_cudf.from_dask_dataframe(dd.from_dask_array(y))
    # `DaskQuantileDMatrix` is used instead of `DaskDMatrix`; be careful that it cannot
    # be used for anything other than training unless a reference is specified. See
    # the `ref` argument of `DaskQuantileDMatrix`.
dtrain = dxgb.DaskQuantileDMatrix(client, X, y)
output = dxgb.train(
client,
{"verbosity": 2, "tree_method": "hist", "device": "cuda"},
dtrain,
num_boost_round=4,
)
prediction = dxgb.predict(client, output, X)
return prediction
if __name__ == "__main__":
# `LocalCUDACluster` is used for assigning GPU to XGBoost processes. Here
# `n_workers` represents the number of GPUs since we use one GPU per worker process.
with LocalCUDACluster(n_workers=2, threads_per_worker=4) as cluster:
with Client(cluster) as client:
# generate some random data for demonstration
m = 100000
n = 100
X = da.random.random(size=(m, n), chunks=10000)
y = da.random.random(size=(m,), chunks=10000)
print("Using DaskQuantileDMatrix")
from_ddqdm = using_quantile_device_dmatrix(client, X, y)
print("Using DMatrix")
from_dmatrix = using_dask_matrix(client, X, y)
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from pydantic import parse_obj_as
from docarray.computation.numpy_backend import NumpyCompBackend
from docarray.typing import NdArray
def test_to_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.to_device(np.random.rand(10, 3), 'meta')
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((5)), 1),
(np.zeros((1, 5)), 2),
(np.zeros((5, 5)), 2),
(np.zeros(()), 0),
],
)
def test_n_dim(array, result):
assert NumpyCompBackend.n_dim(array) == result
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((10,)), (10,)),
(np.zeros((5, 5)), (5, 5)),
(np.zeros(()), ()),
],
)
def test_shape(array, result):
shape = NumpyCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
def test_device():
array = np.array([1, 2, 3])
assert NumpyCompBackend.device(array) is None
@pytest.mark.parametrize('dtype', [np.int64, np.float64, int, float])
def test_dtype(dtype):
array = np.array([1, 2, 3], dtype=dtype)
assert NumpyCompBackend.dtype(array) == dtype
def test_empty():
array = NumpyCompBackend.empty((10, 3))
assert array.shape == (10, 3)
def test_empty_dtype():
tensor = NumpyCompBackend.empty((10, 3), dtype=np.int32)
assert tensor.shape == (10, 3)
assert tensor.dtype == np.int32
def test_empty_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.empty((10, 3), device='meta')
def test_squeeze():
tensor = np.zeros(shape=(1, 1, 3, 1))
squeezed = NumpyCompBackend.squeeze(tensor)
assert squeezed.shape == (3,)
@pytest.mark.parametrize(
'array,t_range,x_range,result',
[
(np.array([0, 1, 2, 3, 4, 5]), (0, 10), None, np.array([0, 2, 4, 6, 8, 10])),
(np.array([0, 1, 2, 3, 4, 5]), (0, 10), (0, 10), np.array([0, 1, 2, 3, 4, 5])),
(
np.array([[0.0, 1.0], [0.0, 1.0]]),
(0, 10),
None,
np.array([[0.0, 10.0], [0.0, 10.0]]),
),
],
)
def test_minmax_normalize(array, t_range, x_range, result):
output = NumpyCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert np.allclose(output, result)
def test_stack():
t0 = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
t1 = parse_obj_as(NdArray, np.ones((3, 224, 224)))
stacked1 = NumpyCompBackend.stack([t0, t1], dim=0)
assert isinstance(stacked1, np.ndarray)
assert stacked1.shape == (2, 3, 224, 224)
stacked2 = NumpyCompBackend.stack([t0, t1], dim=-1)
assert isinstance(stacked2, np.ndarray)
assert stacked2.shape == (3, 224, 224, 2)
|
import numpy as np
import pytest
from pydantic import parse_obj_as
from docarray.computation.numpy_backend import NumpyCompBackend
from docarray.typing import NdArray
def test_to_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.to_device(np.random.rand(10, 3), 'meta')
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((5)), 1),
(np.zeros((1, 5)), 2),
(np.zeros((5, 5)), 2),
(np.zeros(()), 0),
],
)
def test_n_dim(array, result):
assert NumpyCompBackend.n_dim(array) == result
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((10,)), (10,)),
(np.zeros((5, 5)), (5, 5)),
(np.zeros(()), ()),
],
)
def test_shape(array, result):
shape = NumpyCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
def test_device():
array = np.array([1, 2, 3])
assert NumpyCompBackend.device(array) is None
@pytest.mark.parametrize('dtype', [np.int64, np.float64, int, float])
def test_dtype(dtype):
array = np.array([1, 2, 3], dtype=dtype)
assert NumpyCompBackend.dtype(array) == dtype
def test_empty():
array = NumpyCompBackend.empty((10, 3))
assert array.shape == (10, 3)
def test_empty_dtype():
tensor = NumpyCompBackend.empty((10, 3), dtype=np.int32)
assert tensor.shape == (10, 3)
assert tensor.dtype == np.int32
def test_empty_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.empty((10, 3), device='meta')
def test_squeeze():
tensor = np.zeros(shape=(1, 1, 3, 1))
squeezed = NumpyCompBackend.squeeze(tensor)
assert squeezed.shape == (3,)
@pytest.mark.parametrize(
'array,t_range,x_range,result',
[
(np.array([0, 1, 2, 3, 4, 5]), (0, 10), None, np.array([0, 2, 4, 6, 8, 10])),
(np.array([0, 1, 2, 3, 4, 5]), (0, 10), (0, 10), np.array([0, 1, 2, 3, 4, 5])),
(
np.array([[0.0, 1.0], [0.0, 1.0]]),
(0, 10),
None,
np.array([[0.0, 10.0], [0.0, 10.0]]),
),
],
)
def test_minmax_normalize(array, t_range, x_range, result):
output = NumpyCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert np.allclose(output, result)
def test_stack():
t0 = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
t1 = parse_obj_as(NdArray, np.ones((3, 224, 224)))
stacked1 = NumpyCompBackend.stack([t0, t1], dim=0)
assert isinstance(stacked1, np.ndarray)
assert stacked1.shape == (2, 3, 224, 224)
stacked2 = NumpyCompBackend.stack([t0, t1], dim=-1)
assert isinstance(stacked2, np.ndarray)
assert stacked2.shape == (3, 224, 224, 2)
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from diffusers import AutoencoderDC
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderDCTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
model_class = AutoencoderDC
main_input_name = "sample"
base_precision = 1e-2
def get_autoencoder_dc_config(self):
return {
"in_channels": 3,
"latent_channels": 4,
"attention_head_dim": 2,
"encoder_block_types": (
"ResBlock",
"EfficientViTBlock",
),
"decoder_block_types": (
"ResBlock",
"EfficientViTBlock",
),
"encoder_block_out_channels": (8, 8),
"decoder_block_out_channels": (8, 8),
"encoder_qkv_multiscales": ((), (5,)),
"decoder_qkv_multiscales": ((), (5,)),
"encoder_layers_per_block": (1, 1),
"decoder_layers_per_block": [1, 1],
"downsample_block_type": "conv",
"upsample_block_type": "interpolate",
"decoder_norm_types": "rms_norm",
"decoder_act_fns": "silu",
"scaling_factor": 0.41407,
}
@property
def dummy_input(self):
batch_size = 4
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
return {"sample": image}
@property
def input_shape(self):
return (3, 32, 32)
@property
def output_shape(self):
return (3, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = self.get_autoencoder_dc_config()
inputs_dict = self.dummy_input
return init_dict, inputs_dict
@unittest.skip("AutoencoderDC does not support `norm_num_groups` because it does not use GroupNorm.")
def test_forward_with_norm_groups(self):
pass
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from diffusers import AutoencoderDC
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderDCTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
model_class = AutoencoderDC
main_input_name = "sample"
base_precision = 1e-2
def get_autoencoder_dc_config(self):
return {
"in_channels": 3,
"latent_channels": 4,
"attention_head_dim": 2,
"encoder_block_types": (
"ResBlock",
"EfficientViTBlock",
),
"decoder_block_types": (
"ResBlock",
"EfficientViTBlock",
),
"encoder_block_out_channels": (8, 8),
"decoder_block_out_channels": (8, 8),
"encoder_qkv_multiscales": ((), (5,)),
"decoder_qkv_multiscales": ((), (5,)),
"encoder_layers_per_block": (1, 1),
"decoder_layers_per_block": [1, 1],
"downsample_block_type": "conv",
"upsample_block_type": "interpolate",
"decoder_norm_types": "rms_norm",
"decoder_act_fns": "silu",
"scaling_factor": 0.41407,
}
@property
def dummy_input(self):
batch_size = 4
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
return {"sample": image}
@property
def input_shape(self):
return (3, 32, 32)
@property
def output_shape(self):
return (3, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = self.get_autoencoder_dc_config()
inputs_dict = self.dummy_input
return init_dict, inputs_dict
@unittest.skip("AutoencoderDC does not support `norm_num_groups` because it does not use GroupNorm.")
def test_forward_with_norm_groups(self):
pass
|
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch.nn.functional as F
from torch import Tensor
from sentence_transformers.models.Module import Module
class Normalize(Module):
"""This layer normalizes embeddings to unit length"""
def __init__(self) -> None:
super().__init__()
def forward(self, features: dict[str, Tensor]) -> dict[str, Tensor]:
features.update({"sentence_embedding": F.normalize(features["sentence_embedding"], p=2, dim=1)})
return features
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
return
@classmethod
def load(cls, *args, **kwargs) -> Self:
return cls()
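# Sketch (not part of the original module): run the layer on a random batch of
# "sentence embeddings" and check that every row comes out with unit L2 norm.
if __name__ == "__main__":
    import torch
    layer = Normalize()
    features = {"sentence_embedding": torch.randn(4, 8)}
    normalized = layer(features)["sentence_embedding"]
    print(normalized.norm(p=2, dim=1))  # each value is ~1.0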
|
from __future__ import annotations
import torch.nn.functional as F
from torch import Tensor, nn
class Normalize(nn.Module):
"""This layer normalizes embeddings to unit length"""
def __init__(self) -> None:
super().__init__()
def forward(self, features: dict[str, Tensor]) -> dict[str, Tensor]:
features.update({"sentence_embedding": F.normalize(features["sentence_embedding"], p=2, dim=1)})
return features
def save(self, output_path) -> None:
pass
@staticmethod
def load(input_path) -> Normalize:
return Normalize()
|
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import Mesh3DUrl, NdArray
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
url = parse_obj_as(Mesh3DUrl, file_path)
tensors = url.load()
assert isinstance(tensors.vertices, np.ndarray)
assert isinstance(tensors.vertices, NdArray)
assert isinstance(tensors.faces, np.ndarray)
assert isinstance(tensors.faces, NdArray)
assert tensors.vertices.shape[1] == 3
assert tensors.faces.shape[1] == 3
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_path',
[*MESH_FILES.values(), REMOTE_OBJ_FILE],
)
@pytest.mark.parametrize('field', ['vertices', 'faces'])
def test_load_one_of_fields(file_path, field):
url = parse_obj_as(Mesh3DUrl, file_path)
field = getattr(url.load(), field)
assert isinstance(field, np.ndarray)
assert isinstance(field, NdArray)
def test_json_schema():
schema_json_of(Mesh3DUrl)
def test_dump_json():
url = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'file_format,path_to_file',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('obj', REMOTE_OBJ_FILE),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
('illegal', 'my/local/text/file.png'),
],
)
def test_validation(file_format, path_to_file):
if file_format == 'illegal':
with pytest.raises(ValueError, match='Mesh3DUrl'):
parse_obj_as(Mesh3DUrl, path_to_file)
else:
url = parse_obj_as(Mesh3DUrl, path_to_file)
assert isinstance(url, Mesh3DUrl)
assert isinstance(url, str)
@pytest.mark.proto
def test_proto_mesh_url():
uri = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
|
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import Mesh3DUrl, NdArray
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
url = parse_obj_as(Mesh3DUrl, file_path)
tensors = url.load()
assert isinstance(tensors.vertices, np.ndarray)
assert isinstance(tensors.vertices, NdArray)
assert isinstance(tensors.faces, np.ndarray)
assert isinstance(tensors.faces, NdArray)
assert tensors.vertices.shape[1] == 3
assert tensors.faces.shape[1] == 3
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_path',
[*MESH_FILES.values(), REMOTE_OBJ_FILE],
)
@pytest.mark.parametrize('field', ['vertices', 'faces'])
def test_load_one_of_fields(file_path, field):
url = parse_obj_as(Mesh3DUrl, file_path)
field = getattr(url.load(), field)
assert isinstance(field, np.ndarray)
assert isinstance(field, NdArray)
def test_json_schema():
schema_json_of(Mesh3DUrl)
def test_dump_json():
url = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'file_format,path_to_file',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('obj', REMOTE_OBJ_FILE),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
('illegal', 'my/local/text/file.png'),
],
)
def test_validation(file_format, path_to_file):
if file_format == 'illegal':
with pytest.raises(ValueError, match='Mesh3DUrl'):
parse_obj_as(Mesh3DUrl, path_to_file)
else:
url = parse_obj_as(Mesh3DUrl, path_to_file)
assert isinstance(url, Mesh3DUrl)
assert isinstance(url, str)
@pytest.mark.proto
def test_proto_mesh_url():
uri = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
|
import pytest
from langchain_core.utils.env import get_from_dict_or_env
def test_get_from_dict_or_env() -> None:
assert (
get_from_dict_or_env(
{
"a": "foo",
},
["a"],
"__SOME_KEY_IN_ENV",
)
== "foo"
)
assert (
get_from_dict_or_env(
{
"a": "foo",
},
["b", "a"],
"__SOME_KEY_IN_ENV",
)
== "foo"
)
assert (
get_from_dict_or_env(
{
"a": "foo",
},
"a",
"__SOME_KEY_IN_ENV",
)
== "foo"
)
assert (
get_from_dict_or_env(
{
"a": "foo",
},
"not exists",
"__SOME_KEY_IN_ENV",
default="default",
)
== "default"
)
# Not the most obvious behavior, but
# this is how it works right now
with pytest.raises(
ValueError,
match="Did not find not exists, "
"please add an environment variable `__SOME_KEY_IN_ENV` which contains it, "
"or pass `not exists` as a named parameter.",
):
assert (
get_from_dict_or_env(
{
"a": "foo",
},
"not exists",
"__SOME_KEY_IN_ENV",
)
is None
)
|
import pytest
from langchain_core.utils.env import get_from_dict_or_env
def test_get_from_dict_or_env() -> None:
assert (
get_from_dict_or_env(
{
"a": "foo",
},
["a"],
"__SOME_KEY_IN_ENV",
)
== "foo"
)
assert (
get_from_dict_or_env(
{
"a": "foo",
},
["b", "a"],
"__SOME_KEY_IN_ENV",
)
== "foo"
)
assert (
get_from_dict_or_env(
{
"a": "foo",
},
"a",
"__SOME_KEY_IN_ENV",
)
== "foo"
)
assert (
get_from_dict_or_env(
{
"a": "foo",
},
"not exists",
"__SOME_KEY_IN_ENV",
default="default",
)
== "default"
)
# Not the most obvious behavior, but
# this is how it works right now
with pytest.raises(ValueError):
assert (
get_from_dict_or_env(
{
"a": "foo",
},
"not exists",
"__SOME_KEY_IN_ENV",
)
is None
)
|
"""
Quantile Regression
===================
.. versionadded:: 2.0.0
The script is inspired by this awesome example in sklearn:
https://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_quantile.html
.. note::
The feature is only supported using the Python package. In addition, quantile
crossing can happen due to limitation in the algorithm.
"""
import argparse
from typing import Dict
import numpy as np
from sklearn.model_selection import train_test_split
import xgboost as xgb
def f(x: np.ndarray) -> np.ndarray:
"""The function to predict."""
return x * np.sin(x)
def quantile_loss(args: argparse.Namespace) -> None:
"""Train a quantile regression model."""
rng = np.random.RandomState(1994)
    # Generate a synthetic dataset for the demo; the generation process is taken from
    # the sklearn example.
X = np.atleast_2d(rng.uniform(0, 10.0, size=1000)).T
expected_y = f(X).ravel()
sigma = 0.5 + X.ravel() / 10.0
noise = rng.lognormal(sigma=sigma) - np.exp(sigma**2.0 / 2.0)
y = expected_y + noise
# Train on 0.05 and 0.95 quantiles. The model is similar to multi-class and
# multi-target models.
alpha = np.array([0.05, 0.5, 0.95])
evals_result: Dict[str, Dict] = {}
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
# We will be using the `hist` tree method, quantile DMatrix can be used to preserve
# memory.
# Do not use the `exact` tree method for quantile regression, otherwise the
# performance might drop.
Xy = xgb.QuantileDMatrix(X, y)
# use Xy as a reference
Xy_test = xgb.QuantileDMatrix(X_test, y_test, ref=Xy)
booster = xgb.train(
{
# Use the quantile objective function.
"objective": "reg:quantileerror",
"tree_method": "hist",
"quantile_alpha": alpha,
# Let's try not to overfit.
"learning_rate": 0.04,
"max_depth": 5,
},
Xy,
num_boost_round=32,
early_stopping_rounds=2,
# The evaluation result is a weighted average across multiple quantiles.
evals=[(Xy, "Train"), (Xy_test, "Test")],
evals_result=evals_result,
)
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
scores = booster.inplace_predict(xx)
# dim 1 is the quantiles
assert scores.shape[0] == xx.shape[0]
assert scores.shape[1] == alpha.shape[0]
y_lower = scores[:, 0] # alpha=0.05
y_med = scores[:, 1] # alpha=0.5, median
y_upper = scores[:, 2] # alpha=0.95
# Train a mse model for comparison
booster = xgb.train(
{
"objective": "reg:squarederror",
"tree_method": "hist",
# Let's try not to overfit.
"learning_rate": 0.04,
"max_depth": 5,
},
Xy,
num_boost_round=32,
early_stopping_rounds=2,
evals=[(Xy, "Train"), (Xy_test, "Test")],
evals_result=evals_result,
)
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
y_pred = booster.inplace_predict(xx)
if args.plot:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(10, 10))
plt.plot(xx, f(xx), "g:", linewidth=3, label=r"$f(x) = x\,\sin(x)$")
plt.plot(X_test, y_test, "b.", markersize=10, label="Test observations")
plt.plot(xx, y_med, "r-", label="Predicted median")
plt.plot(xx, y_pred, "m-", label="Predicted mean")
plt.plot(xx, y_upper, "k-")
plt.plot(xx, y_lower, "k-")
plt.fill_between(
xx.ravel(), y_lower, y_upper, alpha=0.4, label="Predicted 90% interval"
)
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
plt.ylim(-10, 25)
plt.legend(loc="upper left")
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--plot",
action="store_true",
help="Specify it to enable plotting the outputs.",
)
args = parser.parse_args()
quantile_loss(args)
|
"""
Quantile Regression
===================
.. versionadded:: 2.0.0
The script is inspired by this awesome example in sklearn:
https://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_quantile.html
"""
import argparse
from typing import Dict
import numpy as np
from sklearn.model_selection import train_test_split
import xgboost as xgb
def f(x: np.ndarray) -> np.ndarray:
"""The function to predict."""
return x * np.sin(x)
def quantile_loss(args: argparse.Namespace) -> None:
"""Train a quantile regression model."""
rng = np.random.RandomState(1994)
    # Generate a synthetic dataset for the demo; the generation process is taken from
    # the sklearn example.
X = np.atleast_2d(rng.uniform(0, 10.0, size=1000)).T
expected_y = f(X).ravel()
sigma = 0.5 + X.ravel() / 10.0
noise = rng.lognormal(sigma=sigma) - np.exp(sigma**2.0 / 2.0)
y = expected_y + noise
# Train on 0.05 and 0.95 quantiles. The model is similar to multi-class and
# multi-target models.
alpha = np.array([0.05, 0.5, 0.95])
evals_result: Dict[str, Dict] = {}
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
# We will be using the `hist` tree method, quantile DMatrix can be used to preserve
# memory.
# Do not use the `exact` tree method for quantile regression, otherwise the
# performance might drop.
Xy = xgb.QuantileDMatrix(X, y)
# use Xy as a reference
Xy_test = xgb.QuantileDMatrix(X_test, y_test, ref=Xy)
booster = xgb.train(
{
# Use the quantile objective function.
"objective": "reg:quantileerror",
"tree_method": "hist",
"quantile_alpha": alpha,
# Let's try not to overfit.
"learning_rate": 0.04,
"max_depth": 5,
},
Xy,
num_boost_round=32,
early_stopping_rounds=2,
# The evaluation result is a weighted average across multiple quantiles.
evals=[(Xy, "Train"), (Xy_test, "Test")],
evals_result=evals_result,
)
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
scores = booster.inplace_predict(xx)
# dim 1 is the quantiles
assert scores.shape[0] == xx.shape[0]
assert scores.shape[1] == alpha.shape[0]
y_lower = scores[:, 0] # alpha=0.05
y_med = scores[:, 1] # alpha=0.5, median
y_upper = scores[:, 2] # alpha=0.95
# Train a mse model for comparison
booster = xgb.train(
{
"objective": "reg:squarederror",
"tree_method": "hist",
# Let's try not to overfit.
"learning_rate": 0.04,
"max_depth": 5,
},
Xy,
num_boost_round=32,
early_stopping_rounds=2,
evals=[(Xy, "Train"), (Xy_test, "Test")],
evals_result=evals_result,
)
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
y_pred = booster.inplace_predict(xx)
if args.plot:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(10, 10))
plt.plot(xx, f(xx), "g:", linewidth=3, label=r"$f(x) = x\,\sin(x)$")
plt.plot(X_test, y_test, "b.", markersize=10, label="Test observations")
plt.plot(xx, y_med, "r-", label="Predicted median")
plt.plot(xx, y_pred, "m-", label="Predicted mean")
plt.plot(xx, y_upper, "k-")
plt.plot(xx, y_lower, "k-")
plt.fill_between(
xx.ravel(), y_lower, y_upper, alpha=0.4, label="Predicted 90% interval"
)
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
plt.ylim(-10, 25)
plt.legend(loc="upper left")
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--plot",
action="store_true",
help="Specify it to enable plotting the outputs.",
)
args = parser.parse_args()
quantile_loss(args)
|
from __future__ import annotations
from typing import Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5
) -> None:
"""
        This Online Contrastive loss is similar to :class:`ContrastiveLoss`, but it selects hard positive (positives that
        are far apart) and hard negative pairs (negatives that are close) and computes the loss only for these pairs.
        This loss often yields better performance than ContrastiveLoss.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
References:
- `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
2. Data should include hard positives and hard negatives
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Relations:
- :class:`ContrastiveLoss` is similar, but does not use hard positive and hard negative pairs.
:class:`OnlineContrastiveLoss` often yields better results.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.OnlineContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor, size_average=False) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
distance_matrix = self.distance_metric(embeddings[0], embeddings[1])
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
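# Minimal sketch of the hard-pair selection used in ``forward`` above, applied to a toy
# distance vector that stands in for real embedding distances; guarded so importing the
# module is unaffected.
if __name__ == "__main__":
    import torch
    toy_distances = torch.tensor([0.1, 0.9, 0.2, 0.7])  # distances for four sentence pairs
    toy_labels = torch.tensor([1, 1, 0, 0])  # 1 = positive pair, 0 = negative pair
    toy_negs = toy_distances[toy_labels == 0]
    toy_poss = toy_distances[toy_labels == 1]
    # Hard negatives: closer than the farthest positive; hard positives: farther than the closest negative.
    print(toy_negs[toy_negs < toy_poss.max()])  # tensor([0.2000, 0.7000])
    print(toy_poss[toy_poss > toy_negs.min()])  # tensor([0.9000])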
|
from __future__ import annotations
from typing import Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5
) -> None:
"""
        This Online Contrastive loss is similar to :class:`ContrastiveLoss`, but it selects hard positive pairs
        (positives that are far apart) and hard negative pairs (negatives that are close) and computes the loss
        only for these pairs. This loss often yields better performance than :class:`ContrastiveLoss`.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
References:
- `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
2. Data should include hard positives and hard negatives
Relations:
- :class:`ContrastiveLoss` is similar, but does not use hard positive and hard negative pairs.
:class:`OnlineContrastiveLoss` often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.OnlineContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor, size_average=False) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
distance_matrix = self.distance_metric(embeddings[0], embeddings[1])
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
        # Select hard positive and hard negative pairs: hard negatives lie closer than the farthest
        # positive, and hard positives lie farther apart than the closest negative. When there are
        # fewer than two pairs of the other type, the mean distance of the same type is used as the cutoff.
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Constant as constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import (
Identity as IdentityInitializer,
)
from keras.src.initializers.constant_initializers import Identity as identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import Ones as ones
from keras.src.initializers.constant_initializers import STFTInitializer
from keras.src.initializers.constant_initializers import Zeros
from keras.src.initializers.constant_initializers import Zeros as zeros
from keras.src.initializers.initializer import Initializer
from keras.src.initializers.random_initializers import GlorotNormal
from keras.src.initializers.random_initializers import (
GlorotNormal as glorot_normal,
)
from keras.src.initializers.random_initializers import GlorotUniform
from keras.src.initializers.random_initializers import (
GlorotUniform as glorot_uniform,
)
from keras.src.initializers.random_initializers import HeNormal
from keras.src.initializers.random_initializers import HeNormal as he_normal
from keras.src.initializers.random_initializers import HeUniform
from keras.src.initializers.random_initializers import HeUniform as he_uniform
from keras.src.initializers.random_initializers import LecunNormal
from keras.src.initializers.random_initializers import (
LecunNormal as lecun_normal,
)
from keras.src.initializers.random_initializers import LecunUniform
from keras.src.initializers.random_initializers import (
LecunUniform as lecun_uniform,
)
from keras.src.initializers.random_initializers import OrthogonalInitializer
from keras.src.initializers.random_initializers import (
OrthogonalInitializer as Orthogonal,
)
from keras.src.initializers.random_initializers import (
OrthogonalInitializer as orthogonal,
)
from keras.src.initializers.random_initializers import RandomNormal
from keras.src.initializers.random_initializers import (
RandomNormal as random_normal,
)
from keras.src.initializers.random_initializers import RandomUniform
from keras.src.initializers.random_initializers import (
RandomUniform as random_uniform,
)
from keras.src.initializers.random_initializers import TruncatedNormal
from keras.src.initializers.random_initializers import (
TruncatedNormal as truncated_normal,
)
from keras.src.initializers.random_initializers import VarianceScaling
from keras.src.initializers.random_initializers import (
VarianceScaling as variance_scaling,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Constant as constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import (
Identity as IdentityInitializer,
)
from keras.src.initializers.constant_initializers import Identity as identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import Ones as ones
from keras.src.initializers.constant_initializers import Zeros
from keras.src.initializers.constant_initializers import Zeros as zeros
from keras.src.initializers.initializer import Initializer
from keras.src.initializers.random_initializers import GlorotNormal
from keras.src.initializers.random_initializers import (
GlorotNormal as glorot_normal,
)
from keras.src.initializers.random_initializers import GlorotUniform
from keras.src.initializers.random_initializers import (
GlorotUniform as glorot_uniform,
)
from keras.src.initializers.random_initializers import HeNormal
from keras.src.initializers.random_initializers import HeNormal as he_normal
from keras.src.initializers.random_initializers import HeUniform
from keras.src.initializers.random_initializers import HeUniform as he_uniform
from keras.src.initializers.random_initializers import LecunNormal
from keras.src.initializers.random_initializers import (
LecunNormal as lecun_normal,
)
from keras.src.initializers.random_initializers import LecunUniform
from keras.src.initializers.random_initializers import (
LecunUniform as lecun_uniform,
)
from keras.src.initializers.random_initializers import OrthogonalInitializer
from keras.src.initializers.random_initializers import (
OrthogonalInitializer as Orthogonal,
)
from keras.src.initializers.random_initializers import (
OrthogonalInitializer as orthogonal,
)
from keras.src.initializers.random_initializers import RandomNormal
from keras.src.initializers.random_initializers import (
RandomNormal as random_normal,
)
from keras.src.initializers.random_initializers import RandomUniform
from keras.src.initializers.random_initializers import (
RandomUniform as random_uniform,
)
from keras.src.initializers.random_initializers import TruncatedNormal
from keras.src.initializers.random_initializers import (
TruncatedNormal as truncated_normal,
)
from keras.src.initializers.random_initializers import VarianceScaling
from keras.src.initializers.random_initializers import (
VarianceScaling as variance_scaling,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.saving.file_editor import KerasFileEditor as KerasFileEditor
from keras.src.saving.object_registration import (
CustomObjectScope as CustomObjectScope,
)
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import (
get_custom_objects as get_custom_objects,
)
from keras.src.saving.object_registration import (
get_registered_name as get_registered_name,
)
from keras.src.saving.object_registration import (
get_registered_object as get_registered_object,
)
from keras.src.saving.object_registration import (
register_keras_serializable as register_keras_serializable,
)
from keras.src.saving.saving_api import load_model as load_model
from keras.src.saving.saving_api import load_weights as load_weights
from keras.src.saving.saving_api import save_model as save_model
from keras.src.saving.saving_api import save_weights as save_weights
from keras.src.saving.serialization_lib import (
deserialize_keras_object as deserialize_keras_object,
)
from keras.src.saving.serialization_lib import (
serialize_keras_object as serialize_keras_object,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.saving.file_editor import KerasFileEditor
from keras.src.saving.object_registration import CustomObjectScope
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import get_custom_objects
from keras.src.saving.object_registration import get_registered_name
from keras.src.saving.object_registration import get_registered_object
from keras.src.saving.object_registration import register_keras_serializable
from keras.src.saving.saving_api import load_model
from keras.src.saving.saving_api import load_weights
from keras.src.saving.saving_api import save_model
from keras.src.saving.saving_api import save_weights
from keras.src.saving.serialization_lib import deserialize_keras_object
from keras.src.saving.serialization_lib import serialize_keras_object
|
"""Weaviate Sub-Question Query Engine Pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.query_engine import SubQuestionQueryEngine
from llama_index.core.schema import TextNode
from llama_index.core.storage.storage_context import StorageContext
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.vector_stores.weaviate import WeaviateVectorStore
class WeaviateSubQuestionPack(BaseLlamaPack):
"""Weaviate Sub-Question query engine pack."""
def __init__(
self,
collection_name: str,
host: str,
auth_client_secret: str,
nodes: Optional[List[TextNode]] = None,
**kwargs: Any,
) -> None:
"""Init params."""
from weaviate import Client
self.client: Client = Client(host, auth_client_secret=auth_client_secret)
weaviate_client = self.client
weaviate_collection = weaviate_client.get_or_create_collection(collection_name)
self._vector_store = WeaviateVectorStore(
weaviate_collection=weaviate_collection
)
if nodes is not None:
self._storage_context = StorageContext.from_defaults(
vector_store=self._vector_store
)
self._index = VectorStoreIndex(
nodes, storage_context=self._storage_context, **kwargs
)
else:
self._index = VectorStoreIndex.from_vector_store(
self._vector_store, **kwargs
)
self._storage_context = self._index.storage_context
self.retriever = self._index.as_retriever()
query_engine = self._index.as_query_engine()
query_engine_tools = [
QueryEngineTool(
query_engine=query_engine, metadata=ToolMetadata(name="Vector Index")
)
]
self.query_engine = SubQuestionQueryEngine.from_defaults(
query_engine_tools=query_engine_tools
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"vector_store": self._vector_store,
"storage_context": self._storage_context,
"index": self._index,
"retriever": self.retriever,
"query_engine": self.query_engine,
}
def retrieve(self, query_str: str) -> Any:
"""Retrieve."""
return self.retriever.retrieve(query_str)
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
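# Hedged usage sketch of the pack's API: the collection name, host, API key and query below are
# placeholders, and a real run also needs a reachable Weaviate instance plus ingested nodes.
if __name__ == "__main__":
    pack = WeaviateSubQuestionPack(
        collection_name="LlamaIndexCollection",
        host="http://localhost:8080",
        auth_client_secret="<api-key>",
    )
    print(pack.run("What did the author do growing up?"))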
|
"""Weaviate Sub-Question Query Engine Pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.query_engine import SubQuestionQueryEngine
from llama_index.core.schema import TextNode
from llama_index.core.storage.storage_context import StorageContext
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.vector_stores.weaviate import WeaviateVectorStore
class WeaviateSubQuestionPack(BaseLlamaPack):
"""Weaviate Sub-Question query engine pack."""
def __init__(
self,
collection_name: str,
host: str,
auth_client_secret: str,
nodes: Optional[List[TextNode]] = None,
**kwargs: Any,
) -> None:
"""Init params."""
from weaviate import Client
self.client: Client = Client(host, auth_client_secret=auth_client_secret)
weaviate_client = self.client
weaviate_collection = weaviate_client.get_or_create_collection(collection_name)
self._vector_store = WeaviateVectorStore(
weaviate_collection=weaviate_collection
)
if nodes is not None:
self._storage_context = StorageContext.from_defaults(
vector_store=self._vector_store
)
self._index = VectorStoreIndex(
nodes, storage_context=self._storage_context, **kwargs
)
else:
self._index = VectorStoreIndex.from_vector_store(
self._vector_store, **kwargs
)
self._storage_context = self._index.storage_context
self.retriever = self._index.as_retriever()
query_engine = self._index.as_query_engine()
query_engine_tools = [
QueryEngineTool(
query_engine=query_engine, metadata=ToolMetadata(name="Vector Index")
)
]
self.query_engine = SubQuestionQueryEngine.from_defaults(
query_engine_tools=query_engine_tools
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"vector_store": self._vector_store,
"storage_context": self._storage_context,
"index": self._index,
"retriever": self.retriever,
"query_engine": self.query_engine,
}
def retrieve(self, query_str: str) -> Any:
"""Retrieve."""
return self.retriever.retrieve(query_str)
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
|
from __future__ import annotations
import re
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.util import is_datasets_available
from tests.utils import is_ci
if not is_datasets_available():
pytest.skip(
reason="Datasets are not installed. Please install `datasets` with `pip install datasets`",
allow_module_level=True,
)
if is_ci():
pytest.skip(
reason="Skip test in CI to try and avoid 429 Client Error",
allow_module_level=True,
)
def test_nanobeir_evaluator(stsb_bert_tiny_model: SentenceTransformer):
"""Tests that the NanoBERTEvaluator can be loaded and produces expected metrics"""
datasets = ["QuoraRetrieval", "MSMARCO"]
query_prompts = {
"QuoraRetrieval": "Instruct: Given a question, retrieve questions that are semantically equivalent to the given question\\nQuery: ",
"MSMARCO": "Instruct: Given a web search query, retrieve relevant passages that answer the query\\nQuery: ",
}
model = stsb_bert_tiny_model
evaluator = NanoBEIREvaluator(
dataset_names=datasets,
query_prompts=query_prompts,
)
results = evaluator(model)
assert len(results) > 0
assert all(isinstance(results[metric], float) for metric in results)
def test_nanobeir_evaluator_with_invalid_dataset():
"""Test that NanoBEIREvaluator raises an error for invalid dataset names."""
invalid_datasets = ["invalidDataset"]
with pytest.raises(
ValueError,
match=re.escape(
r"Dataset(s) ['invalidDataset'] not found in the NanoBEIR collection. "
r"Valid dataset names are: ['climatefever', 'dbpedia', 'fever', 'fiqa2018', 'hotpotqa', 'msmarco', 'nfcorpus', 'nq', 'quoraretrieval', 'scidocs', 'arguana', 'scifact', 'touche2020']"
),
):
NanoBEIREvaluator(dataset_names=invalid_datasets)
def test_nanobeir_evaluator_empty_inputs():
"""Test that NanoBEIREvaluator behaves correctly with empty datasets."""
with pytest.raises(ValueError, match="dataset_names cannot be empty. Use None to evaluate on all datasets."):
NanoBEIREvaluator(dataset_names=[])
|
from __future__ import annotations
import re
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.util import is_datasets_available
from tests.utils import is_ci
if not is_datasets_available():
pytest.skip(
reason="Datasets are not installed. Please install `datasets` with `pip install datasets`",
allow_module_level=True,
)
if is_ci():
pytest.skip(
reason="Skip test in CI to try and avoid 429 Client Error",
allow_module_level=True,
)
def test_nanobeir_evaluator(stsb_bert_tiny_model_reused: SentenceTransformer):
"""Tests that the NanoBERTEvaluator can be loaded and produces expected metrics"""
datasets = ["QuoraRetrieval", "MSMARCO"]
query_prompts = {
"QuoraRetrieval": "Instruct: Given a question, retrieve questions that are semantically equivalent to the given question\\nQuery: ",
"MSMARCO": "Instruct: Given a web search query, retrieve relevant passages that answer the query\\nQuery: ",
}
model = stsb_bert_tiny_model_reused
evaluator = NanoBEIREvaluator(
dataset_names=datasets,
query_prompts=query_prompts,
)
results = evaluator(model)
assert len(results) > 0
assert all(isinstance(results[metric], float) for metric in results)
def test_nanobeir_evaluator_with_invalid_dataset():
"""Test that NanoBEIREvaluator raises an error for invalid dataset names."""
invalid_datasets = ["invalidDataset"]
with pytest.raises(
ValueError,
match=re.escape(
r"Dataset(s) ['invalidDataset'] not found in the NanoBEIR collection. "
r"Valid dataset names are: ['climatefever', 'dbpedia', 'fever', 'fiqa2018', 'hotpotqa', 'msmarco', 'nfcorpus', 'nq', 'quoraretrieval', 'scidocs', 'arguana', 'scifact', 'touche2020']"
),
):
NanoBEIREvaluator(dataset_names=invalid_datasets)
def test_nanobeir_evaluator_empty_inputs():
"""Test that NanoBEIREvaluator behaves correctly with empty datasets."""
with pytest.raises(ValueError, match="dataset_names cannot be empty. Use None to evaluate on all datasets."):
NanoBEIREvaluator(dataset_names=[])
|
"""
This is a more complex example of performing clustering on a large-scale dataset.
This example finds local communities in a large set of sentences, i.e., groups of sentences that are highly
similar. You can freely configure the threshold for what is considered similar. A high threshold will
only find extremely similar sentences; a lower threshold will find more sentences that are less similar.
A second parameter is 'min_community_size': Only communities with at least a certain number of sentences will be returned.
The method for finding the communities is extremely fast: clustering 50k sentences requires only about 5 seconds (plus embedding computation).
In this example, we download a large set of questions from Quora and then find similar questions in this set.
"""
import csv
import os
import time
from sentence_transformers import SentenceTransformer, util
# Model for computing sentence embeddings. We use one trained for similar questions detection
model = SentenceTransformer("all-MiniLM-L6-v2")
# We download the Quora Duplicate Questions Dataset (https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs)
# and find similar questions in it
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 50000 # We limit our corpus to only the first 50k questions
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row["question1"])
corpus_sentences.add(row["question2"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, batch_size=64, show_progress_bar=True, convert_to_tensor=True)
print("Start clustering")
start_time = time.time()
# Two parameters to tune:
# min_community_size: Only consider communities that have at least 25 elements
# threshold: Consider sentence pairs with a cosine similarity larger than threshold as similar
clusters = util.community_detection(corpus_embeddings, min_community_size=25, threshold=0.75)
print("Clustering done after {:.2f} sec".format(time.time() - start_time))
# Print for all clusters the top 3 and bottom 3 elements
for i, cluster in enumerate(clusters):
print("\nCluster {}, #{} Elements ".format(i + 1, len(cluster)))
for sentence_id in cluster[0:3]:
print("\t", corpus_sentences[sentence_id])
print("\t", "...")
for sentence_id in cluster[-3:]:
print("\t", corpus_sentences[sentence_id])
|
"""
This is a more complex example of performing clustering on a large-scale dataset.
This example finds local communities in a large set of sentences, i.e., groups of sentences that are highly
similar. You can freely configure the threshold for what is considered similar. A high threshold will
only find extremely similar sentences; a lower threshold will find more sentences that are less similar.
A second parameter is 'min_community_size': Only communities with at least a certain number of sentences will be returned.
The method for finding the communities is extremely fast: clustering 50k sentences requires only about 5 seconds (plus embedding computation).
In this example, we download a large set of questions from Quora and then find similar questions in this set.
"""
from sentence_transformers import SentenceTransformer, util
import os
import csv
import time
# Model for computing sentence embeddings. We use one trained for similar questions detection
model = SentenceTransformer("all-MiniLM-L6-v2")
# We download the Quora Duplicate Questions Dataset (https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs)
# and find similar questions in it
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 50000 # We limit our corpus to only the first 50k questions
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row["question1"])
corpus_sentences.add(row["question2"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, batch_size=64, show_progress_bar=True, convert_to_tensor=True)
print("Start clustering")
start_time = time.time()
# Two parameters to tune:
# min_community_size: Only consider communities that have at least 25 elements
# threshold: Consider sentence pairs with a cosine similarity larger than threshold as similar
clusters = util.community_detection(corpus_embeddings, min_community_size=25, threshold=0.75)
print("Clustering done after {:.2f} sec".format(time.time() - start_time))
# Print for all clusters the top 3 and bottom 3 elements
for i, cluster in enumerate(clusters):
print("\nCluster {}, #{} Elements ".format(i + 1, len(cluster)))
for sentence_id in cluster[0:3]:
print("\t", corpus_sentences[sentence_id])
print("\t", "...")
for sentence_id in cluster[-3:]:
print("\t", corpus_sentences[sentence_id])
|
"""Standard LangChain interface tests"""
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ( # type: ignore[import-not-found]
ChatModelIntegrationTests, # type: ignore[import-not-found]
)
from langchain_mistralai import ChatMistralAI
class TestMistralStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatMistralAI
@property
def chat_model_params(self) -> dict:
return {"model": "mistral-large-latest", "temperature": 0}
@property
def supports_json_mode(self) -> bool:
return True
|
"""Standard LangChain interface tests"""
from typing import Type
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ( # type: ignore[import-not-found]
ChatModelIntegrationTests, # type: ignore[import-not-found]
)
from langchain_mistralai import ChatMistralAI
class TestMistralStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatMistralAI
@property
def chat_model_params(self) -> dict:
return {"model": "mistral-large-latest", "temperature": 0}
@property
def supports_json_mode(self) -> bool:
return True
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='CenterNet',
# use caffe img_norm
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5,
# There is a chance to get 40.3 after switching init_cfg,
# otherwise it is about 39.9~40.1
init_cfg=dict(type='Caffe2Xavier', layer='Conv2d'),
relu_before_extra_convs=True),
bbox_head=dict(
type='CenterNetUpdateHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
hm_min_radius=4,
hm_min_overlap=0.8,
more_pos_thresh=0.2,
more_pos_topk=9,
soft_weight_on_reg=False,
loss_cls=dict(
type='GaussianFocalLoss',
pos_weight=0.25,
neg_weight=0.75,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
),
train_cfg=None,
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# single-scale training is about 39.3
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=0.00025,
by_epoch=False,
begin=0,
end=4000),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
optim_wrapper = dict(
optimizer=dict(lr=0.01),
# Experiments show that there is no need to turn on clip_grad.
paramwise_cfg=dict(norm_decay_mult=0.))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=16)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='CenterNet',
# use caffe img_norm
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5,
# There is a chance to get 40.3 after switching init_cfg,
# otherwise it is about 39.9~40.1
init_cfg=dict(type='Caffe2Xavier', layer='Conv2d'),
relu_before_extra_convs=True),
bbox_head=dict(
type='CenterNetUpdateHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
hm_min_radius=4,
hm_min_overlap=0.8,
more_pos_thresh=0.2,
more_pos_topk=9,
soft_weight_on_reg=False,
loss_cls=dict(
type='GaussianFocalLoss',
pos_weight=0.25,
neg_weight=0.75,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
),
train_cfg=None,
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# single-scale training is about 39.3
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=0.00025,
by_epoch=False,
begin=0,
end=4000),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
optim_wrapper = dict(
optimizer=dict(lr=0.01),
# Experiments show that there is no need to turn on clip_grad.
paramwise_cfg=dict(norm_decay_mult=0.))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=16)
|
"""Flat reader."""
from fsspec import AbstractFileSystem
from fsspec.implementations.local import LocalFileSystem
from pathlib import Path
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class FlatReader(BaseReader):
"""
Flat reader.
    Extract raw text from a file and save the file type in the metadata.
"""
def __init__(
self,
*args: Any,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
def _get_fs(
self, file: Path, fs: Optional[AbstractFileSystem] = None
) -> AbstractFileSystem:
if fs is None:
fs = LocalFileSystem()
return fs
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
fs: Optional[AbstractFileSystem] = None,
) -> List[Document]:
"""Parse file into string."""
fs = self._get_fs(file, fs)
with fs.open(file, encoding="utf-8") as f:
content = f.read()
metadata = {"filename": file.name, "extension": file.suffix}
if extra_info:
metadata = {**metadata, **extra_info}
return [Document(text=content, metadata=metadata)]
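# Hedged usage sketch: "notes.txt" is a hypothetical local file; the snippet loads it with the
# reader defined above and prints the metadata attached to the resulting Document.
if __name__ == "__main__":
    docs = FlatReader().load_data(Path("notes.txt"), extra_info={"source": "local"})
    print(docs[0].metadata)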
|
"""Flat reader."""
from fsspec import AbstractFileSystem
from fsspec.implementations.local import LocalFileSystem
from pathlib import Path
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class FlatReader(BaseReader):
"""Flat reader.
    Extract raw text from a file and save the file type in the metadata.
"""
def __init__(
self,
*args: Any,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
def _get_fs(
self, file: Path, fs: Optional[AbstractFileSystem] = None
) -> AbstractFileSystem:
if fs is None:
fs = LocalFileSystem()
return fs
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
fs: Optional[AbstractFileSystem] = None,
) -> List[Document]:
"""Parse file into string."""
fs = self._get_fs(file, fs)
with fs.open(file, encoding="utf-8") as f:
content = f.read()
metadata = {"filename": file.name, "extension": file.suffix}
if extra_info:
metadata = {**metadata, **extra_info}
return [Document(text=content, metadata=metadata)]
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
from torch import Tensor
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import (BaseBoxes, HorizontalBoxes, bbox2distance,
distance2bbox, get_box_tensor)
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class DistancePointBBoxCoder(BaseBBoxCoder):
"""Distance Point BBox coder.
    This coder encodes gt bboxes (x1, y1, x2, y2) into (left, top, right,
    bottom) distances and decodes them back to the original boxes.
Args:
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
"""
def __init__(self, clip_border: Optional[bool] = True, **kwargs) -> None:
super().__init__(**kwargs)
self.clip_border = clip_border
def encode(self,
points: Tensor,
gt_bboxes: Union[Tensor, BaseBoxes],
max_dis: Optional[float] = None,
eps: float = 0.1) -> Tensor:
"""Encode bounding box to distances.
Args:
points (Tensor): Shape (N, 2), The format is [x, y].
gt_bboxes (Tensor or :obj:`BaseBoxes`): Shape (N, 4), The format
is "xyxy"
max_dis (float): Upper bound of the distance. Default None.
            eps (float): a small value to ensure target < max_dis instead of <=.
Default 0.1.
Returns:
Tensor: Box transformation deltas. The shape is (N, 4).
"""
gt_bboxes = get_box_tensor(gt_bboxes)
assert points.size(0) == gt_bboxes.size(0)
assert points.size(-1) == 2
assert gt_bboxes.size(-1) == 4
return bbox2distance(points, gt_bboxes, max_dis, eps)
def decode(
self,
points: Tensor,
pred_bboxes: Tensor,
max_shape: Optional[Union[Sequence[int], Tensor,
Sequence[Sequence[int]]]] = None
) -> Union[Tensor, BaseBoxes]:
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (B, N, 2) or (N, 2).
pred_bboxes (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom). Shape (B, N, 4)
or (N, 4)
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]],
and the length of max_shape should also be B.
Default None.
Returns:
Union[Tensor, :obj:`BaseBoxes`]: Boxes with shape (N, 4) or
(B, N, 4)
"""
assert points.size(0) == pred_bboxes.size(0)
assert points.size(-1) == 2
assert pred_bboxes.size(-1) == 4
if self.clip_border is False:
max_shape = None
bboxes = distance2bbox(points, pred_bboxes, max_shape)
if self.use_box_type:
bboxes = HorizontalBoxes(bboxes)
return bboxes
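# Minimal usage sketch (assumes torch and mmdet are installed): an encode/decode round-trip of
# this coder on toy points and ground-truth boxes, guarded so importing the module is unaffected.
if __name__ == '__main__':
    import torch
    coder = DistancePointBBoxCoder(clip_border=True)
    points = torch.tensor([[10.0, 10.0], [30.0, 30.0]])  # (N, 2) point locations
    gt_bboxes = torch.tensor([[0.0, 0.0, 20.0, 20.0],
                              [20.0, 20.0, 50.0, 60.0]])  # (N, 4) boxes in xyxy format
    distances = coder.encode(points, gt_bboxes)  # (N, 4) left/top/right/bottom distances
    decoded = coder.decode(points, distances, max_shape=(64, 64))  # clipped to a 64x64 image
    print(distances)
    print(decoded)  # recovers the original boxes (up to clipping)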
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import (HorizontalBoxes, bbox2distance,
distance2bbox, get_box_tensor)
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class DistancePointBBoxCoder(BaseBBoxCoder):
"""Distance Point BBox coder.
    This coder encodes gt bboxes (x1, y1, x2, y2) into (left, top, right,
    bottom) distances and decodes them back to the original boxes.
Args:
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
"""
def __init__(self, clip_border=True, **kwargs):
super().__init__(**kwargs)
self.clip_border = clip_border
def encode(self, points, gt_bboxes, max_dis=None, eps=0.1):
"""Encode bounding box to distances.
Args:
points (Tensor): Shape (N, 2), The format is [x, y].
gt_bboxes (Tensor or :obj:`BaseBoxes`): Shape (N, 4), The format
is "xyxy"
max_dis (float): Upper bound of the distance. Default None.
            eps (float): a small value to ensure target < max_dis instead of <=.
Default 0.1.
Returns:
Tensor: Box transformation deltas. The shape is (N, 4).
"""
gt_bboxes = get_box_tensor(gt_bboxes)
assert points.size(0) == gt_bboxes.size(0)
assert points.size(-1) == 2
assert gt_bboxes.size(-1) == 4
return bbox2distance(points, gt_bboxes, max_dis, eps)
def decode(self, points, pred_bboxes, max_shape=None):
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (B, N, 2) or (N, 2).
pred_bboxes (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom). Shape (B, N, 4)
or (N, 4)
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]],
and the length of max_shape should also be B.
Default None.
Returns:
Union[Tensor, :obj:`BaseBoxes`]: Boxes with shape (N, 4) or
(B, N, 4)
"""
assert points.size(0) == pred_bboxes.size(0)
assert points.size(-1) == 2
assert pred_bboxes.size(-1) == 4
if self.clip_border is False:
max_shape = None
bboxes = distance2bbox(points, pred_bboxes, max_shape)
if self.use_box_type:
bboxes = HorizontalBoxes(bboxes)
return bboxes
|
import os
import sys
import pkg_resources
from setuptools import setup, find_packages
def read_version(fname="whisper/version.py"):
exec(compile(open(fname, encoding="utf-8").read(), fname, "exec"))
return locals()["__version__"]
requirements = []
if sys.platform.startswith("linux"):
triton_requirement = "triton>=2.0.0.dev20221202"
try:
import re
import subprocess
        # the last line of `nvcc --version` looks like "Build cuda_11.7.r11.7/..."; parse major.minor from it
        version_line = subprocess.check_output(["nvcc", "--version"]).strip().split(b"\n")[-1]
major, minor = re.findall(rb"([\d]+)\.([\d]+)", version_line)[0]
if (int(major), int(minor)) < (11, 4):
# the last version supporting CUDA < 11.4
triton_requirement = "triton==2.0.0.dev20221011"
except (IndexError, OSError, subprocess.SubprocessError):
pass
requirements.append(triton_requirement)
setup(
name="openai-whisper",
py_modules=["whisper"],
version=read_version(),
description="Robust Speech Recognition via Large-Scale Weak Supervision",
long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
readme="README.md",
python_requires=">=3.7",
author="OpenAI",
url="https://github.com/openai/whisper",
license="MIT",
packages=find_packages(exclude=["tests*"]),
install_requires=requirements + [
str(r)
for r in pkg_resources.parse_requirements(
open(os.path.join(os.path.dirname(__file__), "requirements.txt"))
)
],
entry_points={
"console_scripts": ["whisper=whisper.transcribe:cli"],
},
include_package_data=True,
extras_require={"dev": ["pytest", "scipy"]},
)
|
import os
import pkg_resources
from setuptools import setup, find_packages
def read_version(fname="whisper/version.py"):
exec(compile(open(fname, encoding="utf-8").read(), fname, "exec"))
return locals()["__version__"]
setup(
name="openai-whisper",
py_modules=["whisper"],
version=read_version(),
description="Robust Speech Recognition via Large-Scale Weak Supervision",
long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
readme="README.md",
python_requires=">=3.7",
author="OpenAI",
url="https://github.com/openai/whisper",
license="MIT",
packages=find_packages(exclude=["tests*"]),
install_requires=[
str(r)
for r in pkg_resources.parse_requirements(
open(os.path.join(os.path.dirname(__file__), "requirements.txt"))
)
],
entry_points={
"console_scripts": ["whisper=whisper.transcribe:cli"],
},
include_package_data=True,
extras_require={"dev": ["pytest"]},
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .conditional_detr_head import ConditionalDETRHead
from .corner_head import CornerHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead',
'BoxInstBboxHead', 'BoxInstMaskHead', 'ConditionalDETRHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .corner_head import CornerHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead',
'BoxInstBboxHead', 'BoxInstMaskHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Iterator, List, Optional, Sequence, Union
from mmengine.registry import EVALUATOR, METRICS
from mmengine.structures import BaseDataElement
from .metric import BaseMetric
@EVALUATOR.register_module()
class Evaluator:
"""Wrapper class to compose multiple :class:`BaseMetric` instances.
Args:
metrics (dict or BaseMetric or Sequence): The config of metrics.
"""
def __init__(self, metrics: Union[dict, BaseMetric, Sequence]):
self._dataset_meta: Optional[dict] = None
if not isinstance(metrics, Sequence):
metrics = [metrics]
self.metrics: List[BaseMetric] = []
for metric in metrics:
if isinstance(metric, dict):
self.metrics.append(METRICS.build(metric))
else:
self.metrics.append(metric)
@property
def dataset_meta(self) -> Optional[dict]:
"""Optional[dict]: Meta info of the dataset."""
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
"""Set the dataset meta info to the evaluator and it's metrics."""
self._dataset_meta = dataset_meta
for metric in self.metrics:
metric.dataset_meta = dataset_meta
def process(self, data_batch: Sequence[dict],
predictions: Sequence[BaseDataElement]):
"""Convert ``BaseDataSample`` to dict and invoke process method of each
metric.
Args:
data_batch (Sequence[dict]): A batch of data from the dataloader.
predictions (Sequence[BaseDataElement]): A batch of outputs from
the model.
"""
_data_batch = []
for data in data_batch:
if isinstance(data['data_sample'], BaseDataElement):
_data_batch.append(
dict(
inputs=data['inputs'],
data_sample=data['data_sample'].to_dict()))
else:
_data_batch.append(data)
_predictions = []
for pred in predictions:
if isinstance(pred, BaseDataElement):
_predictions.append(pred.to_dict())
else:
_predictions.append(pred)
for metric in self.metrics:
metric.process(_data_batch, _predictions)
def evaluate(self, size: int) -> dict:
"""Invoke ``evaluate`` method of each metric and collect the metrics
dictionary.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation results of all metrics. The keys are the names
of the metrics, and the values are corresponding results.
"""
metrics = {}
for metric in self.metrics:
_results = metric.evaluate(size)
# Check metric name conflicts
for name in _results.keys():
if name in metrics:
raise ValueError(
'There are multiple evaluation results with the same '
f'metric name {name}. Please make sure all metrics '
'have different prefixes.')
metrics.update(_results)
return metrics
def offline_evaluate(self,
data: Sequence,
predictions: Sequence,
chunk_size: int = 1):
"""Offline evaluate the dumped predictions on the given data .
Args:
data (Sequence): All data of the validation set.
predictions (Sequence): All predictions of the model on the
validation set.
chunk_size (int): The number of data samples and predictions to be
processed in a batch.
"""
# support chunking iterable objects
def get_chunks(seq: Iterator, chunk_size=1):
stop = False
while not stop:
chunk = []
for _ in range(chunk_size):
try:
chunk.append(next(seq))
except StopIteration:
stop = True
break
if chunk:
yield chunk
size = 0
for data_chunk, pred_chunk in zip(
get_chunks(iter(data), chunk_size),
get_chunks(iter(predictions), chunk_size)):
size += len(data_chunk)
self.process(data_chunk, pred_chunk)
return self.evaluate(size)
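# Minimal sketch of composing this Evaluator with a toy metric and scoring dumped predictions
# offline; the metric, data and predictions are made up for illustration, and the block is
# guarded so importing the module is unaffected.
if __name__ == '__main__':
    class ToyAccuracy(BaseMetric):
        """Count exact matches between predicted and ground-truth labels."""
        def process(self, data_batch, predictions):
            for data, pred in zip(data_batch, predictions):
                self.results.append(int(pred['label'] == data['data_sample']['gt_label']))
        def compute_metrics(self, results):
            return {'toy/accuracy': sum(results) / len(results)}
    data = [dict(inputs=None, data_sample=dict(gt_label=i % 2)) for i in range(4)]
    predictions = [dict(label=0), dict(label=1), dict(label=0), dict(label=0)]
    evaluator = Evaluator(ToyAccuracy())
    # Ground truth [0, 1, 0, 1] vs predictions [0, 1, 0, 0] -> 3 of 4 correct.
    print(evaluator.offline_evaluate(data, predictions, chunk_size=2))  # {'toy/accuracy': 0.75}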
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Iterator, List, Optional, Sequence, Union
from mmengine.data import BaseDataElement
from mmengine.registry import EVALUATOR, METRICS
from .metric import BaseMetric
@EVALUATOR.register_module()
class Evaluator:
"""Wrapper class to compose multiple :class:`BaseMetric` instances.
Args:
metrics (dict or BaseMetric or Sequence): The config of metrics.
"""
def __init__(self, metrics: Union[dict, BaseMetric, Sequence]):
self._dataset_meta: Optional[dict] = None
if not isinstance(metrics, Sequence):
metrics = [metrics]
self.metrics: List[BaseMetric] = []
for metric in metrics:
if isinstance(metric, dict):
self.metrics.append(METRICS.build(metric))
else:
self.metrics.append(metric)
@property
def dataset_meta(self) -> Optional[dict]:
"""Optional[dict]: Meta info of the dataset."""
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
"""Set the dataset meta info to the evaluator and it's metrics."""
self._dataset_meta = dataset_meta
for metric in self.metrics:
metric.dataset_meta = dataset_meta
def process(self, data_batch: Sequence[dict],
predictions: Sequence[BaseDataElement]):
"""Convert ``BaseDataSample`` to dict and invoke process method of each
metric.
Args:
data_batch (Sequence[dict]): A batch of data from the dataloader.
predictions (Sequence[BaseDataElement]): A batch of outputs from
the model.
"""
_data_batch = []
for data in data_batch:
if isinstance(data['data_sample'], BaseDataElement):
_data_batch.append(
dict(
inputs=data['inputs'],
data_sample=data['data_sample'].to_dict()))
else:
_data_batch.append(data)
_predictions = []
for pred in predictions:
if isinstance(pred, BaseDataElement):
_predictions.append(pred.to_dict())
else:
_predictions.append(pred)
for metric in self.metrics:
metric.process(_data_batch, _predictions)
def evaluate(self, size: int) -> dict:
"""Invoke ``evaluate`` method of each metric and collect the metrics
dictionary.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation results of all metrics. The keys are the names
of the metrics, and the values are corresponding results.
"""
metrics = {}
for metric in self.metrics:
_results = metric.evaluate(size)
# Check metric name conflicts
for name in _results.keys():
if name in metrics:
raise ValueError(
'There are multiple evaluation results with the same '
f'metric name {name}. Please make sure all metrics '
'have different prefixes.')
metrics.update(_results)
return metrics
def offline_evaluate(self,
data: Sequence,
predictions: Sequence,
chunk_size: int = 1):
"""Offline evaluate the dumped predictions on the given data .
Args:
data (Sequence): All data of the validation set.
predictions (Sequence): All predictions of the model on the
validation set.
chunk_size (int): The number of data samples and predictions to be
processed in a batch.
"""
# support chunking iterable objects
def get_chunks(seq: Iterator, chunk_size=1):
stop = False
while not stop:
chunk = []
for _ in range(chunk_size):
try:
chunk.append(next(seq))
except StopIteration:
stop = True
break
if chunk:
yield chunk
size = 0
for data_chunk, pred_chunk in zip(
get_chunks(iter(data), chunk_size),
get_chunks(iter(predictions), chunk_size)):
size += len(data_chunk)
self.process(data_chunk, pred_chunk)
return self.evaluate(size)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import RedisChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"RedisChatMessageHistory": "langchain_community.chat_message_histories",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"RedisChatMessageHistory",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import RedisChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"RedisChatMessageHistory": "langchain_community.chat_message_histories"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"RedisChatMessageHistory",
]
|
from typing import Optional, TypeVar
from docarray.base_document import BaseDocument
from docarray.documents import Audio
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
T = TypeVar('T', bound='Video')
class Video(BaseDocument):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`Video.url`), an Audio Document
(`Video.audio`), a VideoTensor (`Video.tensor`), an AnyTensor representing
the indices of the video's key frames (`Video.key_frame_indices`) and an
AnyEmbedding (`Video.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Video
# use it directly
vid = Video(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import Text, Video
# extend it
class MyVideo(Video):
name: Optional[Text]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video.video_tensor = video.url.load().video
model = MyEmbeddingModel()
video.embedding = model(video.tensor)
video.name = Text(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Text, Video
# compose it
class MultiModalDoc(BaseDocument):
video: Video
text: Text
mmdoc = MultiModalDoc(
video=Video(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.video.video_tensor = mmdoc.video.url.load().video
"""
url: Optional[VideoUrl]
audio: Optional[Audio] = Audio()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
|
from typing import Optional, TypeVar
from docarray.base_document import BaseDocument
from docarray.documents import Audio
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
T = TypeVar('T', bound='Video')
class Video(BaseDocument):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`Video.url`), an Audio Document
(`Video.audio`), a VideoTensor (`Video.tensor`), an AnyTensor representing
the indices of the video's key frames (`Video.key_frame_indices`) and an
AnyEmbedding (`Video.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Video
# use it directly
vid = Video(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import Text, Video
# extend it
class MyVideo(Video):
name: Optional[Text]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video.tensor = video.url.load_key_frames()
model = MyEmbeddingModel()
video.embedding = model(video.tensor)
video.name = Text(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Text, Video
# compose it
class MultiModalDoc(BaseDocument):
video: Video
text: Text
mmdoc = MultiModalDoc(
video=Video(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.video.tensor = mmdoc.video.url.load_key_frames()
"""
url: Optional[VideoUrl]
audio: Optional[Audio] = Audio()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
bbox_head=dict(
_delete_=True,
type='SABLRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
norm_cfg=norm_cfg,
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 960)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
bbox_head=dict(
_delete_=True,
type='SABLRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
norm_cfg=norm_cfg,
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 960)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
from keras.src.api_export import keras_export
@keras_export(["keras.Initializer", "keras.initializers.Initializer"])
class Initializer:
"""Initializer base class: all Keras initializers inherit from this class.
Initializers should implement a `__call__()` method with the following
signature:
```python
def __call__(self, shape, dtype=None, **kwargs):
# returns a tensor of shape `shape` and dtype `dtype`
# containing values drawn from a distribution of your choice.
```
Optionally, you can also implement the method `get_config()` and the class
method `from_config` in order to support serialization, just like with
any Keras object.
Here's a simple example: a random normal initializer.
```python
class ExampleRandomNormal(Initializer):
def __init__(self, mean, stddev):
self.mean = mean
self.stddev = stddev
def __call__(self, shape, dtype=None, **kwargs):
return keras.random.normal(
shape, mean=self.mean, stddev=self.stddev, dtype=dtype
)
def get_config(self): # To support serialization
return {"mean": self.mean, "stddev": self.stddev}
```
Note that we don't have to implement `from_config()` in the example above
    since the constructor arguments of the class and the keys in the config returned
by `get_config()` are the same. In this case, the default `from_config()`
works fine.
"""
def __call__(self, shape, dtype=None):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor.
"""
raise NotImplementedError(
"Initializer subclasses must implement the `__call__()` method."
)
def get_config(self):
"""Returns the initializer's configuration as a JSON-serializable dict.
Returns:
A JSON-serializable Python dict.
"""
return {}
@classmethod
def from_config(cls, config):
"""Instantiates an initializer from a configuration dictionary.
Example:
```python
initializer = RandomUniform(-1, 1)
config = initializer.get_config()
initializer = RandomUniform.from_config(config)
```
Args:
config: A Python dictionary, the output of `get_config()`.
Returns:
An `Initializer` instance.
"""
return cls(**config)
def clone(self):
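        # Build a new initializer of the same class from this one's serialized config.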
return self.__class__.from_config(self.get_config())
|
from keras.src.api_export import keras_export
@keras_export(["keras.Initializer", "keras.initializers.Initializer"])
class Initializer:
"""Initializer base class: all Keras initializers inherit from this class.
Initializers should implement a `__call__()` method with the following
signature:
```python
def __call__(self, shape, dtype=None, **kwargs):
# returns a tensor of shape `shape` and dtype `dtype`
# containing values drawn from a distribution of your choice.
```
    Optionally, you can also implement the method `get_config()` and the class
method `from_config` in order to support serialization -- just like with
any Keras object.
Here's a simple example: a random normal initializer.
```python
class ExampleRandomNormal(Initializer):
def __init__(self, mean, stddev):
self.mean = mean
self.stddev = stddev
def __call__(self, shape, dtype=None, **kwargs):
return keras.random.normal(
shape, mean=self.mean, stddev=self.stddev, dtype=dtype
)
def get_config(self): # To support serialization
return {"mean": self.mean, "stddev": self.stddev}
```
Note that we don't have to implement `from_config()` in the example above
    since the constructor arguments of the class and the keys in the config returned
by `get_config()` are the same. In this case, the default `from_config()`
works fine.
"""
def __call__(self, shape, dtype=None):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor.
"""
raise NotImplementedError(
"Initializer subclasses must implement the `__call__()` method."
)
def get_config(self):
"""Returns the initializer's configuration as a JSON-serializable dict.
Returns:
A JSON-serializable Python dict.
"""
return {}
@classmethod
def from_config(cls, config):
"""Instantiates an initializer from a configuration dictionary.
Example:
```python
initializer = RandomUniform(-1, 1)
config = initializer.get_config()
initializer = RandomUniform.from_config(config)
```
Args:
config: A Python dictionary, the output of `get_config()`.
Returns:
An `Initializer` instance.
"""
return cls(**config)
def clone(self):
return self.__class__.from_config(self.get_config())
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import torch
from mmdet.apis import inference_detector, init_detector
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection webcam demo')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument(
'--camera-id', type=int, default=0, help='camera device id')
parser.add_argument(
'--score-thr', type=float, default=0.5, help='bbox score threshold')
args = parser.parse_args()
return args
def main():
args = parse_args()
device = torch.device(args.device)
model = init_detector(args.config, args.checkpoint, device=device)
camera = cv2.VideoCapture(args.camera_id)
print('Press "Esc", "q" or "Q" to exit.')
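    # Continuously grab frames from the camera, run the detector on each frame,
    # and display the results until Esc, q or Q is pressed.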
while True:
ret_val, img = camera.read()
result = inference_detector(model, img)
ch = cv2.waitKey(1)
if ch == 27 or ch == ord('q') or ch == ord('Q'):
break
model.show_result(
img, result, score_thr=args.score_thr, wait_time=1, show=True)
if __name__ == '__main__':
main()
|
import argparse
import cv2
import torch
from mmdet.apis import inference_detector, init_detector
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection webcam demo')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument(
'--camera-id', type=int, default=0, help='camera device id')
parser.add_argument(
'--score-thr', type=float, default=0.5, help='bbox score threshold')
args = parser.parse_args()
return args
def main():
args = parse_args()
device = torch.device(args.device)
model = init_detector(args.config, args.checkpoint, device=device)
camera = cv2.VideoCapture(args.camera_id)
print('Press "Esc", "q" or "Q" to exit.')
while True:
ret_val, img = camera.read()
result = inference_detector(model, img)
ch = cv2.waitKey(1)
if ch == 27 or ch == ord('q') or ch == ord('Q'):
break
model.show_result(
img, result, score_thr=args.score_thr, wait_time=1, show=True)
if __name__ == '__main__':
main()
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# Attempt to fix the fork error on macOS; this seems to have no effect, so the variable must be exported manually before Jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.22.5'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful when running matplotlib/seaborn with
    many parallel plot generators, compared to the Ubuntu default of `ulimit -n 1024` or the
    OS X El Capitan default of 256. The setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# Attempt to fix the fork error on macOS; this seems to have no effect, so the variable must be exported manually before Jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.22.4'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful when running matplotlib/seaborn with
    many parallel plot generators, compared to the Ubuntu default of `ulimit -n 1024` or the
    OS X El Capitan default of 256. The setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
from __future__ import annotations
import os
from . import InputExample
class LabelSentenceReader:
"""Reads in a file that has at least two columns: a label and a sentence.
    This reader can, for example, be used with the BatchHardTripletLoss.
    Labels are automatically mapped to integers.
"""
def __init__(self, folder, label_col_idx=0, sentence_col_idx=1, separator="\t"):
self.folder = folder
self.label_map = {}
self.label_col_idx = label_col_idx
self.sentence_col_idx = sentence_col_idx
self.separator = separator
def get_examples(self, filename, max_examples=0):
examples = []
id = 0
for line in open(os.path.join(self.folder, filename), encoding="utf-8"):
splits = line.strip().split(self.separator)
label = splits[self.label_col_idx]
sentence = splits[self.sentence_col_idx]
if label not in self.label_map:
self.label_map[label] = len(self.label_map)
label_id = self.label_map[label]
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid, texts=[sentence], label=label_id))
if 0 < max_examples <= id:
break
return examples
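# A minimal usage sketch (the folder and file name below are hypothetical; it assumes a
# tab-separated file with one "<label>\t<sentence>" pair per line):
#
#   reader = LabelSentenceReader("datasets/my-labeled-sentences")
#   examples = reader.get_examples("train.tsv", max_examples=1000)
#   print(examples[0].texts, examples[0].label)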
|
import os
from . import InputExample
class LabelSentenceReader:
"""Reads in a file that has at least two columns: a label and a sentence.
    This reader can, for example, be used with the BatchHardTripletLoss.
    Labels are automatically mapped to integers.
"""
def __init__(self, folder, label_col_idx=0, sentence_col_idx=1, separator="\t"):
self.folder = folder
self.label_map = {}
self.label_col_idx = label_col_idx
self.sentence_col_idx = sentence_col_idx
self.separator = separator
def get_examples(self, filename, max_examples=0):
examples = []
id = 0
for line in open(os.path.join(self.folder, filename), encoding="utf-8"):
splits = line.strip().split(self.separator)
label = splits[self.label_col_idx]
sentence = splits[self.sentence_col_idx]
if label not in self.label_map:
self.label_map[label] = len(self.label_map)
label_id = self.label_map[label]
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid, texts=[sentence], label=label_id))
if 0 < max_examples <= id:
break
return examples
|
import unittest
import torch
from mmengine.config import Config
from mmengine.data import InstanceData
from mmengine.testing import assert_allclose
from mmdet.core.evaluation import INSTANCE_OFFSET
from mmdet.models.seg_heads.panoptic_fusion_heads import HeuristicFusionHead
class TestHeuristicFusionHead(unittest.TestCase):
def test_loss(self):
head = HeuristicFusionHead(num_things_classes=2, num_stuff_classes=2)
result = head.loss()
self.assertTrue(not head.with_loss)
self.assertDictEqual(result, dict())
def test_predict(self):
test_cfg = Config(dict(mask_overlap=0.5, stuff_area_limit=1))
head = HeuristicFusionHead(
num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
mask_results = InstanceData()
mask_results.bboxes = torch.tensor([[0, 0, 1, 1], [1, 1, 2, 2]])
mask_results.labels = torch.tensor([0, 1])
mask_results.scores = torch.tensor([0.8, 0.7])
mask_results.masks = torch.tensor([[[1, 0], [0, 0]], [[0, 0],
[0, 1]]]).bool()
seg_preds_list = [
torch.tensor([[[0.2, 0.7], [0.3, 0.1]], [[0.2, 0.2], [0.6, 0.1]],
[[0.6, 0.1], [0.1, 0.8]]])
]
target_list = [
torch.tensor([[0 + 1 * INSTANCE_OFFSET, 2],
[3, 1 + 2 * INSTANCE_OFFSET]])
]
results_list = head.predict([mask_results], seg_preds_list)
for target, result in zip(target_list, results_list):
assert_allclose(result.sem_seg[0], target)
# test with no thing
head = HeuristicFusionHead(
num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
mask_results = InstanceData()
mask_results.bboxes = torch.zeros((0, 4))
mask_results.labels = torch.zeros((0, )).long()
mask_results.scores = torch.zeros((0, ))
mask_results.masks = torch.zeros((0, 2, 2), dtype=torch.bool)
seg_preds_list = [
torch.tensor([[[0.2, 0.7], [0.3, 0.1]], [[0.2, 0.2], [0.6, 0.1]],
[[0.6, 0.1], [0.1, 0.8]]])
]
target_list = [torch.tensor([[4, 2], [3, 4]])]
results_list = head.predict([mask_results], seg_preds_list)
for target, result in zip(target_list, results_list):
assert_allclose(result.sem_seg[0], target)
|
import unittest
import torch
from mmengine.config import Config
from mmengine.testing import assert_allclose
from mmdet.core.evaluation import INSTANCE_OFFSET
from mmdet.models.seg_heads.panoptic_fusion_heads import HeuristicFusionHead
class TestHeuristicFusionHead(unittest.TestCase):
def test_loss(self):
head = HeuristicFusionHead(num_things_classes=2, num_stuff_classes=2)
result = head.loss()
self.assertTrue(not head.with_loss)
self.assertDictEqual(result, dict())
def test_predict(self):
test_cfg = Config(
dict(mask_thr_binary=0.5, mask_overlap=0.5, stuff_area_limit=1))
head = HeuristicFusionHead(
num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
det_bboxes_list = [torch.tensor([[0, 0, 1, 1], [1, 1, 2, 2]])]
det_labels_list = [torch.tensor([0, 1])]
mask_preds_list = [
torch.tensor([[[0.6, 0.1], [0.1, 0.2]], [[0.1, 0.1], [0.2, 0.8]]])
]
seg_preds_list = [
torch.tensor([[[0.2, 0.7], [0.3, 0.1]], [[0.2, 0.2], [0.6, 0.1]],
[[0.6, 0.1], [0.1, 0.8]]])
]
target_list = [
torch.tensor([[0 + 2 * INSTANCE_OFFSET, 2],
[3, 1 + 1 * INSTANCE_OFFSET]])
]
results_list = head.predict(det_bboxes_list, det_labels_list,
mask_preds_list, seg_preds_list)
for target, result in zip(target_list, results_list):
assert_allclose(result.sem_seg[0], target)
# test with no thing
test_cfg = Config(
dict(mask_thr_binary=0.5, mask_overlap=0.5, stuff_area_limit=1))
head = HeuristicFusionHead(
num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
det_bboxes_list = [torch.zeros((0, 4))]
det_labels_list = [torch.tensor((0, ))]
mask_preds_list = [torch.tensor((0, 2, 2))]
seg_preds_list = [
torch.tensor([[[0.2, 0.7], [0.3, 0.1]], [[0.2, 0.2], [0.6, 0.1]],
[[0.6, 0.1], [0.1, 0.8]]])
]
target_list = [torch.tensor([[4, 2], [3, 4]])]
results_list = head.predict(det_bboxes_list, det_labels_list,
mask_preds_list, seg_preds_list)
for target, result in zip(target_list, results_list):
assert_allclose(result.sem_seg[0], target)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_method, import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist,
scandir, symlink)
from .version_utils import digit_version, get_git_hash
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION', 'load_url'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_method, import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist,
scandir, symlink)
from .version_utils import digit_version, get_git_hash
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION'
]
|
from dataclasses import dataclass, fields, field
from typing import Optional, Tuple, TYPE_CHECKING
if TYPE_CHECKING:
from docarray.score import NamedScore
default_values = dict(value=0.0, op_name='', description='', ref_id='')
@dataclass(unsafe_hash=True)
class NamedScoreData:
_reference_ns: 'NamedScore' = field(hash=False, compare=False)
value: Optional[float] = None
op_name: Optional[str] = None
description: Optional[str] = None
ref_id: Optional[str] = None
@property
def _non_empty_fields(self) -> Tuple[str]:
r = []
for f in fields(self):
f_name = f.name
if not f_name.startswith('_'):
v = getattr(self, f_name)
if v is not None:
r.append(f_name)
return tuple(r)
def _set_default_value_if_none(self, key):
if getattr(self, key) is None:
setattr(self, key, default_values[key])
|
from dataclasses import dataclass, fields, field
from typing import Optional, Tuple, TYPE_CHECKING
if TYPE_CHECKING:
from docarray.score import NamedScore
default_values = dict(value=0.0, op_name='', description='', ref_id='')
@dataclass(unsafe_hash=True)
class NamedScoreData:
_reference_ns: 'NamedScore' = field(hash=False, compare=False)
value: Optional[float] = None
op_name: Optional[str] = None
description: Optional[str] = None
ref_id: Optional[str] = None
@property
def _non_empty_fields(self) -> Tuple[str]:
r = []
for f in fields(self):
f_name = f.name
if not f_name.startswith('_'):
v = getattr(self, f_name)
if v is not None:
if f_name not in default_values:
r.append(f_name)
else:
dv = default_values[f_name]
if v != dv:
r.append(f_name)
return tuple(r)
def _set_default_value_if_none(self, key):
if getattr(self, key) is None:
setattr(self, key, default_values[key])
|
from __future__ import annotations
import re
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.util import is_datasets_available
if not is_datasets_available():
pytest.skip(
reason="Datasets are not installed. Please install `datasets` with `pip install datasets`",
allow_module_level=True,
)
def test_nanobeir_evaluator():
"""Tests that the NanoBERTEvaluator can be loaded and produces expected metrics"""
datasets = ["QuoraRetrieval", "MSMARCO"]
query_prompts = {
"QuoraRetrieval": "Instruct: Given a question, retrieve questions that are semantically equivalent to the given question\\nQuery: ",
"MSMARCO": "Instruct: Given a web search query, retrieve relevant passages that answer the query\\nQuery: ",
}
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
evaluator = NanoBEIREvaluator(
dataset_names=datasets,
query_prompts=query_prompts,
)
results = evaluator(model)
assert len(results) > 0
assert all(isinstance(results[metric], float) for metric in results)
def test_nanobeir_evaluator_with_invalid_dataset():
"""Test that NanoBEIREvaluator raises an error for invalid dataset names."""
invalid_datasets = ["invalidDataset"]
with pytest.raises(
ValueError,
match=re.escape(
r"Dataset(s) ['invalidDataset'] not found in the NanoBEIR collection. "
r"Valid dataset names are: ['climatefever', 'dbpedia', 'fever', 'fiqa2018', 'hotpotqa', 'msmarco', 'nfcorpus', 'nq', 'quoraretrieval', 'scidocs', 'arguana', 'scifact', 'touche2020']"
),
):
NanoBEIREvaluator(dataset_names=invalid_datasets)
def test_nanobeir_evaluator_empty_inputs():
"""Test that NanoBEIREvaluator behaves correctly with empty datasets."""
with pytest.raises(ValueError, match="dataset_names cannot be empty. Use None to evaluate on all datasets."):
NanoBEIREvaluator(dataset_names=[])
|
from __future__ import annotations
import re
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import NanoBEIREvaluator
def test_nanobeir_evaluator():
"""Tests that the NanoBERTEvaluator can be loaded and produces expected metrics"""
datasets = ["QuoraRetrieval", "MSMARCO"]
query_prompts = {
"QuoraRetrieval": "Instruct: Given a question, retrieve questions that are semantically equivalent to the given question\\nQuery: ",
"MSMARCO": "Instruct: Given a web search query, retrieve relevant passages that answer the query\\nQuery: ",
}
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
evaluator = NanoBEIREvaluator(
dataset_names=datasets,
query_prompts=query_prompts,
)
results = evaluator(model)
assert len(results) > 0
assert all(isinstance(results[metric], float) for metric in results)
def test_nanobeir_evaluator_with_invalid_dataset():
"""Test that NanoBEIREvaluator raises an error for invalid dataset names."""
invalid_datasets = ["invalidDataset"]
with pytest.raises(
ValueError,
match=re.escape(
r"Dataset(s) ['invalidDataset'] not found in the NanoBEIR collection. "
r"Valid dataset names are: ['climatefever', 'dbpedia', 'fever', 'fiqa2018', 'hotpotqa', 'msmarco', 'nfcorpus', 'nq', 'quoraretrieval', 'scidocs', 'arguana', 'scifact', 'touche2020']"
),
):
NanoBEIREvaluator(dataset_names=invalid_datasets)
def test_nanobeir_evaluator_empty_inputs():
"""Test that NanoBEIREvaluator behaves correctly with empty datasets."""
with pytest.raises(ValueError, match="dataset_names cannot be empty. Use None to evaluate on all datasets."):
NanoBEIREvaluator(dataset_names=[])
|
import torchaudio
_STREAM_READER = [
"StreamReader",
]
_STREAM_WRITER = [
"StreamWriter",
]
_LAZILY_IMPORTED = _STREAM_READER + _STREAM_WRITER
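# Module-level __getattr__ (PEP 562): the FFmpeg extension is initialized and the requested
# class is imported only on first attribute access, after which it is cached in globals().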
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
if not torchaudio._extension._FFMPEG_INITIALIZED:
torchaudio._extension._init_ffmpeg()
if name in _STREAM_READER:
from . import _stream_reader
item = getattr(_stream_reader, name)
else:
from . import _stream_writer
item = getattr(_stream_writer, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__ + _LAZILY_IMPORTED)
__all__ = []
|
import torchaudio
_STREAM_READER = [
"StreamReader",
"StreamReaderSourceStream",
"StreamReaderSourceAudioStream",
"StreamReaderSourceVideoStream",
"StreamReaderOutputStream",
]
_STREAM_WRITER = [
"StreamWriter",
]
_LAZILY_IMPORTED = _STREAM_READER + _STREAM_WRITER
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
if not torchaudio._extension._FFMPEG_INITIALIZED:
torchaudio._extension._init_ffmpeg()
if name in _STREAM_READER:
from . import _stream_reader
item = getattr(_stream_reader, name)
else:
from . import _stream_writer
item = getattr(_stream_writer, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__ + _LAZILY_IMPORTED)
__all__ = []
|
import pytest
from backend.util.service import (
AppService,
AppServiceClient,
endpoint_to_async,
expose,
get_service_client,
)
TEST_SERVICE_PORT = 8765
class ServiceTest(AppService):
def __init__(self):
super().__init__()
def cleanup(self):
pass
@classmethod
def get_port(cls) -> int:
return TEST_SERVICE_PORT
@expose
def add(self, a: int, b: int) -> int:
return a + b
@expose
def subtract(self, a: int, b: int) -> int:
return a - b
@expose
def fun_with_async(self, a: int, b: int) -> int:
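        # Synchronous endpoint that defines a coroutine internally and runs it to
        # completion via run_and_wait before returning the result.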
async def add_async(a: int, b: int) -> int:
return a + b
return self.run_and_wait(add_async(a, b))
class ServiceTestClient(AppServiceClient):
@classmethod
def get_service_type(cls):
return ServiceTest
add = ServiceTest.add
subtract = ServiceTest.subtract
fun_with_async = ServiceTest.fun_with_async
add_async = endpoint_to_async(ServiceTest.add)
subtract_async = endpoint_to_async(ServiceTest.subtract)
@pytest.mark.asyncio
async def test_service_creation(server):
with ServiceTest():
client = get_service_client(ServiceTestClient)
assert client.add(5, 3) == 8
assert client.subtract(10, 4) == 6
assert client.fun_with_async(5, 3) == 8
assert await client.add_async(5, 3) == 8
assert await client.subtract_async(10, 4) == 6
|
import pytest
from backend.util.service import (
AppService,
AppServiceClient,
endpoint_to_async,
expose,
get_service_client,
)
TEST_SERVICE_PORT = 8765
class ServiceTest(AppService):
def __init__(self):
super().__init__()
def cleanup(self):
pass
@classmethod
def get_port(cls) -> int:
return TEST_SERVICE_PORT
@expose
def add(self, a: int, b: int) -> int:
return a + b
@expose
def subtract(self, a: int, b: int) -> int:
return a - b
@expose
def fun_with_async(self, a: int, b: int) -> int:
async def add_async(a: int, b: int) -> int:
return a + b
return self.run_and_wait(add_async(a, b))
class ServiceTestClient(AppServiceClient):
@classmethod
def get_service_type(cls):
return ServiceTest
add = ServiceTest.add
subtract = ServiceTest.subtract
fun_with_async = ServiceTest.fun_with_async
add_async = endpoint_to_async(ServiceTest.add)
subtract_async = endpoint_to_async(ServiceTest.subtract)
@pytest.mark.asyncio(loop_scope="session")
async def test_service_creation(server):
with ServiceTest():
client = get_service_client(ServiceTestClient)
assert client.add(5, 3) == 8
assert client.subtract(10, 4) == 6
assert client.fun_with_async(5, 3) == 8
assert await client.add_async(5, 3) == 8
assert await client.subtract_async(10, 4) == 6
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class FasterRCNN(TwoStageDetector):
"""Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None,
img_norm_cfg=None):
super(FasterRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg,
img_norm_cfg=img_norm_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class FasterRCNN(TwoStageDetector):
"""Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(FasterRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
FixedSizeCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat, ConvertDtype, ConvertImageDtype
from ._misc import (
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
PermuteDimensions,
RemoveSmallBoundingBoxes,
ToDtype,
TransposeDimensions,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import LabelToOneHot, PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import Grayscale, RandomGrayscale, ToTensor # usort: skip
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
FixedSizeCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat, ConvertColorSpace, ConvertDtype, ConvertImageDtype
from ._misc import (
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
PermuteDimensions,
RemoveSmallBoundingBoxes,
ToDtype,
TransposeDimensions,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import LabelToOneHot, PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import Grayscale, RandomGrayscale, ToTensor # usort: skip
|
"""
This script trains sentence transformers with a triplet loss function.
As corpus, we use the Wikipedia Sections dataset that was described by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks.
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import TripletEvaluator
from sentence_transformers.losses import TripletLoss
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = "distilbert-base-uncased"
batch_size = 16
num_train_epochs = 1
output_dir = "output/training-wikipedia-sections-" + model_name + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
# 2. Load the Wikipedia-Sections dataset: https://huggingface.co/datasets/sentence-transformers/wikipedia-sections
train_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="train").select(
range(10_000)
)
eval_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="validation").select(
range(1000)
)
test_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="test").select(range(1000))
logging.info(train_dataset)
# 3. Define our training loss
# TripletLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#tripletloss) needs three text columns
train_loss = TripletLoss(model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = TripletEvaluator(
anchors=eval_dataset[:1000]["anchor"],
positives=eval_dataset[:1000]["positive"],
negatives=eval_dataset[:1000]["negative"],
name="wikipedia-sections-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="wikipedia-sections-triplet", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the Wikipedia Sections test dataset
test_evaluator = TripletEvaluator(
anchors=test_dataset["anchor"],
positives=test_dataset["positive"],
negatives=test_dataset["negative"],
name="wikipedia-sections-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-wikipedia-sections-triplet")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-wikipedia-sections-triplet')`."
)
|
"""
This script trains sentence transformers with a triplet loss function.
As corpus, we use the Wikipedia Sections dataset that was described by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks.
"""
from sentence_transformers import SentenceTransformer, InputExample, LoggingHandler, losses, models, util
from torch.utils.data import DataLoader
from sentence_transformers.evaluation import TripletEvaluator
from datetime import datetime
from zipfile import ZipFile
import csv
import logging
import os
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = "distilbert-base-uncased"
dataset_path = "datasets/wikipedia-sections"
if not os.path.exists(dataset_path):
os.makedirs(dataset_path, exist_ok=True)
filepath = os.path.join(dataset_path, "wikipedia-sections-triplets.zip")
util.http_get("https://sbert.net/datasets/wikipedia-sections-triplets.zip", filepath)
with ZipFile(filepath, "r") as zip:
zip.extractall(dataset_path)
### Create a torch.DataLoader that passes training batch instances to our model
train_batch_size = 16
output_path = "output/training-wikipedia-sections-" + model_name + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
num_epochs = 1
### Configure sentence transformers for training and train on the provided dataset
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
logger.info("Read Triplet train dataset")
train_examples = []
with open(os.path.join(dataset_path, "train.csv"), encoding="utf-8") as fIn:
reader = csv.DictReader(fIn, delimiter=",", quoting=csv.QUOTE_MINIMAL)
for row in reader:
train_examples.append(InputExample(texts=[row["Sentence1"], row["Sentence2"], row["Sentence3"]], label=0))
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.TripletLoss(model=model)
logger.info("Read Wikipedia Triplet dev dataset")
dev_examples = []
with open(os.path.join(dataset_path, "validation.csv"), encoding="utf-8") as fIn:
reader = csv.DictReader(fIn, delimiter=",", quoting=csv.QUOTE_MINIMAL)
for row in reader:
dev_examples.append(InputExample(texts=[row["Sentence1"], row["Sentence2"], row["Sentence3"]]))
if len(dev_examples) >= 1000:
break
evaluator = TripletEvaluator.from_input_examples(dev_examples, name="dev")
warmup_steps = int(len(train_dataloader) * num_epochs * 0.1) # 10% of train data
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=output_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on the Wikipedia Sections test dataset
#
##############################################################################
logger.info("Read test examples")
test_examples = []
with open(os.path.join(dataset_path, "test.csv"), encoding="utf-8") as fIn:
reader = csv.DictReader(fIn, delimiter=",", quoting=csv.QUOTE_MINIMAL)
for row in reader:
test_examples.append(InputExample(texts=[row["Sentence1"], row["Sentence2"], row["Sentence3"]]))
model = SentenceTransformer(output_path)
test_evaluator = TripletEvaluator.from_input_examples(test_examples, name="test")
test_evaluator(model, output_path=output_path)
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
init_cfg=dict(
type='Pretrained', checkpoint='./swav_800ep_pretrain.pth.tar')))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
init_cfg=dict(
type='Pretrained', checkpoint='./swav_800ep_pretrain.pth.tar')))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.jax import core
from keras.src.backend.jax import distribution_lib
from keras.src.backend.jax import image
from keras.src.backend.jax import linalg
from keras.src.backend.jax import math
from keras.src.backend.jax import nn
from keras.src.backend.jax import numpy
from keras.src.backend.jax import random
from keras.src.backend.jax import tensorboard
from keras.src.backend.jax.core import IS_THREAD_SAFE
from keras.src.backend.jax.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.jax.core import Variable
from keras.src.backend.jax.core import cast
from keras.src.backend.jax.core import compute_output_spec
from keras.src.backend.jax.core import cond
from keras.src.backend.jax.core import convert_to_numpy
from keras.src.backend.jax.core import convert_to_tensor
from keras.src.backend.jax.core import device_scope
from keras.src.backend.jax.core import is_tensor
from keras.src.backend.jax.core import random_seed_dtype
from keras.src.backend.jax.core import scatter
from keras.src.backend.jax.core import shape
from keras.src.backend.jax.core import stop_gradient
from keras.src.backend.jax.core import vectorized_map
from keras.src.backend.jax.rnn import cudnn_ok
from keras.src.backend.jax.rnn import gru
from keras.src.backend.jax.rnn import lstm
from keras.src.backend.jax.rnn import rnn
|
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.jax import core
from keras.src.backend.jax import distribution_lib
from keras.src.backend.jax import image
from keras.src.backend.jax import linalg
from keras.src.backend.jax import math
from keras.src.backend.jax import nn
from keras.src.backend.jax import numpy
from keras.src.backend.jax import random
from keras.src.backend.jax.core import IS_THREAD_SAFE
from keras.src.backend.jax.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.jax.core import Variable
from keras.src.backend.jax.core import cast
from keras.src.backend.jax.core import compute_output_spec
from keras.src.backend.jax.core import cond
from keras.src.backend.jax.core import convert_to_numpy
from keras.src.backend.jax.core import convert_to_tensor
from keras.src.backend.jax.core import device_scope
from keras.src.backend.jax.core import is_tensor
from keras.src.backend.jax.core import random_seed_dtype
from keras.src.backend.jax.core import scatter
from keras.src.backend.jax.core import shape
from keras.src.backend.jax.core import stop_gradient
from keras.src.backend.jax.core import vectorized_map
from keras.src.backend.jax.rnn import cudnn_ok
from keras.src.backend.jax.rnn import gru
from keras.src.backend.jax.rnn import lstm
from keras.src.backend.jax.rnn import rnn
|
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MSE - Mean Squared Error Metric"""
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_DESCRIPTION = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
    If you're using multi-dimensional lists, then set the config as follows:
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(self._get_feature_types()),
reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
],
)
def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float")),
"references": datasets.Sequence(datasets.Value("float")),
}
else:
return {
"predictions": datasets.Value("float"),
"references": datasets.Value("float"),
}
def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
mse = mean_squared_error(
references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
)
return {"mse": mse}
|
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MSE - Mean Squared Error Metric"""
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_DESCRIPTION = """\
Mean Squared Error (MSE) is the average of the squared differences between the predicted
and actual values.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
    If you're using multi-dimensional lists, then set the config as follows:
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(self._get_feature_types()),
reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
],
)
def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float")),
"references": datasets.Sequence(datasets.Value("float")),
}
else:
return {
"predictions": datasets.Value("float"),
"references": datasets.Value("float"),
}
def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
mse = mean_squared_error(
references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
)
return {"mse": mse}
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_beit import *
from .feature_extraction_beit import *
from .image_processing_beit import *
from .image_processing_beit_fast import *
from .modeling_beit import *
from .modeling_flax_beit import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_beit import *
from .feature_extraction_beit import *
from .image_processing_beit import *
from .modeling_beit import *
from .modeling_flax_beit import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
"""
Example of forwarding evaluation logs to the client
===================================================
The example runs on GPU. Two classes are defined to show how to use Dask builtins to
forward the logs to the client process.
"""
import logging
import dask
import distributed
from dask import array as da
from dask_cuda import LocalCUDACluster
from distributed import Client
from xgboost import dask as dxgb
from xgboost.callback import EvaluationMonitor
def _get_logger() -> logging.Logger:
logger = logging.getLogger("[xgboost.dask]")
logger.setLevel(logging.INFO)
if not logger.hasHandlers():
handler = logging.StreamHandler()
logger.addHandler(handler)
return logger
class ForwardLoggingMonitor(EvaluationMonitor):
def __init__(
self,
client: Client,
rank: int = 0,
period: int = 1,
) -> None:
"""Print the evaluation result at each iteration. The default monitor in the
native interface logs the result to the Dask scheduler process. This class can
be used to forward the logging to the client process. Important: see the
`client` parameter for more info.
Parameters
----------
client :
Distributed client. This must be the top-level client. The class uses
:py:meth:`distributed.Client.forward_logging` in conjunction with the Python
:py:mod:`logging` module to forward the evaluation results to the client
process. It has undefined behaviour if called in a nested task. As a result,
client-side logging is not enabled by default.
"""
client.forward_logging(_get_logger().name)
super().__init__(
rank=rank,
period=period,
logger=lambda msg: _get_logger().info(msg.strip()),
)
class WorkerEventMonitor(EvaluationMonitor):
"""Use :py:meth:`distributed.print` to forward the log. A downside is that not only
    can all clients connected to the cluster see the log, but the logs are also printed
    on the worker. If you use a local cluster, the log is duplicated.
"""
def __init__(self, rank: int = 0, period: int = 1) -> None:
super().__init__(
rank=rank, period=period, logger=lambda msg: distributed.print(msg.strip())
)
def hist_train(
client: Client, X: da.Array, y: da.Array, monitor: EvaluationMonitor
) -> da.Array:
    # `DaskQuantileDMatrix` is used instead of `DaskDMatrix`; be careful that it cannot
    # be used for anything other than as a training DMatrix unless a reference is
    # specified. See the `ref` argument of `DaskQuantileDMatrix` (a short sketch follows
    # this example).
dtrain = dxgb.DaskQuantileDMatrix(client, X, y)
output = dxgb.train(
client,
# Make sure the device is set to CUDA.
{"tree_method": "hist", "device": "cuda"},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
# Use the monitor to forward the log.
callbacks=[monitor],
# Disable the internal logging and prefer the client-side `EvaluationMonitor`.
verbose_eval=False,
)
bst = output["booster"]
history = output["history"]
prediction = dxgb.predict(client, bst, X)
print("Evaluation history:", history)
return prediction
if __name__ == "__main__":
# `LocalCUDACluster` is used for assigning GPU to XGBoost processes. Here
# `n_workers` represents the number of GPUs since we use one GPU per worker process.
with LocalCUDACluster(n_workers=2, threads_per_worker=4) as cluster:
# Create client from cluster, set the backend to GPU array (cupy).
with Client(cluster) as client, dask.config.set({"array.backend": "cupy"}):
# Generate some random data for demonstration
rng = da.random.default_rng(1)
m = 2**18
n = 100
X = rng.uniform(size=(m, n), chunks=(128**2, -1))
y = X.sum(axis=1)
# Use forwarding, the client must be the top client.
monitor: EvaluationMonitor = ForwardLoggingMonitor(client)
hist_train(client, X, y, monitor).compute()
# Use distributed.print, the logs in this demo are duplicated as the same
# log is printed in all workers along with the client.
monitor = WorkerEventMonitor()
hist_train(client, X, y, monitor).compute()
|
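# A minimal sketch, not part of the example above, illustrating the `ref` argument
# mentioned in the comment inside `hist_train`: a validation `DaskQuantileDMatrix`
# can only be used for evaluation if it is built against the training matrix via
# `ref=dtrain`, so that both share the same quantile (histogram) cuts. `X_valid`
# and `y_valid` are hypothetical placeholders for a held-out split; the training
# parameters follow the GPU example above.
from dask import array as da
from distributed import Client

from xgboost import dask as dxgb


def hist_train_with_validation(
    client: Client, X: da.Array, y: da.Array, X_valid: da.Array, y_valid: da.Array
) -> dict:
    dtrain = dxgb.DaskQuantileDMatrix(client, X, y)
    # Reuse the histogram cuts computed for `dtrain`; without `ref` this matrix
    # could not serve as an evaluation set.
    dvalid = dxgb.DaskQuantileDMatrix(client, X_valid, y_valid, ref=dtrain)
    return dxgb.train(
        client,
        {"tree_method": "hist", "device": "cuda"},
        dtrain,
        num_boost_round=4,
        evals=[(dtrain, "train"), (dvalid, "valid")],
    )
|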
"""Example of forwarding evaluation logs to the client
===================================================
The example runs on GPU. Two classes are defined to show how to use Dask builtins to
forward the logs to the client process.
"""
import logging
import dask
import distributed
from dask import array as da
from dask_cuda import LocalCUDACluster
from distributed import Client
from xgboost import dask as dxgb
from xgboost.callback import EvaluationMonitor
def _get_logger() -> logging.Logger:
logger = logging.getLogger("[xgboost.dask]")
logger.setLevel(logging.INFO)
if not logger.hasHandlers():
handler = logging.StreamHandler()
logger.addHandler(handler)
return logger
class ForwardLoggingMonitor(EvaluationMonitor):
def __init__(
self,
client: Client,
rank: int = 0,
period: int = 1,
) -> None:
"""Print the evaluation result at each iteration. The default monitor in the
native interface logs the result to the Dask scheduler process. This class can
be used to forward the logging to the client process. Important: see the
`client` parameter for more info.
Parameters
----------
client :
Distributed client. This must be the top-level client. The class uses
:py:meth:`distributed.Client.forward_logging` in conjunction with the Python
:py:mod:`logging` module to forward the evaluation results to the client
process. It has undefined behaviour if called in a nested task. As a result,
client-side logging is not enabled by default.
"""
client.forward_logging(_get_logger().name)
super().__init__(
rank=rank,
period=period,
logger=lambda msg: _get_logger().info(msg.strip()),
)
class WorkerEventMonitor(EvaluationMonitor):
"""Use :py:meth:`distributed.print` to forward the log. A downside is that not only
    can all clients connected to the cluster see the log, but the logs are also printed
    on the worker. If you use a local cluster, the log is duplicated.
"""
def __init__(self, rank: int = 0, period: int = 1) -> None:
super().__init__(
rank=rank, period=period, logger=lambda msg: distributed.print(msg.strip())
)
def hist_train(
client: Client, X: da.Array, y: da.Array, monitor: EvaluationMonitor
) -> da.Array:
    # `DaskQuantileDMatrix` is used instead of `DaskDMatrix`; be careful that it cannot
    # be used for anything other than as a training DMatrix unless a reference is
    # specified. See the `ref` argument of `DaskQuantileDMatrix`.
dtrain = dxgb.DaskQuantileDMatrix(client, X, y)
output = dxgb.train(
client,
# Make sure the device is set to CUDA.
{"tree_method": "hist", "device": "cuda"},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
# Use the monitor to forward the log.
callbacks=[monitor],
# Disable the internal logging and prefer the client-side `EvaluationMonitor`.
verbose_eval=False,
)
bst = output["booster"]
history = output["history"]
prediction = dxgb.predict(client, bst, X)
print("Evaluation history:", history)
return prediction
if __name__ == "__main__":
# `LocalCUDACluster` is used for assigning GPU to XGBoost processes. Here
# `n_workers` represents the number of GPUs since we use one GPU per worker process.
with LocalCUDACluster(n_workers=2, threads_per_worker=4) as cluster:
# Create client from cluster, set the backend to GPU array (cupy).
with Client(cluster) as client, dask.config.set({"array.backend": "cupy"}):
# Generate some random data for demonstration
rng = da.random.default_rng(1)
m = 2**18
n = 100
X = rng.uniform(size=(m, n), chunks=(128**2, -1))
y = X.sum(axis=1)
# Use forwarding, the client must be the top client.
monitor: EvaluationMonitor = ForwardLoggingMonitor(client)
hist_train(client, X, y, monitor).compute()
# Use distributed.print, the logs in this demo are duplicated as the same
# log is printed in all workers along with the client.
monitor = WorkerEventMonitor()
hist_train(client, X, y, monitor).compute()
|
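# A minimal sketch, independent of XGBoost, of the mechanism ForwardLoggingMonitor
# relies on: distributed.Client.forward_logging() re-emits log records produced on
# the workers under the same logger name in the client process. As the docstring
# above notes, it must be called on the top-level client. The logger name
# "my.worker.logger" is a hypothetical placeholder.
import logging

from dask.distributed import Client, LocalCluster


def log_on_worker() -> None:
    logger = logging.getLogger("my.worker.logger")
    logger.setLevel(logging.INFO)
    logger.info("hello from a worker")


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    with LocalCluster(n_workers=1) as cluster, Client(cluster) as client:
        client.forward_logging("my.worker.logger")
        client.submit(log_on_worker).result()  # the record is re-emitted client-side
|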
import os
from typing import Any, Callable, Optional, Tuple
from PIL import Image
from .utils import check_integrity, download_and_extract_archive, download_url
from .vision import VisionDataset
class SBU(VisionDataset):
"""`SBU Captioned Photo <http://www.cs.virginia.edu/~vicente/sbucaptions/>`_ Dataset.
Args:
root (string): Root directory of dataset where tarball
``SBUCaptionedPhotoDataset.tar.gz`` exists.
transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
url = "https://www.cs.rice.edu/~vo9/sbucaptions/SBUCaptionedPhotoDataset.tar.gz"
filename = "SBUCaptionedPhotoDataset.tar.gz"
md5_checksum = "9aec147b3488753cf758b4d493422285"
def __init__(
self,
root: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = True,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
# Read the caption for each photo
self.photos = []
self.captions = []
file1 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")
file2 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_captions.txt")
for line1, line2 in zip(open(file1), open(file2)):
url = line1.rstrip()
photo = os.path.basename(url)
filename = os.path.join(self.root, "dataset", photo)
if os.path.exists(filename):
caption = line2.rstrip()
self.photos.append(photo)
self.captions.append(caption)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a caption for the photo.
"""
filename = os.path.join(self.root, "dataset", self.photos[index])
img = Image.open(filename).convert("RGB")
if self.transform is not None:
img = self.transform(img)
target = self.captions[index]
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
"""The number of photos in the dataset."""
return len(self.photos)
def _check_integrity(self) -> bool:
"""Check the md5 checksum of the downloaded tarball."""
root = self.root
fpath = os.path.join(root, self.filename)
if not check_integrity(fpath, self.md5_checksum):
return False
return True
def download(self) -> None:
"""Download and extract the tarball, and download each individual photo."""
if self._check_integrity():
print("Files already downloaded and verified")
return
download_and_extract_archive(self.url, self.root, self.root, self.filename, self.md5_checksum)
# Download individual photos
with open(os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")) as fh:
for line in fh:
url = line.rstrip()
try:
download_url(url, os.path.join(self.root, "dataset"))
except OSError:
                    # The images point to public images on Flickr.
                    # Note: images might be removed by users at any time.
pass
|
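# A minimal usage sketch for the SBU dataset class above. The root path "./data" is
# a hypothetical placeholder; with download=True the tarball and the individual
# Flickr photos are fetched on first use, which can take a while. As documented
# above, __getitem__ returns an (image, caption) tuple.
from torchvision import transforms
from torchvision.datasets import SBU

dataset = SBU(root="./data", transform=transforms.ToTensor(), download=True)
image, caption = dataset[0]  # image: CxHxW float tensor, caption: str
print(len(dataset), image.shape, caption)
|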
import os
from typing import Any, Callable, Optional, Tuple
from PIL import Image
from .utils import check_integrity, download_url
from .vision import VisionDataset
class SBU(VisionDataset):
"""`SBU Captioned Photo <http://www.cs.virginia.edu/~vicente/sbucaptions/>`_ Dataset.
Args:
root (string): Root directory of dataset where tarball
``SBUCaptionedPhotoDataset.tar.gz`` exists.
transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
url = "https://www.cs.rice.edu/~vo9/sbucaptions/SBUCaptionedPhotoDataset.tar.gz"
filename = "SBUCaptionedPhotoDataset.tar.gz"
md5_checksum = "9aec147b3488753cf758b4d493422285"
def __init__(
self,
root: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = True,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
# Read the caption for each photo
self.photos = []
self.captions = []
file1 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")
file2 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_captions.txt")
for line1, line2 in zip(open(file1), open(file2)):
url = line1.rstrip()
photo = os.path.basename(url)
filename = os.path.join(self.root, "dataset", photo)
if os.path.exists(filename):
caption = line2.rstrip()
self.photos.append(photo)
self.captions.append(caption)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a caption for the photo.
"""
filename = os.path.join(self.root, "dataset", self.photos[index])
img = Image.open(filename).convert("RGB")
if self.transform is not None:
img = self.transform(img)
target = self.captions[index]
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
"""The number of photos in the dataset."""
return len(self.photos)
def _check_integrity(self) -> bool:
"""Check the md5 checksum of the downloaded tarball."""
root = self.root
fpath = os.path.join(root, self.filename)
if not check_integrity(fpath, self.md5_checksum):
return False
return True
def download(self) -> None:
"""Download and extract the tarball, and download each individual photo."""
import tarfile
if self._check_integrity():
print("Files already downloaded and verified")
return
download_url(self.url, self.root, self.filename, self.md5_checksum)
# Extract file
with tarfile.open(os.path.join(self.root, self.filename), "r:gz") as tar:
tar.extractall(path=self.root)
# Download individual photos
with open(os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")) as fh:
for line in fh:
url = line.rstrip()
try:
download_url(url, os.path.join(self.root, "dataset"))
except OSError:
                    # The images point to public images on Flickr.
                    # Note: images might be removed by users at any time.
pass
|
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
_delete_=True,
type='HRNet',
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')),
neck=dict(
_delete_=True,
type='HRFPN',
in_channels=[32, 64, 128, 256],
out_channels=256))
# learning policy
max_epochs = 20
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 19],
gamma=0.1)
]
|
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
_delete_=True,
type='HRNet',
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')),
neck=dict(
_delete_=True,
type='HRFPN',
in_channels=[32, 64, 128, 256],
out_channels=256))
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
|
from . import utils
from .model import (
hubert_base,
hubert_large,
hubert_pretrain_base,
hubert_pretrain_large,
hubert_pretrain_model,
hubert_pretrain_xlarge,
hubert_xlarge,
HuBERTPretrainModel,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
wav2vec2_model,
Wav2Vec2Model,
)
__all__ = [
"Wav2Vec2Model",
"HuBERTPretrainModel",
"wav2vec2_model",
"wav2vec2_base",
"wav2vec2_large",
"wav2vec2_large_lv60k",
"hubert_base",
"hubert_large",
"hubert_xlarge",
"hubert_pretrain_model",
"hubert_pretrain_base",
"hubert_pretrain_large",
"hubert_pretrain_xlarge",
"utils",
]
|
from . import utils
from .model import (
Wav2Vec2Model,
HuBERTPretrainModel,
wav2vec2_model,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
hubert_base,
hubert_large,
hubert_xlarge,
hubert_pretrain_model,
hubert_pretrain_base,
hubert_pretrain_large,
hubert_pretrain_xlarge,
)
__all__ = [
"Wav2Vec2Model",
"HuBERTPretrainModel",
"wav2vec2_model",
"wav2vec2_base",
"wav2vec2_large",
"wav2vec2_large_lv60k",
"hubert_base",
"hubert_large",
"hubert_xlarge",
"hubert_pretrain_model",
"hubert_pretrain_base",
"hubert_pretrain_large",
"hubert_pretrain_xlarge",
"utils",
]
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_opencv_available,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available() and is_opencv_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_and_opencv_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_opencv_objects))
else:
_import_structure["pipeline_consisid"] = ["ConsisIDPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .pipeline_consisid import ConsisIDPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["pipeline_consisid"] = ["ConsisIDPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .pipeline_consisid import ConsisIDPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
|
_base_ = './mask-rcnn_r50_fpn_gn-ws-all_2x_coco.py'
# model settings
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://jhu/resnext101_32x4d_gn_ws')))
|
_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py'
# model settings
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://jhu/resnext101_32x4d_gn_ws')))
|
import os
from pathlib import Path
import pytest
from jina import Executor
def test_config():
ranker = Executor.load_config(
str(Path(__file__).parents[2] / 'config.yml'),
override_with={
'query_features': ['query'],
'match_features': ['match'],
'relevance_label': 'relevance',
},
)
assert ranker.query_features == ['query']
assert ranker.match_features == ['match']
def test_dump_load_path_not_exist(ranker):
with pytest.raises(ValueError):
ranker.dump(parameters={})
ranker.load(parameters={})
def test_train(ranker, documents_to_train_price_sensitive_model):
ranker.train(docs=documents_to_train_price_sensitive_model)
assert ranker.booster
def test_train_with_categorical_features(
ranker_with_categorical_features, documents_to_train_price_sensitive_model
):
    """The weight field specifies the importance of the features."""
ranker_with_categorical_features.train(
docs=documents_to_train_price_sensitive_model
)
assert ranker_with_categorical_features.booster
def test_dump_load(ranker, documents_to_train_price_sensitive_model, tmpdir):
model_path = str(tmpdir) + 'model.txt'
ranker.train(docs=documents_to_train_price_sensitive_model)
assert ranker.booster
ranker.dump(parameters={'model_path': model_path})
assert os.path.exists(model_path)
ranker.load({'model_path': model_path})
assert ranker.booster
def test_rank_price_sensitive_model(
ranker,
documents_to_train_price_sensitive_model,
documents_random_brand,
):
    """Train the model on price-sensitive data and assert that higher prices get lower relevance scores."""
ranker.train(docs=documents_to_train_price_sensitive_model)
assert ranker.booster
ranker.rank(documents_random_brand)
for doc in documents_random_brand:
predicted_relevances = []
predicted_ids = []
        expected_ids = ['3', '4', '2', '1']  # Prices from smallest to largest.
for match in doc.matches:
predicted_relevances.append(match.scores.get('relevance').value)
predicted_ids.append(match.id)
assert (
predicted_relevances[0]
>= predicted_relevances[1]
>= predicted_relevances[2]
)
assert predicted_ids == expected_ids
|
import os
from pathlib import Path
import pytest
from jina import Executor
def test_config():
ranker = Executor.load_config(
str(Path(__file__).parents[2] / 'config.yml'),
override_with={
'query_features': ['query'],
'match_features': ['match'],
'relevance_label': 'relevance',
},
)
assert ranker.query_features == ['query']
assert ranker.match_features == ['match']
def test_dump_load_path_not_exist(ranker):
with pytest.raises(ValueError):
ranker.dump(parameters={})
ranker.load(parameters={})
def test_train(ranker, documents_to_train_price_sensitive_model):
ranker.train(docs=documents_to_train_price_sensitive_model)
assert ranker.booster
def test_train_with_categorical_features(
ranker_with_categorical_features, documents_to_train_price_sensitive_model
):
    """The weight field specifies the importance of the features."""
ranker_with_categorical_features.train(
docs=documents_to_train_price_sensitive_model
)
assert ranker_with_categorical_features.booster
def test_dump_load(ranker, documents_to_train_price_sensitive_model, tmpdir):
model_path = str(tmpdir) + 'model.txt'
ranker.train(docs=documents_to_train_price_sensitive_model)
assert ranker.booster
ranker.dump(parameters={'model_path': model_path})
assert os.path.exists(model_path)
ranker.load({'model_path': model_path})
assert ranker.booster
def test_rank_price_sensitive_model(
ranker,
documents_to_train_price_sensitive_model,
documents_random_brand,
):
    """Train the model on price-sensitive data and assert that higher prices get lower relevance scores."""
ranker.train(docs=documents_to_train_price_sensitive_model)
assert ranker.booster
ranker.rank(documents_random_brand)
for doc in documents_random_brand:
predicted_relevances = []
predicted_ids = []
        expected_ids = ['3', '4', '2', '1']  # Prices from smallest to largest.
for match in doc.matches:
predicted_relevances.append(match.scores.get('relevance').value)
predicted_ids.append(match.id)
assert (
predicted_relevances[0]
>= predicted_relevances[1]
>= predicted_relevances[2]
)
assert predicted_ids == expected_ids
|
"""
The system trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) on the SNLI + MultiNLI (AllNLI) dataset
with a softmax loss function. At every 100 training steps, the model is evaluated on the
STS benchmark dataset.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_name
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Hugging Face pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
train_batch_size = 16
output_dir = "output/training_nli_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
# We'll start with 10k training samples, but you can increase this to get a stronger model
logging.info("Read AllNLI train dataset")
train_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="train").select(range(10000))
eval_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="dev").select(range(1000))
logging.info(train_dataset)
# 3. Define our training loss: https://sbert.net/docs/package_reference/sentence_transformer/losses.html#softmaxloss
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=3,
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
logging.info("Evaluation before training:")
dev_evaluator(model)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=1,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="nli-v1", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-nli-v1")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-nli-v1')`."
)
|
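# A minimal inference sketch, separate from the training script above, assuming the
# model was saved to the `final_output_dir` from step 8 (the path below is a
# hypothetical placeholder for that directory).
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("output/training_nli_bert-base-uncased-<timestamp>/final")
sentences = ["A man is eating food.", "A man is eating a piece of bread."]
embeddings = model.encode(sentences)
scores = model.similarity(embeddings, embeddings)  # 2x2 similarity matrix, cosine by default
print(scores)
|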
"""
The system trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) on the SNLI + MultiNLI (AllNLI) dataset
with a softmax loss function. At every 100 training steps, the model is evaluated on the
STS benchmark dataset.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_name
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Hugging Face pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
train_batch_size = 16
output_dir = "output/training_nli_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
# We'll start with 10k training samples, but you can increase this to get a stronger model
logging.info("Read AllNLI train dataset")
train_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="train").select(range(10000))
eval_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="dev").select(range(1000))
logging.info(train_dataset)
# 3. Define our training loss: https://sbert.net/docs/package_reference/losses.html#softmaxloss
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=3,
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
logging.info("Evaluation before training:")
dev_evaluator(model)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=1,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="nli-v1", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-nli-v1")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-nli-v1')`."
)
|
import json
import os
from typing import Dict
from torch import Tensor, nn
class Dropout(nn.Module):
"""Dropout layer.
Args:
        dropout: Sets a dropout value for the dense layer.
"""
def __init__(self, dropout: float = 0.2):
super(Dropout, self).__init__()
self.dropout = dropout
self.dropout_layer = nn.Dropout(self.dropout)
def forward(self, features: Dict[str, Tensor]):
features.update({"sentence_embedding": self.dropout_layer(features["sentence_embedding"])})
return features
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump({"dropout": self.dropout}, fOut)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = Dropout(**config)
return model
|
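# A minimal sketch of how the Dropout module above can be used: sentence-transformers
# models are built from a list of modules, and Dropout can be appended after pooling so
# that the "sentence_embedding" produced by the pooling stage is regularized during
# fine-tuning (dropout is a no-op in eval mode). "bert-base-uncased" is only an example
# backbone; the dropout value of 0.1 is arbitrary.
from sentence_transformers import SentenceTransformer, models

word_embedding = models.Transformer("bert-base-uncased", max_seq_length=128)
pooling = models.Pooling(word_embedding.get_word_embedding_dimension())
dropout = models.Dropout(dropout=0.1)
model = SentenceTransformer(modules=[word_embedding, pooling, dropout])
|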
from torch import Tensor
from torch import nn
from typing import Dict
import os
import json
class Dropout(nn.Module):
"""Dropout layer.
    :param dropout: Sets a dropout value for the dense layer.
"""
def __init__(self, dropout: float = 0.2):
super(Dropout, self).__init__()
self.dropout = dropout
self.dropout_layer = nn.Dropout(self.dropout)
def forward(self, features: Dict[str, Tensor]):
features.update({"sentence_embedding": self.dropout_layer(features["sentence_embedding"])})
return features
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump({"dropout": self.dropout}, fOut)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = Dropout(**config)
return model
|
import os
import time
import pytest
from docarray import Document
from jina import Client, Flow
from jina.serve.networking.utils import send_health_check_sync
@pytest.fixture
def error_log_level():
old_env = os.environ.get('JINA_LOG_LEVEL')
os.environ['JINA_LOG_LEVEL'] = 'ERROR'
yield
os.environ['JINA_LOG_LEVEL'] = old_env
@pytest.fixture
def cert_pem():
"""This is the cert entry of a self-signed local cert"""
cur_dir = os.path.dirname(os.path.abspath(__file__))
return f'{cur_dir}/cert/server.crt'
@pytest.fixture
def key_pem():
"""This is the key entry of a self-signed local cert"""
cur_dir = os.path.dirname(os.path.abspath(__file__))
return f'{cur_dir}/cert/server.key'
def test_grpc_ssl_with_flow(cert_pem, key_pem, error_log_level):
with Flow(
protocol='grpc',
ssl_certfile=cert_pem,
ssl_keyfile=key_pem,
) as f:
with pytest.raises(ConnectionError):
Client(protocol='grpc', port=f.port, tls=True).index([Document()])
# the openssl error from above seems to take a bit to actually terminate and may cause the next test to seg fault
time.sleep(15.0)
def test_grpc_ssl_with_flow_and_client(cert_pem, key_pem, error_log_level):
with Flow(
protocol='grpc',
ssl_certfile=cert_pem,
ssl_keyfile=key_pem,
) as flow:
with open(cert_pem, 'rb') as f:
creds = f.read()
send_health_check_sync(
target=f'localhost:{flow.port}',
root_certificates=creds,
tls=True,
timeout=1.0,
)
time.sleep(15.0)
|
import os
import time
import pytest
from docarray import Document
from jina import Client, Flow
from jina.serve.networking import GrpcConnectionPool
@pytest.fixture
def error_log_level():
old_env = os.environ.get('JINA_LOG_LEVEL')
os.environ['JINA_LOG_LEVEL'] = 'ERROR'
yield
os.environ['JINA_LOG_LEVEL'] = old_env
@pytest.fixture
def cert_pem():
"""This is the cert entry of a self-signed local cert"""
cur_dir = os.path.dirname(os.path.abspath(__file__))
return f'{cur_dir}/cert/server.crt'
@pytest.fixture
def key_pem():
"""This is the key entry of a self-signed local cert"""
cur_dir = os.path.dirname(os.path.abspath(__file__))
return f'{cur_dir}/cert/server.key'
def test_grpc_ssl_with_flow(cert_pem, key_pem, error_log_level):
with Flow(
protocol='grpc',
ssl_certfile=cert_pem,
ssl_keyfile=key_pem,
) as f:
with pytest.raises(ConnectionError):
Client(protocol='grpc', port=f.port, tls=True).index([Document()])
# the openssl error from above seems to take a bit to actually terminate and may cause the next test to seg fault
time.sleep(15.0)
def test_grpc_ssl_with_flow_and_client(cert_pem, key_pem, error_log_level):
with Flow(
protocol='grpc',
ssl_certfile=cert_pem,
ssl_keyfile=key_pem,
) as flow:
with open(cert_pem, 'rb') as f:
creds = f.read()
GrpcConnectionPool.send_health_check_sync(
target=f'localhost:{flow.port}',
root_certificates=creds,
tls=True,
timeout=1.0,
)
time.sleep(15.0)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from .mongo_handler import MongoHandler
from .mongo_storage import MongoDBStorage
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from .mongo_storage import MongoDBStorage
from .mongo_handler import MongoHandler
|
import torch
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.ops.operation import Operation
class TorchLayer(torch.nn.Module):
@property
def torch_params(self):
if not hasattr(self, "_torch_params"):
self._track_variables()
return self._torch_params
def _post_build(self):
# Do not track variables when in a stateless scope.
# The variables are not initialized.
if in_stateless_scope():
return
self._track_variables()
def _track_variables(self):
        # Setting the _torch_params attribute will have the module automatically
        # track its parameters.
self._torch_params = torch.nn.ParameterDict(
{variable.path: variable.value for variable in self.variables}
)
def named_parameters(
self,
prefix="",
recurse=True,
remove_duplicate=True,
):
if not hasattr(self, "_torch_params"):
self._track_variables()
return torch.nn.Module.named_parameters(
self, prefix, recurse, remove_duplicate
)
def forward(self, *args, **kwargs):
return Operation.__call__(self, *args, **kwargs)
def _setattr_hook(self, name, value):
from keras.src.layers import Layer
if (
isinstance(value, torch.nn.Module)
and not isinstance(value, Layer)
and not name == "_torch_params"
):
from keras.src.utils.torch_utils import TorchModuleWrapper
if not isinstance(self, TorchModuleWrapper):
value = TorchModuleWrapper(value)
return name, value
def _post_track_variable(self, variable):
if hasattr(self, "_torch_params"):
if variable.path not in self.torch_params:
self.torch_params[variable.path] = variable.value
def _post_untrack_variable(self, variable):
if hasattr(self, "_torch_params"):
if variable.path in self.torch_params:
self.torch_params.pop(variable.path)
|
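# A minimal sketch, assuming Keras 3 with the torch backend selected via the
# KERAS_BACKEND environment variable, of what TorchLayer above provides: a built
# Keras layer exposes its variables through torch's named_parameters(), keyed by
# the variable paths tracked in _torch_params.
import os

os.environ["KERAS_BACKEND"] = "torch"

import keras  # noqa: E402

layer = keras.layers.Dense(4)
layer.build((None, 8))  # creates the kernel and bias variables
for name, param in layer.named_parameters():
    print(name, tuple(param.shape))
|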
from typing import Iterator
from typing import Tuple
import torch
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.ops.operation import Operation
class TorchLayer(torch.nn.Module):
@property
def torch_params(self):
if not hasattr(self, "_torch_params"):
self._track_variables()
return self._torch_params
def _post_build(self):
# Do not track variables when in a stateless scope.
# The variables are not initialized.
if in_stateless_scope():
return
self._track_variables()
def _track_variables(self):
        # Setting the _torch_params attribute will have the module automatically
        # track its parameters.
self._torch_params = torch.nn.ParameterDict(
{variable.path: variable.value for variable in self.variables}
)
def named_parameters(
self,
prefix: str = "",
recurse: bool = True,
remove_duplicate: bool = True,
) -> Iterator[Tuple[str, torch.nn.Parameter]]:
if not hasattr(self, "_torch_params"):
self._track_variables()
return torch.nn.Module.named_parameters(
self, prefix, recurse, remove_duplicate
)
def forward(self, *args, **kwargs):
return Operation.__call__(self, *args, **kwargs)
def _setattr_hook(self, name, value):
from keras.src.layers import Layer
if (
isinstance(value, torch.nn.Module)
and not isinstance(value, Layer)
and not name == "_torch_params"
):
from keras.src.utils.torch_utils import TorchModuleWrapper
if not isinstance(self, TorchModuleWrapper):
value = TorchModuleWrapper(value)
return name, value
def _post_track_variable(self, variable):
if hasattr(self, "_torch_params"):
if variable.path not in self.torch_params:
self.torch_params[variable.path] = variable.value
def _post_untrack_variable(self, variable):
if hasattr(self, "_torch_params"):
if variable.path in self.torch_params:
self.torch_params.pop(variable.path)
|
_base_ = './cascade-mask-rcnn_r50_fpn_20e_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
import re
import pytest
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.errors import WorkflowValidationError
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.workflow.workflow import Workflow
def test_decorated_config(workflow):
def f(self, ev: Event) -> Event:
return Event()
res = step(workflow=workflow.__class__)(f)
config = getattr(res, "__step_config")
assert config.accepted_events == [Event]
assert config.event_name == "ev"
assert config.return_types == [Event]
def test_decorate_method():
class TestWorkflow(Workflow):
@step
def f1(self, ev: StartEvent) -> Event:
return ev
@step
def f2(self, ev: Event) -> StopEvent:
return StopEvent()
wf = TestWorkflow()
assert getattr(wf.f1, "__step_config")
assert getattr(wf.f2, "__step_config")
def test_decorate_wrong_signature():
def f():
pass
with pytest.raises(WorkflowValidationError):
step()(f)
def test_decorate_free_function():
class TestWorkflow(Workflow):
pass
@step(workflow=TestWorkflow)
def f(ev: Event) -> Event:
return Event()
assert TestWorkflow._step_functions == {"f": f}
def test_decorate_free_function_wrong_decorator():
with pytest.raises(
WorkflowValidationError,
match=re.escape(
"To decorate f please pass a workflow class to the @step decorator."
),
):
@step
def f(ev: Event) -> Event:
return Event()
def test_decorate_free_function_wrong_num_workers():
class TestWorkflow(Workflow):
pass
with pytest.raises(
WorkflowValidationError, match="num_workers must be an integer greater than 0"
):
@step(workflow=TestWorkflow, num_workers=0)
def f1(ev: Event) -> Event:
return Event()
with pytest.raises(
WorkflowValidationError, match="num_workers must be an integer greater than 0"
):
@step(workflow=TestWorkflow, num_workers=0.5) # type: ignore
def f2(ev: Event) -> Event:
return Event()
|
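# A minimal end-to-end sketch of the @step decorator exercised by the tests above,
# assuming llama-index-core is installed: a single-step workflow that goes from
# StartEvent to StopEvent and is run with an input keyword argument.
import asyncio

from llama_index.core.workflow import StartEvent, StopEvent, Workflow, step


class EchoWorkflow(Workflow):
    @step
    async def echo(self, ev: StartEvent) -> StopEvent:
        return StopEvent(result=f"echo: {ev.message}")


async def main() -> None:
    result = await EchoWorkflow(timeout=10).run(message="hello")
    print(result)  # "echo: hello"


if __name__ == "__main__":
    asyncio.run(main())
|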
import re
import pytest
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.errors import WorkflowValidationError
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.workflow.workflow import Workflow
def test_decorated_config(workflow):
def f(self, ev: Event) -> Event:
return Event()
res = step(workflow=workflow.__class__)(f)
config = getattr(res, "__step_config")
assert config.accepted_events == [Event]
assert config.event_name == "ev"
assert config.return_types == [Event]
def test_decorate_method():
class TestWorkflow(Workflow):
@step
def f1(self, ev: StartEvent) -> Event:
return ev
@step
def f2(self, ev: Event) -> StopEvent:
return StopEvent()
wf = TestWorkflow()
assert getattr(wf.f1, "__step_config")
assert getattr(wf.f2, "__step_config")
def test_decorate_wrong_signature():
def f():
pass
with pytest.raises(WorkflowValidationError):
step()(f)
def test_decorate_free_function():
class TestWorkflow(Workflow):
pass
@step(workflow=TestWorkflow)
def f(ev: Event) -> Event:
return Event()
assert TestWorkflow._step_functions == {"f": f}
def test_decorate_free_function_wrong_decorator():
with pytest.raises(
WorkflowValidationError,
match=re.escape(
"To decorate f please pass a workflow class to the @step decorator."
),
):
@step
def f(ev: Event) -> Event:
return Event()
def test_decorate_free_function_wrong_num_workers():
class TestWorkflow(Workflow):
pass
with pytest.raises(
WorkflowValidationError, match="num_workers must be an integer greater than 0"
):
@step(workflow=TestWorkflow, num_workers=0)
def f1(ev: Event) -> Event:
return Event()
with pytest.raises(
WorkflowValidationError, match="num_workers must be an integer greater than 0"
):
@step(workflow=TestWorkflow, num_workers=0.5)
def f2(ev: Event) -> Event:
return Event()
|
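The two test modules above exercise the `@step` decorator. A minimal end-to-end sketch of the API they test, assuming `llama_index.core.workflow` re-exports these names (as it does in recent releases); `EchoWorkflow` is a hypothetical name.

import asyncio
from llama_index.core.workflow import StartEvent, StopEvent, Workflow, step

class EchoWorkflow(Workflow):
    @step
    async def echo(self, ev: StartEvent) -> StopEvent:
        # Keyword arguments passed to run() become attributes of the StartEvent.
        return StopEvent(result=ev.message)

async def main():
    wf = EchoWorkflow(timeout=10)
    print(await wf.run(message="hello"))  # "hello"

asyncio.run(main())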
#!/usr/bin/env python
import functools as func
import glob
import os.path as osp
import re
import numpy as np
url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/master/configs'
files = sorted(glob.glob('../../configs/*/README.md'))
stats = []
titles = []
num_ckpts = 0
for f in files:
url = osp.dirname(f.replace('../../configs', url_prefix))
with open(f, 'r') as content_file:
content = content_file.read()
title = content.split('\n')[0].replace('# ', '').strip()
ckpts = set(x.lower().strip()
for x in re.findall(r'\[model\]\((https?.*)\)', content))
if len(ckpts) == 0:
continue
_papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)]
assert len(_papertype) > 0
papertype = _papertype[0]
paper = set([(papertype, title)])
titles.append(title)
num_ckpts += len(ckpts)
statsmsg = f"""
\t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts)
"""
stats.append((paper, ckpts, statsmsg))
allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats])
msglist = '\n'.join(x for _, _, x in stats)
papertypes, papercounts = np.unique([t for t, _ in allpapers],
return_counts=True)
countstr = '\n'.join(
[f' - {t}: {c}' for t, c in zip(papertypes, papercounts)])
modelzoo = f"""
# Model Zoo Statistics
* Number of papers: {len(set(titles))}
{countstr}
* Number of checkpoints: {num_ckpts}
{msglist}
"""
with open('modelzoo_statistics.md', 'w') as f:
f.write(modelzoo)
|
#!/usr/bin/env python
import functools as func
import glob
import os.path as osp
import re
import numpy as np
url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/master/'
files = sorted(glob.glob('../configs/*/README.md'))
stats = []
titles = []
num_ckpts = 0
for f in files:
url = osp.dirname(f.replace('../', url_prefix))
with open(f, 'r') as content_file:
content = content_file.read()
title = content.split('\n')[0].replace('# ', '').strip()
ckpts = set(x.lower().strip()
for x in re.findall(r'\[model\]\((https?.*)\)', content))
if len(ckpts) == 0:
continue
_papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)]
assert len(_papertype) > 0
papertype = _papertype[0]
paper = set([(papertype, title)])
titles.append(title)
num_ckpts += len(ckpts)
statsmsg = f"""
\t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts)
"""
stats.append((paper, ckpts, statsmsg))
allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats])
msglist = '\n'.join(x for _, _, x in stats)
papertypes, papercounts = np.unique([t for t, _ in allpapers],
return_counts=True)
countstr = '\n'.join(
[f' - {t}: {c}' for t, c in zip(papertypes, papercounts)])
modelzoo = f"""
# Model Zoo Statistics
* Number of papers: {len(set(titles))}
{countstr}
* Number of checkpoints: {num_ckpts}
{msglist}
"""
with open('modelzoo_statistics.md', 'w') as f:
f.write(modelzoo)
|
"""
This file evaluates CrossEncoder on the TREC 2019 Deep Learning (DL) Track: https://arxiv.org/abs/2003.07820
TREC 2019 DL is based on the MS MARCO corpus. MS MARCO provides sparse annotations, i.e., usually only a single
passage is marked as relevant for a given query. Many other highly relevant passages are not annotated and are hence
treated as errors if a model ranks them highly.
TREC DL instead annotated up to 200 passages per query for their relevance. It is therefore better suited to estimating
model performance for the reranking task in Information Retrieval.
Run:
python eval_cross-encoder-trec-dl.py cross-encoder-model-name
"""
import gzip
import logging
import os
import sys
from collections import defaultdict
import numpy as np
import pytrec_eval
import tqdm
from sentence_transformers import CrossEncoder, util
data_folder = "trec2019-data"
os.makedirs(data_folder, exist_ok=True)
# Read test queries
queries = {}
queries_filepath = os.path.join(data_folder, "msmarco-test2019-queries.tsv.gz")
if not os.path.exists(queries_filepath):
logging.info("Download " + os.path.basename(queries_filepath))
util.http_get(
"https://msmarco.z22.web.core.windows.net/msmarcoranking/msmarco-test2019-queries.tsv.gz", queries_filepath
)
with gzip.open(queries_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
queries[qid] = query
# Read which passages are relevant
relevant_docs = defaultdict(lambda: defaultdict(int))
qrels_filepath = os.path.join(data_folder, "2019qrels-pass.txt")
if not os.path.exists(qrels_filepath):
logging.info("Download " + os.path.basename(qrels_filepath))
util.http_get("https://trec.nist.gov/data/deep/2019qrels-pass.txt", qrels_filepath)
with open(qrels_filepath) as fIn:
for line in fIn:
qid, _, pid, score = line.strip().split()
score = int(score)
if score > 0:
relevant_docs[qid][pid] = score
# Only use queries that have at least one relevant passage
relevant_qid = []
for qid in queries:
if len(relevant_docs[qid]) > 0:
relevant_qid.append(qid)
# Read the top 1000 passages that are supposed to be re-ranked
passage_filepath = os.path.join(data_folder, "msmarco-passagetest2019-top1000.tsv.gz")
if not os.path.exists(passage_filepath):
logging.info("Download " + os.path.basename(passage_filepath))
util.http_get(
"https://msmarco.z22.web.core.windows.net/msmarcoranking/msmarco-passagetest2019-top1000.tsv.gz",
passage_filepath,
)
passage_cand = {}
with gzip.open(passage_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
qid, pid, query, passage = line.strip().split("\t")
if qid not in passage_cand:
passage_cand[qid] = []
passage_cand[qid].append([pid, passage])
logging.info("Queries: {}".format(len(queries)))
queries_result_list = []
run = {}
model = CrossEncoder(sys.argv[1], max_length=512)
for qid in tqdm.tqdm(relevant_qid):
query = queries[qid]
cand = passage_cand[qid]
pids = [c[0] for c in cand]
corpus_sentences = [c[1] for c in cand]
cross_inp = [[query, sent] for sent in corpus_sentences]
    if model.config.num_labels > 1:  # For Cross-Encoders that predict more than one score, apply softmax and use the second (positive) score
cross_scores = model.predict(cross_inp, apply_softmax=True)[:, 1].tolist()
else:
cross_scores = model.predict(cross_inp).tolist()
cross_scores_sparse = {}
for idx, pid in enumerate(pids):
cross_scores_sparse[pid] = cross_scores[idx]
sparse_scores = cross_scores_sparse
run[qid] = {}
for pid in sparse_scores:
run[qid][pid] = float(sparse_scores[pid])
evaluator = pytrec_eval.RelevanceEvaluator(relevant_docs, {"ndcg_cut.10"})
scores = evaluator.evaluate(run)
print("Queries:", len(relevant_qid))
print("NDCG@10: {:.2f}".format(np.mean([ele["ndcg_cut_10"] for ele in scores.values()]) * 100))
|
"""
This file evaluates CrossEncoder on the TREC 2019 Deep Learning (DL) Track: https://arxiv.org/abs/2003.07820
TREC 2019 DL is based on the MS MARCO corpus. MS MARCO provides sparse annotations, i.e., usually only a single
passage is marked as relevant for a given query. Many other highly relevant passages are not annotated and are hence
treated as errors if a model ranks them highly.
TREC DL instead annotated up to 200 passages per query for their relevance. It is therefore better suited to estimating
model performance for the reranking task in Information Retrieval.
Run:
python eval_cross-encoder-trec-dl.py cross-encoder-model-name
"""
import gzip
from collections import defaultdict
import logging
import tqdm
import numpy as np
import sys
import pytrec_eval
from sentence_transformers import util, CrossEncoder
import os
data_folder = "trec2019-data"
os.makedirs(data_folder, exist_ok=True)
# Read test queries
queries = {}
queries_filepath = os.path.join(data_folder, "msmarco-test2019-queries.tsv.gz")
if not os.path.exists(queries_filepath):
logging.info("Download " + os.path.basename(queries_filepath))
util.http_get(
"https://msmarco.z22.web.core.windows.net/msmarcoranking/msmarco-test2019-queries.tsv.gz", queries_filepath
)
with gzip.open(queries_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
queries[qid] = query
# Read which passages are relevant
relevant_docs = defaultdict(lambda: defaultdict(int))
qrels_filepath = os.path.join(data_folder, "2019qrels-pass.txt")
if not os.path.exists(qrels_filepath):
logging.info("Download " + os.path.basename(qrels_filepath))
util.http_get("https://trec.nist.gov/data/deep/2019qrels-pass.txt", qrels_filepath)
with open(qrels_filepath) as fIn:
for line in fIn:
qid, _, pid, score = line.strip().split()
score = int(score)
if score > 0:
relevant_docs[qid][pid] = score
# Only use queries that have at least one relevant passage
relevant_qid = []
for qid in queries:
if len(relevant_docs[qid]) > 0:
relevant_qid.append(qid)
# Read the top 1000 passages that are supposed to be re-ranked
passage_filepath = os.path.join(data_folder, "msmarco-passagetest2019-top1000.tsv.gz")
if not os.path.exists(passage_filepath):
logging.info("Download " + os.path.basename(passage_filepath))
util.http_get(
"https://msmarco.z22.web.core.windows.net/msmarcoranking/msmarco-passagetest2019-top1000.tsv.gz",
passage_filepath,
)
passage_cand = {}
with gzip.open(passage_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
qid, pid, query, passage = line.strip().split("\t")
if qid not in passage_cand:
passage_cand[qid] = []
passage_cand[qid].append([pid, passage])
logging.info("Queries: {}".format(len(queries)))
queries_result_list = []
run = {}
model = CrossEncoder(sys.argv[1], max_length=512)
for qid in tqdm.tqdm(relevant_qid):
query = queries[qid]
cand = passage_cand[qid]
pids = [c[0] for c in cand]
corpus_sentences = [c[1] for c in cand]
cross_inp = [[query, sent] for sent in corpus_sentences]
    if model.config.num_labels > 1:  # For Cross-Encoders that predict more than one score, apply softmax and use the second (positive) score
cross_scores = model.predict(cross_inp, apply_softmax=True)[:, 1].tolist()
else:
cross_scores = model.predict(cross_inp).tolist()
cross_scores_sparse = {}
for idx, pid in enumerate(pids):
cross_scores_sparse[pid] = cross_scores[idx]
sparse_scores = cross_scores_sparse
run[qid] = {}
for pid in sparse_scores:
run[qid][pid] = float(sparse_scores[pid])
evaluator = pytrec_eval.RelevanceEvaluator(relevant_docs, {"ndcg_cut.10"})
scores = evaluator.evaluate(run)
print("Queries:", len(relevant_qid))
print("NDCG@10: {:.2f}".format(np.mean([ele["ndcg_cut_10"] for ele in scores.values()]) * 100))
|
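Both evaluation scripts feed pytrec_eval nested dicts keyed by query id and then passage id. A toy, self-contained illustration of those input shapes; the ids and scores are made up.

import pytrec_eval

qrels = {"q1": {"p1": 2, "p3": 1}}               # graded relevance judgments
run = {"q1": {"p1": 0.9, "p2": 0.5, "p3": 0.1}}  # model scores per candidate passage

evaluator = pytrec_eval.RelevanceEvaluator(qrels, {"ndcg_cut.10"})
print(evaluator.evaluate(run))  # {"q1": {"ndcg_cut_10": ...}}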
import http.client
import json
from typing import Optional
def list_packages(*, contains: Optional[str] = None):
conn = http.client.HTTPSConnection("api.github.com")
headers = {
"Accept": "application/vnd.github+json",
"X-GitHub-Api-Version": "2022-11-28",
"User-Agent": "langchain-cli",
}
conn.request(
"GET", "/repos/langchain-ai/langchain/contents/templates", headers=headers
)
res = conn.getresponse()
res_str = res.read()
data = json.loads(res_str)
package_names = [
p["name"] for p in data if p["type"] == "dir" and p["name"] != "docs"
]
return [p for p in package_names if contains in p] if contains else package_names
|
import http.client
import json
from typing import Optional
def list_packages(*, contains: Optional[str] = None):
conn = http.client.HTTPSConnection("api.github.com")
headers = {
"Accept": "application/vnd.github+json",
"X-GitHub-Api-Version": "2022-11-28",
"User-Agent": "langchain-cli",
}
conn.request(
"GET", "/repos/langchain-ai/langchain/contents/templates", headers=headers
)
res = conn.getresponse()
res_str = res.read()
data = json.loads(res_str)
package_names = [
p["name"] for p in data if p["type"] == "dir" and p["name"] != "docs"
]
package_names_filtered = (
[p for p in package_names if contains in p] if contains else package_names
)
return package_names_filtered
|
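A brief usage sketch for `list_packages` above; it hits the live GitHub API, so it needs network access and is subject to rate limits.

# Assumes list_packages from the module above is in scope.
all_templates = list_packages()
rag_templates = list_packages(contains="rag")
print(len(all_templates), rag_templates[:5])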
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.utils.misc import get_box_tensor
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import HorizontalBoxes
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class PseudoBBoxCoder(BaseBBoxCoder):
"""Pseudo bounding box coder."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def encode(self, bboxes, gt_bboxes):
"""torch.Tensor: return the given ``bboxes``"""
gt_bboxes = get_box_tensor(gt_bboxes)
return gt_bboxes
def decode(self, bboxes, pred_bboxes):
"""torch.Tensor: return the given ``pred_bboxes``"""
if self.use_box_type:
pred_bboxes = HorizontalBoxes(pred_bboxes)
return pred_bboxes
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import TASK_UTILS
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class PseudoBBoxCoder(BaseBBoxCoder):
"""Pseudo bounding box coder."""
def __init__(self, **kwargs):
super(BaseBBoxCoder, self).__init__(**kwargs)
def encode(self, bboxes, gt_bboxes):
"""torch.Tensor: return the given ``bboxes``"""
return gt_bboxes
def decode(self, bboxes, pred_bboxes):
"""torch.Tensor: return the given ``pred_bboxes``"""
return pred_bboxes
|
from typing import Any, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.memory import BaseMemory
from langchain_core.messages import SystemMessage
from langchain_core.prompts.chat import MessagesPlaceholder
from langchain_core.tools import BaseTool
from langchain.agents.agent import AgentExecutor
from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
AgentTokenBufferMemory,
)
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.memory.token_buffer import ConversationTokenBufferMemory
def _get_default_system_message() -> SystemMessage:
return SystemMessage(
content=(
"Do your best to answer the questions. "
"Feel free to use any tools available to look up "
"relevant information, only if necessary"
),
)
def create_conversational_retrieval_agent(
llm: BaseLanguageModel,
tools: list[BaseTool],
remember_intermediate_steps: bool = True, # noqa: FBT001,FBT002
memory_key: str = "chat_history",
system_message: Optional[SystemMessage] = None,
verbose: bool = False, # noqa: FBT001,FBT002
max_token_limit: int = 2000,
**kwargs: Any,
) -> AgentExecutor:
"""A convenience method for creating a conversational retrieval agent.
Args:
llm: The language model to use, should be ChatOpenAI
tools: A list of tools the agent has access to
remember_intermediate_steps: Whether the agent should remember intermediate
steps or not. Intermediate steps refer to prior action/observation
pairs from previous questions. The benefit of remembering these is if
there is relevant information in there, the agent can use it to answer
follow up questions. The downside is it will take up more tokens.
memory_key: The name of the memory key in the prompt.
system_message: The system message to use. By default, a basic one will
be used.
        verbose: Whether the final AgentExecutor should be verbose or not.
            Defaults to False.
max_token_limit: The max number of tokens to keep around in memory.
Defaults to 2000.
Returns:
An agent executor initialized appropriately
"""
if remember_intermediate_steps:
memory: BaseMemory = AgentTokenBufferMemory(
memory_key=memory_key,
llm=llm,
max_token_limit=max_token_limit,
)
else:
memory = ConversationTokenBufferMemory(
memory_key=memory_key,
return_messages=True,
output_key="output",
llm=llm,
max_token_limit=max_token_limit,
)
_system_message = system_message or _get_default_system_message()
prompt = OpenAIFunctionsAgent.create_prompt(
system_message=_system_message,
extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)],
)
agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
return AgentExecutor(
agent=agent,
tools=tools,
memory=memory,
verbose=verbose,
return_intermediate_steps=remember_intermediate_steps,
**kwargs,
)
|
from typing import Any, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.memory import BaseMemory
from langchain_core.messages import SystemMessage
from langchain_core.prompts.chat import MessagesPlaceholder
from langchain_core.tools import BaseTool
from langchain.agents.agent import AgentExecutor
from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
AgentTokenBufferMemory,
)
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.memory.token_buffer import ConversationTokenBufferMemory
def _get_default_system_message() -> SystemMessage:
return SystemMessage(
content=(
"Do your best to answer the questions. "
"Feel free to use any tools available to look up "
"relevant information, only if necessary"
)
)
def create_conversational_retrieval_agent(
llm: BaseLanguageModel,
tools: list[BaseTool],
remember_intermediate_steps: bool = True, # noqa: FBT001,FBT002
memory_key: str = "chat_history",
system_message: Optional[SystemMessage] = None,
verbose: bool = False, # noqa: FBT001,FBT002
max_token_limit: int = 2000,
**kwargs: Any,
) -> AgentExecutor:
"""A convenience method for creating a conversational retrieval agent.
Args:
llm: The language model to use, should be ChatOpenAI
tools: A list of tools the agent has access to
remember_intermediate_steps: Whether the agent should remember intermediate
steps or not. Intermediate steps refer to prior action/observation
pairs from previous questions. The benefit of remembering these is if
there is relevant information in there, the agent can use it to answer
follow up questions. The downside is it will take up more tokens.
memory_key: The name of the memory key in the prompt.
system_message: The system message to use. By default, a basic one will
be used.
        verbose: Whether the final AgentExecutor should be verbose or not.
            Defaults to False.
max_token_limit: The max number of tokens to keep around in memory.
Defaults to 2000.
Returns:
An agent executor initialized appropriately
"""
if remember_intermediate_steps:
memory: BaseMemory = AgentTokenBufferMemory(
memory_key=memory_key, llm=llm, max_token_limit=max_token_limit
)
else:
memory = ConversationTokenBufferMemory(
memory_key=memory_key,
return_messages=True,
output_key="output",
llm=llm,
max_token_limit=max_token_limit,
)
_system_message = system_message or _get_default_system_message()
prompt = OpenAIFunctionsAgent.create_prompt(
system_message=_system_message,
extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)],
)
agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
return AgentExecutor(
agent=agent,
tools=tools,
memory=memory,
verbose=verbose,
return_intermediate_steps=remember_intermediate_steps,
**kwargs,
)
|
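A usage sketch for the factory above, assuming an OpenAI chat model and a Tavily search tool are available; the model name and tool choice are illustrative, not prescribed by the code.

from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_openai import ChatOpenAI

# Assumes create_conversational_retrieval_agent from the module above is importable.
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
tools = [TavilySearchResults(max_results=3)]
executor = create_conversational_retrieval_agent(llm, tools, verbose=True)

result = executor.invoke({"input": "What is LangChain used for?"})
print(result["output"])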
from typing import Union, Dict, Any
import google.ai.generativelanguage as glm
import google.generativeai as genai
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
CompletionResponse,
ImageBlock,
TextBlock,
)
from llama_index.core.multi_modal_llms.base import ChatMessage
from llama_index.core.utilities.gemini_utils import ROLES_FROM_GEMINI, ROLES_TO_GEMINI
# These are the shortened model names
# Any model that contains one of these names will not support function calling
MODELS_WITHOUT_FUNCTION_CALLING_SUPPORT = [
"gemini-2.0-flash-thinking",
"gemini-2.0-flash-lite",
]
def _error_if_finished_early(candidate: "glm.Candidate") -> None: # type: ignore[name-defined] # only until release
if (finish_reason := candidate.finish_reason) > 1: # 1=STOP (normally)
reason = finish_reason.name
# Safety reasons have more detail, so include that if we can.
if finish_reason == 3: # 3=Safety
relevant_safety = list(
filter(
lambda sr: sr.probability > 1, # 1=Negligible
candidate.safety_ratings,
)
)
reason += f" {relevant_safety}"
raise RuntimeError(f"Response was terminated early: {reason}")
def completion_from_gemini_response(
response: Union[
"genai.types.GenerateContentResponse",
"genai.types.AsyncGenerateContentResponse",
],
text: str = None,
delta: str = None,
) -> CompletionResponse:
top_candidate = response.candidates[0]
_error_if_finished_early(top_candidate)
raw = {
**(type(top_candidate).to_dict(top_candidate)), # type: ignore
**(type(response.prompt_feedback).to_dict(response.prompt_feedback)), # type: ignore
}
if response.usage_metadata:
raw["usage_metadata"] = type(response.usage_metadata).to_dict(
response.usage_metadata
)
return CompletionResponse(
text=text if text is not None else response.text,
delta=delta if delta is not None else response.text,
raw=raw,
)
def chat_from_gemini_response(
response: Union[
"genai.types.GenerateContentResponse",
"genai.types.AsyncGenerateContentResponse",
],
) -> ChatResponse:
top_candidate = response.candidates[0]
_error_if_finished_early(top_candidate)
raw = {
**(type(top_candidate).to_dict(top_candidate)), # type: ignore
**(type(response.prompt_feedback).to_dict(response.prompt_feedback)), # type: ignore
}
if response.usage_metadata:
raw["usage_metadata"] = type(response.usage_metadata).to_dict(
response.usage_metadata
)
role = ROLES_FROM_GEMINI[top_candidate.content.role]
try:
# When the response contains only a function call, the library
# raises an exception.
        # The easiest way to detect this is to try to access the text attribute and
# catch the exception.
# https://github.com/google-gemini/generative-ai-python/issues/670
text = response.text
except (ValueError, AttributeError):
text = None
additional_kwargs: Dict[str, Any] = {}
for part in response.parts:
if fn := part.function_call:
if "tool_calls" not in additional_kwargs:
additional_kwargs["tool_calls"] = []
additional_kwargs["tool_calls"].append(fn)
return ChatResponse(
message=ChatMessage(
role=role, content=text, additional_kwargs=additional_kwargs
),
raw=raw,
additional_kwargs=additional_kwargs,
)
def chat_message_to_gemini(message: ChatMessage) -> "genai.types.ContentDict":
"""Convert ChatMessages to Gemini-specific history, including ImageDocuments."""
parts = []
for block in message.blocks:
if isinstance(block, TextBlock):
if block.text:
parts.append({"text": block.text})
elif isinstance(block, ImageBlock):
base64_bytes = block.resolve_image(as_base64=False).read()
parts.append(
{
"mime_type": block.image_mimetype,
"data": base64_bytes,
}
)
else:
msg = f"Unsupported content block type: {type(block).__name__}"
raise ValueError(msg)
for tool_call in message.additional_kwargs.get("tool_calls", []):
parts.append(tool_call)
return {
"role": ROLES_TO_GEMINI[message.role],
"parts": parts,
}
def is_function_calling_model(model: str) -> bool:
for model_name in MODELS_WITHOUT_FUNCTION_CALLING_SUPPORT:
if model_name in model:
return False
return True
|
from typing import Union, Dict, Any
import google.ai.generativelanguage as glm
import google.generativeai as genai
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
CompletionResponse,
ImageBlock,
TextBlock,
)
from llama_index.core.multi_modal_llms.base import ChatMessage
from llama_index.core.utilities.gemini_utils import ROLES_FROM_GEMINI, ROLES_TO_GEMINI
# These are the shortened model names
# Any model that contains one of these names will not support function calling
MODELS_WITHOUT_FUNCTION_CALLING_SUPPORT = [
"gemini-2.0-flash-thinking",
"gemini-2.0-flash-lite",
]
def _error_if_finished_early(candidate: "glm.Candidate") -> None: # type: ignore[name-defined] # only until release
if (finish_reason := candidate.finish_reason) > 1: # 1=STOP (normally)
reason = finish_reason.name
# Safety reasons have more detail, so include that if we can.
if finish_reason == 3: # 3=Safety
relevant_safety = list(
filter(
lambda sr: sr.probability > 1, # 1=Negligible
candidate.safety_ratings,
)
)
reason += f" {relevant_safety}"
raise RuntimeError(f"Response was terminated early: {reason}")
def completion_from_gemini_response(
response: Union[
"genai.types.GenerateContentResponse",
"genai.types.AsyncGenerateContentResponse",
],
) -> CompletionResponse:
top_candidate = response.candidates[0]
_error_if_finished_early(top_candidate)
raw = {
**(type(top_candidate).to_dict(top_candidate)), # type: ignore
**(type(response.prompt_feedback).to_dict(response.prompt_feedback)), # type: ignore
}
if response.usage_metadata:
raw["usage_metadata"] = type(response.usage_metadata).to_dict(
response.usage_metadata
)
return CompletionResponse(text=response.text, raw=raw)
def chat_from_gemini_response(
response: Union[
"genai.types.GenerateContentResponse",
"genai.types.AsyncGenerateContentResponse",
],
) -> ChatResponse:
top_candidate = response.candidates[0]
_error_if_finished_early(top_candidate)
raw = {
**(type(top_candidate).to_dict(top_candidate)), # type: ignore
**(type(response.prompt_feedback).to_dict(response.prompt_feedback)), # type: ignore
}
if response.usage_metadata:
raw["usage_metadata"] = type(response.usage_metadata).to_dict(
response.usage_metadata
)
role = ROLES_FROM_GEMINI[top_candidate.content.role]
try:
# When the response contains only a function call, the library
# raises an exception.
        # The easiest way to detect this is to try to access the text attribute and
# catch the exception.
# https://github.com/google-gemini/generative-ai-python/issues/670
text = response.text
except (ValueError, AttributeError):
text = None
additional_kwargs: Dict[str, Any] = {}
for part in response.parts:
if fn := part.function_call:
if "tool_calls" not in additional_kwargs:
additional_kwargs["tool_calls"] = []
additional_kwargs["tool_calls"].append(fn)
return ChatResponse(
message=ChatMessage(
role=role, content=text, additional_kwargs=additional_kwargs
),
raw=raw,
additional_kwargs=additional_kwargs,
)
def chat_message_to_gemini(message: ChatMessage) -> "genai.types.ContentDict":
"""Convert ChatMessages to Gemini-specific history, including ImageDocuments."""
parts = []
for block in message.blocks:
if isinstance(block, TextBlock):
if block.text:
parts.append({"text": block.text})
elif isinstance(block, ImageBlock):
base64_bytes = block.resolve_image(as_base64=False).read()
parts.append(
{
"mime_type": block.image_mimetype,
"data": base64_bytes,
}
)
else:
msg = f"Unsupported content block type: {type(block).__name__}"
raise ValueError(msg)
for tool_call in message.additional_kwargs.get("tool_calls", []):
parts.append(tool_call)
return {
"role": ROLES_TO_GEMINI[message.role],
"parts": parts,
}
def is_function_calling_model(model: str) -> bool:
for model_name in MODELS_WITHOUT_FUNCTION_CALLING_SUPPORT:
if model_name in model:
return False
return True
|
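A small sketch of the `chat_message_to_gemini` helper above converting a plain-text `ChatMessage` into the `ContentDict` shape Gemini expects; the message text is illustrative.

from llama_index.core.base.llms.types import ChatMessage, MessageRole

# Assumes chat_message_to_gemini from the module above is in scope.
msg = ChatMessage(role=MessageRole.USER, content="Summarize this paragraph, please.")
print(chat_message_to_gemini(msg))
# -> {"role": "user", "parts": [{"text": "Summarize this paragraph, please."}]}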
import numpy as np
import torch
from docarray import Document
from docarray.document import AnyDocument
from docarray.typing import AnyUrl, Embedding, ImageUrl, NdArray, TextUrl, TorchTensor
def test_proto_all_types():
class Mymmdoc(Document):
tensor: NdArray
torch_tensor: TorchTensor
embedding: Embedding
any_url: AnyUrl
image_url: ImageUrl
text_url: TextUrl
doc = Mymmdoc(
tensor=np.zeros((3, 224, 224)),
torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((100, 1)),
any_url='http://jina.ai',
image_url='http://jina.ai/bla.jpg',
text_url='http://jina.ai',
)
new_doc = AnyDocument.from_protobuf(doc.to_protobuf())
for field, value in new_doc:
assert isinstance(value, doc._get_nested_document_class(field))
|
import numpy as np
import torch
from docarray import Document
from docarray.document import AnyDocument
from docarray.typing import AnyUrl, Embedding, ImageUrl, Tensor, TextUrl, TorchTensor
def test_proto_all_types():
class Mymmdoc(Document):
tensor: Tensor
torch_tensor: TorchTensor
embedding: Embedding
any_url: AnyUrl
image_url: ImageUrl
text_url: TextUrl
doc = Mymmdoc(
tensor=np.zeros((3, 224, 224)),
torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((100, 1)),
any_url='http://jina.ai',
image_url='http://jina.ai/bla.jpg',
text_url='http://jina.ai',
)
new_doc = AnyDocument.from_protobuf(doc.to_protobuf())
for field, value in new_doc:
assert isinstance(value, doc._get_nested_document_class(field))
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is used to get the files against which we will run doc testing.
This uses `tests_fetcher.get_all_doctest_files` then groups the test files by their directory paths.
The files in `docs/source/en/model_doc` or `docs/source/en/tasks` are **NOT** grouped together with other files in the
same directory: the objective is to run doctest against them in independent GitHub Actions jobs.
Assume we are under `transformers` root directory:
To get a map (dictionary) between directory (or file) paths and the corresponding files
```bash
python utils/split_doctest_jobs.py
```
or to get a list of lists of directory (or file) paths
```bash
python utils/split_doctest_jobs.py --only_return_keys --num_splits 4
```
(this is used to allow GitHub Actions to generate more than 256 jobs using matrix)
"""
import argparse
from collections import defaultdict
from pathlib import Path
from tests_fetcher import get_all_doctest_files
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--only_return_keys",
action="store_true",
help="if to only return the keys (which is a list of list of files' directory or file paths).",
)
parser.add_argument(
"--num_splits",
type=int,
default=1,
help="the number of splits into which the (flat) list of directory/file paths will be split. This has effect only if `only_return_keys` is `True`.",
)
args = parser.parse_args()
all_doctest_files = get_all_doctest_files()
raw_test_collection_map = defaultdict(list)
for file in all_doctest_files:
file_dir = "/".join(Path(file).parents[0].parts)
        # Do not run files under `src/` for now, as doctests there are completely broken at the moment. See issues/39159 and
# https://github.com/huggingface/transformers/actions/runs/15988670157
# TODO (ydshieh): fix the error, ideally before 2025/09
if file_dir.startswith("src/"):
continue
raw_test_collection_map[file_dir].append(file)
refined_test_collection_map = {}
for file_dir in raw_test_collection_map.keys():
if file_dir in ["docs/source/en/model_doc", "docs/source/en/tasks"]:
for file in raw_test_collection_map[file_dir]:
refined_test_collection_map[file] = file
else:
refined_test_collection_map[file_dir] = " ".join(sorted(raw_test_collection_map[file_dir]))
sorted_file_dirs = sorted(refined_test_collection_map.keys())
test_collection_map = {}
for file_dir in sorted_file_dirs:
test_collection_map[file_dir] = refined_test_collection_map[file_dir]
num_jobs = len(test_collection_map)
num_jobs_per_splits = num_jobs // args.num_splits
file_directory_splits = []
end = 0
for idx in range(args.num_splits):
start = end
end = start + num_jobs_per_splits + (1 if idx < num_jobs % args.num_splits else 0)
file_directory_splits.append(sorted_file_dirs[start:end])
if args.only_return_keys:
print(file_directory_splits)
else:
print(dict(test_collection_map))
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is used to get the files against which we will run doc testing.
This uses `tests_fetcher.get_all_doctest_files` then groups the test files by their directory paths.
The files in `docs/source/en/model_doc` or `docs/source/en/tasks` are **NOT** grouped together with other files in the
same directory: the objective is to run doctest against them in independent GitHub Actions jobs.
Assume we are under `transformers` root directory:
To get a map (dictionary) between directory (or file) paths and the corresponding files
```bash
python utils/split_doctest_jobs.py
```
or to get a list of lists of directory (or file) paths
```bash
python utils/split_doctest_jobs.py --only_return_keys --num_splits 4
```
(this is used to allow GitHub Actions to generate more than 256 jobs using matrix)
"""
import argparse
from collections import defaultdict
from pathlib import Path
from tests_fetcher import get_all_doctest_files
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--only_return_keys",
action="store_true",
help="if to only return the keys (which is a list of list of files' directory or file paths).",
)
parser.add_argument(
"--num_splits",
type=int,
default=1,
help="the number of splits into which the (flat) list of directory/file paths will be split. This has effect only if `only_return_keys` is `True`.",
)
args = parser.parse_args()
all_doctest_files = get_all_doctest_files()
raw_test_collection_map = defaultdict(list)
for file in all_doctest_files:
file_dir = "/".join(Path(file).parents[0].parts)
raw_test_collection_map[file_dir].append(file)
refined_test_collection_map = {}
for file_dir in raw_test_collection_map.keys():
if file_dir in ["docs/source/en/model_doc", "docs/source/en/tasks"]:
for file in raw_test_collection_map[file_dir]:
refined_test_collection_map[file] = file
else:
refined_test_collection_map[file_dir] = " ".join(sorted(raw_test_collection_map[file_dir]))
sorted_file_dirs = sorted(refined_test_collection_map.keys())
test_collection_map = {}
for file_dir in sorted_file_dirs:
test_collection_map[file_dir] = refined_test_collection_map[file_dir]
num_jobs = len(test_collection_map)
num_jobs_per_splits = num_jobs // args.num_splits
file_directory_splits = []
end = 0
for idx in range(args.num_splits):
start = end
end = start + num_jobs_per_splits + (1 if idx < num_jobs % args.num_splits else 0)
file_directory_splits.append(sorted_file_dirs[start:end])
if args.only_return_keys:
print(file_directory_splits)
else:
print(dict(test_collection_map))
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import unittest
from unittest.mock import MagicMock, patch
import pytest
from mmdet.datasets import DATASETS
@patch('mmdet.datasets.CocoDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.CustomDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.XMLDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.CityscapesDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.CocoDataset._filter_imgs', MagicMock)
@patch('mmdet.datasets.CustomDataset._filter_imgs', MagicMock)
@patch('mmdet.datasets.XMLDataset._filter_imgs', MagicMock)
@patch('mmdet.datasets.CityscapesDataset._filter_imgs', MagicMock)
@pytest.mark.parametrize('dataset',
['CocoDataset', 'VOCDataset', 'CityscapesDataset'])
def test_custom_classes_override_default(dataset):
dataset_class = DATASETS.get(dataset)
if dataset in ['CocoDataset', 'CityscapesDataset']:
dataset_class.coco = MagicMock()
dataset_class.cat_ids = MagicMock()
original_classes = dataset_class.CLASSES
# Test setting classes as a tuple
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=('bus', 'car'),
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ('bus', 'car')
print(custom_dataset)
# Test setting classes as a list
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=['bus', 'car'],
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ['bus', 'car']
print(custom_dataset)
# Test overriding not a subset
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=['foo'],
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ['foo']
print(custom_dataset)
# Test default behavior
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=None,
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES == original_classes
print(custom_dataset)
# Test sending file path
import tempfile
with tempfile.TemporaryDirectory() as tmpdir:
        path = osp.join(tmpdir, 'classes.txt')
with open(path, 'w') as f:
f.write('bus\ncar\n')
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=path,
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ['bus', 'car']
print(custom_dataset)
class CustomDatasetTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.data_dir = osp.join(
osp.dirname(osp.dirname(osp.dirname(__file__))), 'data')
self.dataset_class = DATASETS.get('XMLDataset')
def test_data_infos__default_db_directories(self):
"""Test correct data read having a Pacal-VOC directory structure."""
test_dataset_root = osp.join(self.data_dir, 'VOCdevkit', 'VOC2007')
custom_ds = self.dataset_class(
data_root=test_dataset_root,
ann_file=osp.join(test_dataset_root, 'ImageSets', 'Main',
'trainval.txt'),
pipeline=[],
classes=('person', 'dog'),
test_mode=True)
self.assertListEqual([{
'id': '000001',
'filename': osp.join('JPEGImages', '000001.jpg'),
'width': 353,
'height': 500
}], custom_ds.data_infos)
def test_data_infos__overridden_db_subdirectories(self):
"""Test correct data read having a customized directory structure."""
test_dataset_root = osp.join(self.data_dir, 'custom_dataset')
custom_ds = self.dataset_class(
data_root=test_dataset_root,
ann_file=osp.join(test_dataset_root, 'trainval.txt'),
pipeline=[],
classes=('person', 'dog'),
test_mode=True,
img_prefix='',
img_subdir='images',
ann_subdir='images')
self.assertListEqual([{
'id': '000001',
'filename': osp.join('images', '000001.jpg'),
'width': 353,
'height': 500
}], custom_ds.data_infos)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import unittest
from unittest.mock import MagicMock, patch
import pytest
from mmdet.datasets import DATASETS
@patch('mmdet.datasets.CocoDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.CustomDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.XMLDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.CityscapesDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.CocoDataset._filter_imgs', MagicMock)
@patch('mmdet.datasets.CustomDataset._filter_imgs', MagicMock)
@patch('mmdet.datasets.XMLDataset._filter_imgs', MagicMock)
@patch('mmdet.datasets.CityscapesDataset._filter_imgs', MagicMock)
@pytest.mark.parametrize('dataset',
['CocoDataset', 'VOCDataset', 'CityscapesDataset'])
def test_custom_classes_override_default(dataset):
dataset_class = DATASETS.get(dataset)
if dataset in ['CocoDataset', 'CityscapesDataset']:
dataset_class.coco = MagicMock()
dataset_class.cat_ids = MagicMock()
original_classes = dataset_class.CLASSES
# Test setting classes as a tuple
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=('bus', 'car'),
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ('bus', 'car')
print(custom_dataset)
# Test setting classes as a list
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=['bus', 'car'],
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ['bus', 'car']
print(custom_dataset)
# Test overriding not a subset
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=['foo'],
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ['foo']
print(custom_dataset)
# Test default behavior
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=None,
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES == original_classes
print(custom_dataset)
# Test sending file path
import tempfile
tmp_file = tempfile.NamedTemporaryFile()
with open(tmp_file.name, 'w') as f:
f.write('bus\ncar\n')
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=tmp_file.name,
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
tmp_file.close()
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ['bus', 'car']
print(custom_dataset)
class CustomDatasetTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.data_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
'data')
self.dataset_class = DATASETS.get('XMLDataset')
def test_data_infos__default_db_directories(self):
"""Test correct data read having a Pacal-VOC directory structure."""
test_dataset_root = os.path.join(self.data_dir, 'VOCdevkit', 'VOC2007')
custom_ds = self.dataset_class(
data_root=test_dataset_root,
ann_file=os.path.join(test_dataset_root, 'ImageSets', 'Main',
'trainval.txt'),
pipeline=[],
classes=('person', 'dog'),
test_mode=True)
self.assertListEqual([{
'id': '000001',
'filename': 'JPEGImages/000001.jpg',
'width': 353,
'height': 500
}], custom_ds.data_infos)
def test_data_infos__overridden_db_subdirectories(self):
"""Test correct data read having a customized directory structure."""
test_dataset_root = os.path.join(self.data_dir, 'custom_dataset')
custom_ds = self.dataset_class(
data_root=test_dataset_root,
ann_file=os.path.join(test_dataset_root, 'trainval.txt'),
pipeline=[],
classes=('person', 'dog'),
test_mode=True,
img_prefix='',
img_subdir='images',
ann_subdir='images')
self.assertListEqual([{
'id': '000001',
'filename': 'images/000001.jpg',
'width': 353,
'height': 500
}], custom_ds.data_infos)
|
"""Experiment with different models."""
from __future__ import annotations
from collections.abc import Sequence
from typing import Optional
from langchain_core.language_models.llms import BaseLLM
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.utils.input import get_color_mapping, print_text
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
class ModelLaboratory:
"""A utility to experiment with and compare the performance of different models."""
def __init__(self, chains: Sequence[Chain], names: Optional[list[str]] = None):
"""Initialize the ModelLaboratory with chains to experiment with.
Args:
chains (Sequence[Chain]): A sequence of chains to experiment with.
Each chain must have exactly one input and one output variable.
names (Optional[List[str]]): Optional list of names corresponding to each chain.
If provided, its length must match the number of chains.
Raises:
ValueError: If any chain is not an instance of `Chain`.
ValueError: If a chain does not have exactly one input variable.
ValueError: If a chain does not have exactly one output variable.
ValueError: If the length of `names` does not match the number of chains.
"""
for chain in chains:
if not isinstance(chain, Chain):
msg = (
"ModelLaboratory should now be initialized with Chains. "
"If you want to initialize with LLMs, use the `from_llms` method "
"instead (`ModelLaboratory.from_llms(...)`)"
)
raise ValueError(msg)
if len(chain.input_keys) != 1:
msg = (
"Currently only support chains with one input variable, "
f"got {chain.input_keys}"
)
raise ValueError(msg)
            if len(chain.output_keys) != 1:
                msg = (
                    "Currently only support chains with one output variable, "
                    f"got {chain.output_keys}"
                )
                raise ValueError(msg)
if names is not None and len(names) != len(chains):
msg = "Length of chains does not match length of names."
raise ValueError(msg)
self.chains = chains
chain_range = [str(i) for i in range(len(self.chains))]
self.chain_colors = get_color_mapping(chain_range)
self.names = names
@classmethod
def from_llms(
cls, llms: list[BaseLLM], prompt: Optional[PromptTemplate] = None
) -> ModelLaboratory:
"""Initialize the ModelLaboratory with LLMs and an optional prompt.
Args:
llms (List[BaseLLM]): A list of LLMs to experiment with.
prompt (Optional[PromptTemplate]): An optional prompt to use with the LLMs.
If provided, the prompt must contain exactly one input variable.
Returns:
ModelLaboratory: An instance of `ModelLaboratory` initialized with LLMs.
"""
if prompt is None:
prompt = PromptTemplate(input_variables=["_input"], template="{_input}")
chains = [LLMChain(llm=llm, prompt=prompt) for llm in llms]
names = [str(llm) for llm in llms]
return cls(chains, names=names)
def compare(self, text: str) -> None:
"""Compare model outputs on an input text.
        If a prompt was provided when initializing the laboratory, this text will be
        fed into that prompt. If no prompt was provided, the input text is used as the
        entire prompt.
Args:
text: input text to run all models on.
"""
print(f"\033[1mInput:\033[0m\n{text}\n") # noqa: T201
for i, chain in enumerate(self.chains):
name = self.names[i] if self.names is not None else str(chain)
print_text(name, end="\n")
output = chain.run(text)
print_text(output, color=self.chain_colors[str(i)], end="\n\n")
|
"""Experiment with different models."""
from __future__ import annotations
from collections.abc import Sequence
from typing import Optional
from langchain_core.language_models.llms import BaseLLM
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.utils.input import get_color_mapping, print_text
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
class ModelLaboratory:
"""A utility to experiment with and compare the performance of different models."""
def __init__(self, chains: Sequence[Chain], names: Optional[list[str]] = None):
"""Initialize the ModelLaboratory with chains to experiment with.
Args:
chains (Sequence[Chain]): A sequence of chains to experiment with.
Each chain must have exactly one input and one output variable.
names (Optional[List[str]]): Optional list of names corresponding to each chain.
If provided, its length must match the number of chains.
Raises:
ValueError: If any chain is not an instance of `Chain`.
ValueError: If a chain does not have exactly one input variable.
ValueError: If a chain does not have exactly one output variable.
ValueError: If the length of `names` does not match the number of chains.
"""
for chain in chains:
if not isinstance(chain, Chain):
msg = (
"ModelLaboratory should now be initialized with Chains. "
"If you want to initialize with LLMs, use the `from_llms` method "
"instead (`ModelLaboratory.from_llms(...)`)"
)
raise ValueError(msg)
if len(chain.input_keys) != 1:
msg = (
"Currently only support chains with one input variable, "
f"got {chain.input_keys}"
)
raise ValueError(msg)
if len(chain.output_keys) != 1:
msg = (
"Currently only support chains with one output variable, "
f"got {chain.output_keys}"
)
raise ValueError(msg)
if names is not None:
if len(names) != len(chains):
msg = "Length of chains does not match length of names."
raise ValueError(msg)
self.chains = chains
chain_range = [str(i) for i in range(len(self.chains))]
self.chain_colors = get_color_mapping(chain_range)
self.names = names
@classmethod
def from_llms(
cls, llms: list[BaseLLM], prompt: Optional[PromptTemplate] = None
) -> ModelLaboratory:
"""Initialize the ModelLaboratory with LLMs and an optional prompt.
Args:
llms (List[BaseLLM]): A list of LLMs to experiment with.
prompt (Optional[PromptTemplate]): An optional prompt to use with the LLMs.
If provided, the prompt must contain exactly one input variable.
Returns:
ModelLaboratory: An instance of `ModelLaboratory` initialized with LLMs.
"""
if prompt is None:
prompt = PromptTemplate(input_variables=["_input"], template="{_input}")
chains = [LLMChain(llm=llm, prompt=prompt) for llm in llms]
names = [str(llm) for llm in llms]
return cls(chains, names=names)
def compare(self, text: str) -> None:
"""Compare model outputs on an input text.
        If a prompt was provided when initializing the laboratory, this text will be
        fed into that prompt. If no prompt was provided, the input text is used as the
        entire prompt.
Args:
text: input text to run all models on.
"""
print(f"\033[1mInput:\033[0m\n{text}\n") # noqa: T201
for i, chain in enumerate(self.chains):
if self.names is not None:
name = self.names[i]
else:
name = str(chain)
print_text(name, end="\n")
output = chain.run(text)
print_text(output, color=self.chain_colors[str(i)], end="\n\n")
|
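A usage sketch for `ModelLaboratory` above; the two LLM instances are illustrative and any `BaseLLM` implementations would work.

from langchain_openai import OpenAI

# Assumes ModelLaboratory from the module above is importable.
lab = ModelLaboratory.from_llms(
    [OpenAI(temperature=0), OpenAI(temperature=0.9)]
)
lab.compare("What would be a good name for a company that makes colorful socks?")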
def get_doc_value():
return 'MyExecutorAfterReload'
|
def get_doc_value():
return 'MyExecutorAfterReload'
|