input | output
---|---
import asyncio
from typing import Any, List, Optional
from zhipuai import ZhipuAI as ZhipuAIClient
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CallbackManager
class ZhipuAIEmbedding(BaseEmbedding):
"""
ZhipuAI embedding class.
Visit https://open.bigmodel.cn to get more information about ZhipuAI.
Examples:
`pip install llama-index-embeddings-zhipuai`
```python
from llama_index.embeddings.zhipuai import ZhipuAIEmbedding
embedding = ZhipuAIEmbedding(model="embedding-2", api_key="YOUR API KEY")
response = embedding.get_general_text_embedding("who are you?")
print(response)
```
"""
model: str = Field(description="The ZhipuAI model to use.")
api_key: Optional[str] = Field(
default=None,
description="The API key to use for the ZhipuAI API.",
)
dimensions: Optional[int] = Field(
default=1024,
description=(
"The number of dimensions the resulting output embeddings should have. "
"Only supported in embedding-3 and later models. embedding-2 is fixed at 1024."
),
)
timeout: Optional[float] = Field(
default=None,
description="The timeout to use for the ZhipuAI API.",
)
_client: Optional[ZhipuAIClient] = PrivateAttr()
def __init__(
self,
model: str,
api_key: str,
dimensions: Optional[int] = 1024,
timeout: Optional[float] = None,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
super().__init__(
model=model,
dimensions=dimensions,
timeout=timeout,
callback_manager=callback_manager,
**kwargs,
)
self._client = ZhipuAIClient(api_key=api_key)
@classmethod
def class_name(cls) -> str:
return "ZhipuAIEmbedding"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self.get_general_text_embedding(query)
async def _aget_query_embedding(self, query: str) -> List[float]:
"""The asynchronous version of _get_query_embedding."""
return await self.aget_general_text_embedding(query)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self.get_general_text_embedding(text)
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Asynchronously get text embedding."""
return await self.aget_general_text_embedding(text)
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
embeddings_list: List[List[float]] = []
for text in texts:
embeddings = self.get_general_text_embedding(text)
embeddings_list.append(embeddings)
return embeddings_list
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Asynchronously get text embeddings."""
return await asyncio.gather(
*[self.aget_general_text_embedding(text) for text in texts]
)
def get_general_text_embedding(self, text: str) -> List[float]:
"""Get ZhipuAI embeddings."""
response = self._client.embeddings.create(
model=self.model,
input=text,
dimensions=self.dimensions,
timeout=self.timeout,
)
return response.data[0].embedding
async def aget_general_text_embedding(self, text: str) -> List[float]:
"""Asynchronously get ZhipuAI embeddings."""
response = await asyncio.to_thread(
self._client.embeddings.create,
model=self.model,
input=text,
dimensions=self.dimensions,
timeout=self.timeout,
)
return response.data[0].embedding
|
import asyncio
from typing import Any, List, Optional
from zhipuai import ZhipuAI as ZhipuAIClient
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CallbackManager
class ZhipuAIEmbedding(BaseEmbedding):
"""ZhipuAI LLM.
Visit https://open.bigmodel.cn to get more information about ZhipuAI.
Examples:
`pip install llama-index-embeddings-zhipuai`
```python
from llama_index.embeddings.zhipuai import ZhipuAIEmbedding
embedding = ZhipuAIEmbedding(model="embedding-2", api_key="YOUR API KEY")
response = embedding.get_general_text_embedding("who are you?")
print(response)
```
"""
model: str = Field(description="The ZhipuAI model to use.")
api_key: Optional[str] = Field(
default=None,
description="The API key to use for the ZhipuAI API.",
)
dimensions: Optional[int] = Field(
default=1024,
description=(
"The number of dimensions the resulting output embeddings should have. "
"Only supported in embedding-3 and later models. embedding-2 is fixed at 1024."
),
)
timeout: Optional[float] = Field(
default=None,
description="The timeout to use for the ZhipuAI API.",
)
_client: Optional[ZhipuAIClient] = PrivateAttr()
def __init__(
self,
model: str,
api_key: str,
dimensions: Optional[int] = 1024,
timeout: Optional[float] = None,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
super().__init__(
model=model,
dimensions=dimensions,
timeout=timeout,
callback_manager=callback_manager,
**kwargs,
)
self._client = ZhipuAIClient(api_key=api_key)
@classmethod
def class_name(cls) -> str:
return "ZhipuAIEmbedding"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self.get_general_text_embedding(query)
async def _aget_query_embedding(self, query: str) -> List[float]:
"""The asynchronous version of _get_query_embedding."""
return await self.aget_general_text_embedding(query)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self.get_general_text_embedding(text)
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Asynchronously get text embedding."""
return await self.aget_general_text_embedding(text)
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
embeddings_list: List[List[float]] = []
for text in texts:
embeddings = self.get_general_text_embedding(text)
embeddings_list.append(embeddings)
return embeddings_list
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Asynchronously get text embeddings."""
return await asyncio.gather(
*[self.aget_general_text_embedding(text) for text in texts]
)
def get_general_text_embedding(self, text: str) -> List[float]:
"""Get ZhipuAI embeddings."""
response = self._client.embeddings.create(
model=self.model,
input=text,
dimensions=self.dimensions,
timeout=self.timeout,
)
return response.data[0].embedding
async def aget_general_text_embedding(self, text: str) -> List[float]:
"""Asynchronously get ZhipuAI embeddings."""
response = await asyncio.to_thread(
self._client.embeddings.create,
model=self.model,
input=text,
dimensions=self.dimensions,
timeout=self.timeout,
)
return response.data[0].embedding
|
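As a usage note on the pair above: a minimal sketch, assuming the `zhipuai` SDK and the `llama-index-embeddings-zhipuai` package are installed, of driving the class's async path from plain asyncio. The API key, model name, and `dimensions` override are placeholders; per the field description, `dimensions` only takes effect on embedding-3 and later models.

```python
import asyncio
from llama_index.embeddings.zhipuai import ZhipuAIEmbedding

# placeholder credentials and model; embedding-2 would ignore `dimensions`
embedding = ZhipuAIEmbedding(model="embedding-3", api_key="YOUR_API_KEY", dimensions=2048)

async def embed_all(texts):
    # mirrors _aget_text_embeddings: one thread-backed request per text, gathered concurrently
    return await asyncio.gather(*(embedding.aget_general_text_embedding(t) for t in texts))

vectors = asyncio.run(embed_all(["first document", "second document"]))
print(len(vectors), len(vectors[0]))
```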
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = evaluator(model)
"""
Average Querie: num_rows: 49.92307692307692, num_cols: 30522.0, row_non_zero_mean: 74.91560451801007, row_sparsity_mean: 0.9975455219929035
Average Corpus: num_rows: 4334.7692307692305, num_cols: 30522.0, row_non_zero_mean: 174.81000049297626, row_sparsity_mean: 0.9942726905529315
Aggregated for Score Function: dot
Accuracy@1: 58.72%
Accuracy@3: 75.37%
Accuracy@5: 80.76%
Accuracy@10: 87.07%
Precision@1: 58.72%
Recall@1: 35.61%
Precision@3: 36.31%
Recall@3: 50.84%
Precision@5: 27.75%
Recall@5: 56.55%
Precision@10: 19.18%
Recall@10: 64.24%
MRR@10: 0.6821
NDCG@10: 0.6204
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6204
|
import logging
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseNanoBEIREvaluator,
)
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = evaluator(model)
"""
Average Querie: num_rows: 49.92307692307692, num_cols: 30522.0, row_non_zero_mean: 74.91560451801007, row_sparsity_mean: 0.9975455219929035
Average Corpus: num_rows: 4334.7692307692305, num_cols: 30522.0, row_non_zero_mean: 174.81000049297626, row_sparsity_mean: 0.9942726905529315
Aggregated for Score Function: dot
Accuracy@1: 58.72%
Accuracy@3: 75.37%
Accuracy@5: 80.76%
Accuracy@10: 87.07%
Precision@1: 58.72%
Recall@1: 35.61%
Precision@3: 36.31%
Recall@3: 50.84%
Precision@5: 27.75%
Recall@5: 56.55%
Precision@10: 19.18%
Recall@10: 64.24%
MRR@10: 0.6821
NDCG@10: 0.6204
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6204
|
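A quick variation on the evaluator above: restricting `dataset_names` to a couple of NanoBEIR subsets (a list is accepted, as in the training script later in this section) gives a much faster smoke test, and the headline score can still be read back through `evaluator.primary_metric`.

```python
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator

model = SparseEncoder("naver/splade-cocondenser-ensembledistil")

# evaluate only two small subsets instead of the full NanoBEIR suite
evaluator = SparseNanoBEIREvaluator(dataset_names=["msmarco", "nfcorpus"], batch_size=16)
results = evaluator(model)

print(evaluator.primary_metric, results[evaluator.primary_metric])
```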
import json
from jina.logging.logger import JinaLogger
from jina.parsers import set_gateway_parser
from jina.serve.runtimes.gateway.http.app import get_fastapi_app
from jina.serve.runtimes.gateway.streamer import GatewayStreamer
JINA_LOGO_URL = 'https://api.jina.ai/logo/logo-product/jina-core/horizontal-layout/colored/Product%20logo_Core_vertical_colorful%402x-margin.png'
GATEWAY_SCHEMA_FILENAME = 'gateway.json'
args = set_gateway_parser().parse_args([])
logger = JinaLogger('')
graph_description = json.loads(args.graph_description)
graph_conditions = json.loads(args.graph_conditions)
deployments_addresses = json.loads(args.deployments_addresses)
deployments_no_reduce = json.loads(args.deployments_no_reduce)
streamer = GatewayStreamer(
graph_representation=graph_description,
executor_addresses=deployments_addresses,
graph_conditions=graph_conditions,
deployments_no_reduce=deployments_no_reduce,
timeout_send=args.timeout_send,
retries=args.retries,
compression=args.compression,
runtime_name=args.name,
prefetch=args.prefetch,
logger=logger,
)
gateway_app = get_fastapi_app(
streamer=streamer,
title=args.title,
description=args.description,
no_debug_endpoints=args.no_debug_endpoints,
no_crud_endpoints=args.no_crud_endpoints,
expose_endpoints=args.expose_endpoints,
expose_graphql_endpoint=args.expose_graphql_endpoint,
cors=args.cors,
logger=logger,
)
gateway_schema = gateway_app.openapi()
gateway_schema['info']['x-logo'] = {'url': JINA_LOGO_URL}
gateway_schema['servers'] = []
gateway_schema['servers'].append(
{'url': f'http://localhost:{args.port}', 'description': 'Local Jina gateway'}
)
with open(GATEWAY_SCHEMA_FILENAME, 'w') as f:
json.dump(gateway_schema, f)
|
import json
from jina.logging.logger import JinaLogger
from jina.parsers import set_gateway_parser
from jina.serve.runtimes.gateway.http.app import get_fastapi_app
from jina.serve.streamer import GatewayStreamer
JINA_LOGO_URL = 'https://api.jina.ai/logo/logo-product/jina-core/horizontal-layout/colored/Product%20logo_Core_vertical_colorful%402x-margin.png'
GATEWAY_SCHEMA_FILENAME = 'gateway.json'
args = set_gateway_parser().parse_args([])
logger = JinaLogger('')
graph_description = json.loads(args.graph_description)
graph_conditions = json.loads(args.graph_conditions)
deployments_addresses = json.loads(args.deployments_addresses)
deployments_no_reduce = json.loads(args.deployments_no_reduce)
streamer = GatewayStreamer(
graph_representation=graph_description,
executor_addresses=deployments_addresses,
graph_conditions=graph_conditions,
deployments_no_reduce=deployments_no_reduce,
timeout_send=args.timeout_send,
retries=args.retries,
compression=args.compression,
runtime_name=args.name,
prefetch=args.prefetch,
logger=logger,
)
gateway_app = get_fastapi_app(
streamer=streamer,
title=args.title,
description=args.description,
no_debug_endpoints=args.no_debug_endpoints,
no_crud_endpoints=args.no_crud_endpoints,
expose_endpoints=args.expose_endpoints,
expose_graphql_endpoint=args.expose_graphql_endpoint,
cors=args.cors,
logger=logger,
)
gateway_schema = gateway_app.openapi()
gateway_schema['info']['x-logo'] = {'url': JINA_LOGO_URL}
gateway_schema['servers'] = []
gateway_schema['servers'].append(
{'url': f'http://localhost:{args.port}', 'description': 'Local Jina gateway'}
)
with open(GATEWAY_SCHEMA_FILENAME, 'w') as f:
json.dump(gateway_schema, f)
|
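If the goal were to serve the generated app rather than only dump its OpenAPI schema, a rough sketch (assuming `uvicorn` is installed; the port below is a placeholder) would be:

```python
import uvicorn

# serve the FastAPI gateway app built above on a placeholder port
uvicorn.run(gateway_app, host="0.0.0.0", port=12345)
```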
from typing import Optional
from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.storage.kvstore.postgres import PostgresKVStore
class PostgresIndexStore(KVIndexStore):
"""
Postgres Index store.
Args:
postgres_kvstore (PostgresKVStore): Postgres key-value store
namespace (str): namespace for the index store
"""
def __init__(
self,
postgres_kvstore: PostgresKVStore,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> None:
"""Init a PostgresIndexStore."""
super().__init__(
postgres_kvstore, namespace=namespace, collection_suffix=collection_suffix
)
@classmethod
def from_uri(
cls,
uri: str,
namespace: Optional[str] = None,
table_name: str = "indexstore",
schema_name: str = "public",
perform_setup: bool = True,
debug: bool = False,
use_jsonb: bool = False,
collection_suffix: Optional[str] = None,
) -> "PostgresIndexStore":
"""Load a PostgresIndexStore from a PostgresURI."""
postgres_kvstore = PostgresKVStore.from_uri(
uri=uri,
table_name=table_name,
schema_name=schema_name,
perform_setup=perform_setup,
debug=debug,
use_jsonb=use_jsonb,
)
return cls(postgres_kvstore, namespace, collection_suffix)
@classmethod
def from_params(
cls,
host: Optional[str] = None,
port: Optional[str] = None,
database: Optional[str] = None,
user: Optional[str] = None,
password: Optional[str] = None,
namespace: Optional[str] = None,
table_name: str = "indexstore",
schema_name: str = "public",
perform_setup: bool = True,
debug: bool = False,
use_jsonb: bool = False,
collection_suffix: Optional[str] = None,
) -> "PostgresIndexStore":
"""Load a PostgresIndexStore from a Postgres host and port."""
postgres_kvstore = PostgresKVStore.from_params(
host=host,
port=port,
database=database,
user=user,
password=password,
table_name=table_name,
schema_name=schema_name,
perform_setup=perform_setup,
debug=debug,
use_jsonb=use_jsonb,
)
return cls(postgres_kvstore, namespace, collection_suffix)
|
from typing import Optional
from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.storage.kvstore.postgres import PostgresKVStore
class PostgresIndexStore(KVIndexStore):
"""Postgres Index store.
Args:
postgres_kvstore (PostgresKVStore): Postgres key-value store
namespace (str): namespace for the index store
"""
def __init__(
self,
postgres_kvstore: PostgresKVStore,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> None:
"""Init a PostgresIndexStore."""
super().__init__(
postgres_kvstore, namespace=namespace, collection_suffix=collection_suffix
)
@classmethod
def from_uri(
cls,
uri: str,
namespace: Optional[str] = None,
table_name: str = "indexstore",
schema_name: str = "public",
perform_setup: bool = True,
debug: bool = False,
use_jsonb: bool = False,
collection_suffix: Optional[str] = None,
) -> "PostgresIndexStore":
"""Load a PostgresIndexStore from a PostgresURI."""
postgres_kvstore = PostgresKVStore.from_uri(
uri=uri,
table_name=table_name,
schema_name=schema_name,
perform_setup=perform_setup,
debug=debug,
use_jsonb=use_jsonb,
)
return cls(postgres_kvstore, namespace, collection_suffix)
@classmethod
def from_params(
cls,
host: Optional[str] = None,
port: Optional[str] = None,
database: Optional[str] = None,
user: Optional[str] = None,
password: Optional[str] = None,
namespace: Optional[str] = None,
table_name: str = "indexstore",
schema_name: str = "public",
perform_setup: bool = True,
debug: bool = False,
use_jsonb: bool = False,
collection_suffix: Optional[str] = None,
) -> "PostgresIndexStore":
"""Load a PostgresIndexStore from a Postgres host and port."""
postgres_kvstore = PostgresKVStore.from_params(
host=host,
port=port,
database=database,
user=user,
password=password,
table_name=table_name,
schema_name=schema_name,
perform_setup=perform_setup,
debug=debug,
use_jsonb=use_jsonb,
)
return cls(postgres_kvstore, namespace, collection_suffix)
|
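A hedged usage sketch for the store above: building it from a connection URI and handing it to a llama-index `StorageContext`. The connection string and namespace are placeholders, and the import paths are assumptions based on llama-index packaging conventions.

```python
from llama_index.core import StorageContext
from llama_index.storage.index_store.postgres import PostgresIndexStore

# placeholder connection string and namespace
index_store = PostgresIndexStore.from_uri(
    uri="postgresql://user:password@localhost:5432/mydb",
    namespace="my_app",
    use_jsonb=True,
)
storage_context = StorageContext.from_defaults(index_store=index_store)
```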
import pytest
from llama_index.embeddings.modelscope.base import ModelScopeEmbedding
@pytest.fixture()
def modelscope_embedding():
return ModelScopeEmbedding()
@pytest.fixture()
def query():
return "吃完海鲜可以喝牛奶吗?"
@pytest.fixture()
def text():
return [
"不可以,早晨喝牛奶不科学",
"吃了海鲜后是不能再喝牛奶的,因为牛奶中含得有维生素C,如果海鲜喝牛奶一起服用会对人体造成一定的伤害",
"吃海鲜是不能同时喝牛奶吃水果,这个至少间隔6小时以上才可以。",
"吃海鲜是不可以吃柠檬的因为其中的维生素C会和海鲜中的矿物质形成砷",
]
@pytest.mark.single
def test_modelscope_query(modelscope_embedding, query):
sentence_embedding = modelscope_embedding.get_query_embedding(query)
assert sentence_embedding is not None
assert len(sentence_embedding) > 0
@pytest.mark.single
def test_modelscope_text(modelscope_embedding, query):
sentence_embedding = modelscope_embedding.get_text_embedding(query)
assert sentence_embedding is not None
assert len(sentence_embedding) > 0
@pytest.mark.batch
def test_modelscope_text_embedding_batch(modelscope_embedding, text):
sentence_embedding = modelscope_embedding.get_text_embedding_batch(text)
assert sentence_embedding is not None
assert len(sentence_embedding) == len(text)
assert len(sentence_embedding[0]) > 0
@pytest.mark.asyncio
async def test_modelscope_async_query(modelscope_embedding, query):
sentence_embedding = await modelscope_embedding.aget_query_embedding(query)
assert sentence_embedding is not None
assert len(sentence_embedding) > 0
|
import pytest
from llama_index.embeddings.modelscope.base import ModelScopeEmbedding
@pytest.fixture()
def modelscope_embedding():
return ModelScopeEmbedding()
@pytest.fixture()
def query():
return "吃完海鲜可以喝牛奶吗?"
@pytest.fixture()
def text():
return [
"不可以,早晨喝牛奶不科学",
"吃了海鲜后是不能再喝牛奶的,因为牛奶中含得有维生素C,如果海鲜喝牛奶一起服用会对人体造成一定的伤害",
"吃海鲜是不能同时喝牛奶吃水果,这个至少间隔6小时以上才可以。",
"吃海鲜是不可以吃柠檬的因为其中的维生素C会和海鲜中的矿物质形成砷",
]
@pytest.mark.single()
def test_modelscope_query(modelscope_embedding, query):
sentence_embedding = modelscope_embedding.get_query_embedding(query)
assert sentence_embedding is not None
assert len(sentence_embedding) > 0
@pytest.mark.single()
def test_modelscope_text(modelscope_embedding, query):
sentence_embedding = modelscope_embedding.get_text_embedding(query)
assert sentence_embedding is not None
assert len(sentence_embedding) > 0
@pytest.mark.batch()
def test_modelscope_text_embedding_batch(modelscope_embedding, text):
sentence_embedding = modelscope_embedding.get_text_embedding_batch(text)
assert sentence_embedding is not None
assert len(sentence_embedding) == len(text)
assert len(sentence_embedding[0]) > 0
@pytest.mark.asyncio()
async def test_modelscope_async_query(modelscope_embedding, query):
sentence_embedding = await modelscope_embedding.aget_query_embedding(query)
assert sentence_embedding is not None
assert len(sentence_embedding) > 0
|
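The `single` and `batch` markers used above are custom, so a minimal sketch of registering them to avoid pytest's unknown-marker warnings (the real project may declare them in pyproject.toml instead; the descriptions are illustrative):

```python
# conftest.py -- a sketch; marker descriptions are illustrative
def pytest_configure(config):
    config.addinivalue_line("markers", "single: tests that embed a single sentence")
    config.addinivalue_line("markers", "batch: tests that embed a batch of sentences")
```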
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
input_size = 300
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=[123.675, 116.28, 103.53],
to_rgb=True,
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8,
num_workers=2,
batch_sampler=None,
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=8,
num_workers=2,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4))
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
# TODO support auto_scale_lr
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
# auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
input_size = 300
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=[123.675, 116.28, 103.53],
to_rgb=True,
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8,
num_workers=2,
batch_sampler=None,
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=8,
num_workers=2,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
# TODO support auto_scale_lr
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
# auto_scale_lr = dict(base_batch_size=64)
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
from typing import List, Union
import torch
import torch.nn as nn
from mmengine.config import Config, ConfigDict
from mmengine.registry import OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS
from .optimizer_wrapper import OptimWrapper
def register_torch_optimizers() -> List[str]:
"""Register optimizers in ``torch.optim`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
torch_optimizers = []
for module_name in dir(torch.optim):
if module_name.startswith('__'):
continue
_optim = getattr(torch.optim, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
torch_optimizers.append(module_name)
return torch_optimizers
TORCH_OPTIMIZERS = register_torch_optimizers()
def build_optim_wrapper(model: nn.Module,
cfg: Union[dict, Config, ConfigDict]) -> OptimWrapper:
"""Build function of OptimWrapper.
If ``constructor`` is set in the ``cfg``, this method will build an
optimizer wrapper constructor, and use optimizer wrapper constructor to
build the optimizer wrapper. If ``constructor`` is not set, the
``DefaultOptimWrapperConstructor`` will be used by default.
Args:
model (nn.Module): Model to be optimized.
cfg (dict): Config of optimizer wrapper, optimizer constructor and
optimizer.
Returns:
OptimWrapper: The built optimizer wrapper.
"""
optim_wrapper_cfg = copy.deepcopy(cfg)
constructor_type = optim_wrapper_cfg.pop('constructor',
'DefaultOptimWrapperConstructor')
paramwise_cfg = optim_wrapper_cfg.pop('paramwise_cfg', None)
optim_wrapper_constructor = OPTIM_WRAPPER_CONSTRUCTORS.build(
dict(
type=constructor_type,
optim_wrapper_cfg=optim_wrapper_cfg,
paramwise_cfg=paramwise_cfg))
optim_wrapper = optim_wrapper_constructor(model)
return optim_wrapper
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
from typing import List
import torch
import torch.nn as nn
from mmengine.registry import OPTIMIZER_CONSTRUCTORS, OPTIMIZERS
def register_torch_optimizers() -> List[str]:
"""Register optimizers in ``torch.optim`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
torch_optimizers = []
for module_name in dir(torch.optim):
if module_name.startswith('__'):
continue
_optim = getattr(torch.optim, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
torch_optimizers.append(module_name)
return torch_optimizers
TORCH_OPTIMIZERS = register_torch_optimizers()
def build_optimizer(model: nn.Module, cfg: dict) -> torch.optim.Optimizer:
"""Build function of optimizer.
If ``constructor`` is set in the ``cfg``, this method will build an
optimizer constructor, and use optimizer constructor to build the
optimizer. If ``constructor`` is not set, the
``DefaultOptimizerConstructor`` will be used by default.
Args:
model (nn.Module): Model to be optimized.
cfg (dict): Config of optimizer and optimizer constructor.
default_scope (str, optional): The ``default_scope`` is used to
reset the current registry. Defaults to None.
Returns:
torch.optim.Optimizer: The built optimizer.
"""
optimizer_cfg = copy.deepcopy(cfg)
constructor_type = optimizer_cfg.pop('constructor',
'DefaultOptimizerConstructor')
paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None)
optim_constructor = OPTIMIZER_CONSTRUCTORS.build(
dict(
type=constructor_type,
optimizer_cfg=optimizer_cfg,
paramwise_cfg=paramwise_cfg))
optimizer = optim_constructor(model)
return optimizer
|
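To make the contrast between the two builders concrete, here is a hedged sketch of calling the newer `build_optim_wrapper` on a toy model. The import path and config values are assumptions in line with mmengine conventions, not taken from the snippets above.

```python
import torch.nn as nn
from mmengine.optim import build_optim_wrapper  # assumed public re-export

model = nn.Linear(16, 4)
cfg = dict(
    type='OptimWrapper',
    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=5e-4),
)
optim_wrapper = build_optim_wrapper(model, cfg)
print(type(optim_wrapper).__name__)  # expected: OptimWrapper
```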
"""
This script demonstrates how to train a Sparse Encoder model for Information Retrieval.
As the dataset, we use sentence-transformers/msmarco-bm25, which provides MS MARCO triplets mined with BM25.
As the loss function, we use MultipleNegativesRankingLoss wrapped in SpladeLoss.
"""
import logging
import traceback
from datasets import load_dataset
from sentence_transformers import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.sparse_encoder import evaluation, losses
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
model_name = "distilbert/distilbert-base-uncased"
train_batch_size = 12
num_epochs = 1
lambda_query = 5e-5
lambda_corpus = 3e-5
learning_rate = 2e-5
# 1. Define our SparseEncoder model
model = SparseEncoder(
model_name,
model_card_data=SparseEncoderModelCardData(
language="en",
license="apache-2.0",
model_name="splade-distilbert-base-uncased trained on Quora Duplicates Questions",
),
)
model.max_seq_length = 256 # Set the max sequence length to 256 for the training
logging.info("Model max length: %s", model.max_seq_length)
# 2. Load the MS MARCO dataset: https://huggingface.co/datasets/sentence-transformers/msmarco-bm25
logging.info("Read the MS MARCO training dataset")
full_dataset = load_dataset("sentence-transformers/msmarco-bm25", "triplet", split="train").select(range(100000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Define our training loss
loss = losses.SpladeLoss(
model=model,
loss=losses.SparseMultipleNegativesRankingLoss(model=model),
lambda_query=lambda_query, # Weight for query loss
lambda_corpus=lambda_corpus, # Weight for document loss
)
# 4. Define the evaluator. We use the SparseNanoBEIREvaluator, which is a light-weight evaluator for English
evaluator = evaluation.SparseNanoBEIREvaluator(
dataset_names=["msmarco", "nfcorpus", "nq"], batch_size=train_batch_size
)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"splade-{short_model_name}-msmarco-mrl"
args = SparseEncoderTrainingArguments(
# Required parameter:
output_dir=f"models/{run_name}",
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
learning_rate=learning_rate,
load_best_model_at_end=True,
metric_for_best_model="eval_NanoBEIR_mean_dot_ndcg@10",
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=1650,
save_strategy="steps",
save_steps=1650,
save_total_limit=2,
logging_steps=200,
run_name=run_name, # Will be used in W&B if `wandb` is installed
seed=42,
)
# 6. Create the trainer & start training
trainer = SparseEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=evaluator,
)
trainer.train()
# 7. Evaluate the final model, using the complete NanoBEIR dataset
test_evaluator = evaluation.SparseNanoBEIREvaluator(show_progress_bar=True, batch_size=train_batch_size)
test_evaluator(model)
# 8. Save the final model
final_output_dir = f"models/{run_name}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SparseEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
if __name__ == "__main__":
main()
|
"""
This script demonstrates how to train a Sparse Encoder model for Information Retrieval.
As the dataset, we use sentence-transformers/msmarco-bm25, which provides MS MARCO triplets mined with BM25.
As the loss function, we use MultipleNegativesRankingLoss wrapped in SpladeLoss.
"""
import logging
import traceback
from datasets import load_dataset
from sentence_transformers import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.sparse_encoder import evaluation, losses
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
model_name = "distilbert/distilbert-base-uncased"
train_batch_size = 12
num_epochs = 1
lambda_query = 5e-5
lambda_corpus = 3e-5
learning_rate = 2e-5
# 1. Define our SparseEncoder model
model = SparseEncoder(
model_name,
model_card_data=SparseEncoderModelCardData(
language="en",
license="apache-2.0",
model_name="splade-distilbert-base-uncased trained on Quora Duplicates Questions",
),
)
model.max_seq_length = 256 # Set the max sequence length to 256 for the training
logging.info("Model max length: %s", model.max_seq_length)
# 2. Load the MS MARCO dataset: https://huggingface.co/datasets/sentence-transformers/msmarco-bm25
logging.info("Read the MS MARCO training dataset")
full_dataset = load_dataset("sentence-transformers/quora-duplicates", "triplet", split="train").select(
range(100000)
)
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Define our training loss
loss = losses.SpladeLoss(
model=model,
loss=losses.SparseMultipleNegativesRankingLoss(model=model),
lambda_query=lambda_query, # Weight for query loss
lambda_corpus=lambda_corpus, # Weight for document loss
)
# 4. Define the evaluator. We use the SparseNanoBEIREvaluator, which is a light-weight evaluator for English
evaluator = evaluation.SparseNanoBEIREvaluator(
dataset_names=["msmarco", "nfcorpus", "nq"], batch_size=train_batch_size
)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"splade-{short_model_name}-msmarco-mrl"
args = SparseEncoderTrainingArguments(
# Required parameter:
output_dir=f"models/{run_name}",
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
learning_rate=learning_rate,
load_best_model_at_end=True,
metric_for_best_model="eval_NanoBEIR_mean_dot_ndcg@10",
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=1650,
save_strategy="steps",
save_steps=1650,
save_total_limit=2,
logging_steps=200,
run_name=run_name, # Will be used in W&B if `wandb` is installed
seed=42,
)
# 6. Create the trainer & start training
trainer = SparseEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=evaluator,
)
trainer.train()
# 7. Evaluate the final model, using the complete NanoBEIR dataset
test_evaluator = evaluation.SparseNanoBEIREvaluator(show_progress_bar=True, batch_size=train_batch_size)
test_evaluator(model)
# 8. Save the final model
final_output_dir = f"models/{run_name}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SparseEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
if __name__ == "__main__":
main()
|
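Once a run like the one above finishes, reloading the saved encoder for inference could look like this sketch. The path follows the `models/{run_name}/final` convention in the script, and the query text is illustrative.

```python
from sentence_transformers import SparseEncoder

# path assumes the run_name/final layout used in the training script above
model = SparseEncoder("models/splade-distilbert-base-uncased-msmarco-mrl/final")
embeddings = model.encode(["what is a sparse encoder?"])
print(embeddings.shape)  # sparse embeddings over the model's vocabulary
```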
model = dict(
detector=dict(
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=18,
base_channels=2,
num_stages=4,
out_indices=(3, ),
strides=(1, 2, 2, 1),
dilations=(1, 1, 1, 2),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='ChannelMapper',
in_channels=[16],
out_channels=16,
kernel_size=3),
rpn_head=dict(
type='RPNHead',
in_channels=16,
feat_channels=16,
anchor_generator=dict(
type='AnchorGenerator',
scales=[4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=16,
featmap_strides=[16]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=16,
fc_out_channels=32,
roi_feat_size=7,
num_classes=30,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.2, 0.2, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0))),
# detector training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=6000,
max_per_img=600,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=6000,
max_per_img=300,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.0001,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
))
|
model = dict(
detector=dict(
type='FasterRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
strides=(1, 2, 2, 1),
dilations=(1, 1, 1, 2),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='ChannelMapper',
in_channels=[2048],
out_channels=512,
kernel_size=3),
rpn_head=dict(
type='RPNHead',
in_channels=512,
feat_channels=512,
anchor_generator=dict(
type='AnchorGenerator',
scales=[4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=512,
featmap_strides=[16]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=512,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=30,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.2, 0.2, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0))),
# detector training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=6000,
max_per_img=600,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=6000,
max_per_img=300,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.0001,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
))
|
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.AlphaDropout")
class AlphaDropout(Layer):
"""Applies Alpha Dropout to the input.
Alpha Dropout is a `Dropout` that keeps mean and variance of inputs
to their original values, in order to ensure the self-normalizing property
even after this dropout.
Alpha Dropout fits well to Scaled Exponential Linear Units (SELU) by
randomly setting activations to the negative saturation value.
Args:
rate: Float between 0 and 1. The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
noise_shape: 1D integer tensor representing the shape of the
binary alpha dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)` and
you want the alpha dropout mask to be the same for all timesteps,
you can use `noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding alpha dropout) or in inference mode
(doing nothing).
"""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super().__init__(**kwargs)
if not 0 <= rate <= 1:
raise ValueError(
f"Invalid value received for argument "
"`rate`. Expected a float value between 0 and 1. "
f"Received: rate={rate}"
)
self.rate = rate
self.seed = seed
self.noise_shape = noise_shape
if rate > 0:
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
self.built = True
def call(self, inputs, training=False):
if training and self.rate > 0:
noise_shape = self._get_concrete_noise_shape(
inputs, self.noise_shape
)
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
alpha_p = -alpha * scale
kept_idx = ops.greater_equal(
ops.random.uniform(noise_shape, seed=self.seed_generator),
self.rate,
)
kept_idx = ops.cast(kept_idx, inputs.dtype)
# Compute affine transformation parameters
a = ((1 - self.rate) * (1 + self.rate * alpha_p**2)) ** -0.5
b = -a * alpha_p * self.rate
# Apply mask
x = inputs * kept_idx + alpha_p * (1 - kept_idx)
return a * x + b
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def _get_concrete_noise_shape(self, inputs, noise_shape):
if noise_shape is None:
return ops.shape(inputs)
concrete_inputs_shape = ops.shape(inputs)
concrete_noise_shape = []
for i, value in enumerate(noise_shape):
concrete_noise_shape.append(
concrete_inputs_shape[i] if value is None else value
)
return concrete_noise_shape
def get_config(self):
base_config = super().get_config()
config = {
"rate": self.rate,
"seed": self.seed,
"noise_shape": self.noise_shape,
}
return {**base_config, **config}
|
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.AlphaDropout")
class AlphaDropout(Layer):
"""Applies Alpha Dropout to the input.
Alpha Dropout is a `Dropout` that keeps mean and variance of inputs
to their original values, in order to ensure the self-normalizing property
even after this dropout.
Alpha Dropout fits well to Scaled Exponential Linear Units (SELU) by
randomly setting activations to the negative saturation value.
Args:
rate: Float between 0 and 1. The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
noise_shape: 1D integer tensor representing the shape of the
binary alpha dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)` and
you want the alpha dropout mask to be the same for all timesteps,
you can use `noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding alpha dropout) or in inference mode
(doing nothing).
"""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super().__init__(**kwargs)
if not 0 <= rate <= 1:
raise ValueError(
f"Invalid value received for argument "
"`rate`. Expected a float value between 0 and 1. "
f"Received: rate={rate}"
)
self.rate = rate
self.seed = seed
self.noise_shape = noise_shape
if rate > 0:
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
def call(self, inputs, training=False):
if training and self.rate > 0:
noise_shape = self._get_concrete_noise_shape(
inputs, self.noise_shape
)
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
alpha_p = -alpha * scale
kept_idx = ops.greater_equal(
ops.random.uniform(noise_shape, seed=self.seed_generator),
self.rate,
)
kept_idx = ops.cast(kept_idx, inputs.dtype)
# Compute affine transformation parameters
a = ((1 - self.rate) * (1 + self.rate * alpha_p**2)) ** -0.5
b = -a * alpha_p * self.rate
# Apply mask
x = inputs * kept_idx + alpha_p * (1 - kept_idx)
return a * x + b
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def _get_concrete_noise_shape(self, inputs, noise_shape):
if noise_shape is None:
return ops.shape(inputs)
concrete_inputs_shape = ops.shape(inputs)
concrete_noise_shape = []
for i, value in enumerate(noise_shape):
concrete_noise_shape.append(
concrete_inputs_shape[i] if value is None else value
)
return concrete_noise_shape
def get_config(self):
base_config = super().get_config()
config = {
"rate": self.rate,
"seed": self.seed,
"noise_shape": self.noise_shape,
}
return {**base_config, **config}
|
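A brief usage sketch for the layer above, assuming a Keras 3 backend is available (shapes and rate are illustrative): in inference mode the layer is an identity, while in training mode it applies the affine-corrected mask so the input's mean and variance are approximately preserved.

```python
import numpy as np
from keras import layers

x = np.ones((4, 8), dtype="float32")
alpha_dropout = layers.AlphaDropout(rate=0.2, seed=0)

y_infer = alpha_dropout(x, training=False)  # identity
y_train = alpha_dropout(x, training=True)   # masked + rescaled
```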
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
# 270k iterations with batch_size 64 is roughly equivalent to 144 epochs
'../common/ssj_scp_270k_coco_instance.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
backbone=dict(frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2), # leads to 0.1+ mAP
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
# 270k iterations with batch_size 64 is roughly equivalent to 144 epochs
'../common/ssj_scp_270k_coco_instance.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# the model is trained from scratch, so init_cfg is None
backbone=dict(frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2), # leads to 0.1+ mAP
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
|
# training schedule for 20e
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=20, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=20,
by_epoch=True,
milestones=[16, 19],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
# training schedule for 20e
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=20, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=20,
by_epoch=True,
milestones=[16, 19],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
|
from typing import Any, Optional
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult
from pytest_mock import MockerFixture
from syrupy.assertion import SnapshotAssertion
from langchain.runnables.openai_functions import OpenAIFunctionsRouter
class FakeChatOpenAI(BaseChatModel):
@property
def _llm_type(self) -> str:
return "fake-openai-chat-model"
def _generate(
self,
messages: list[BaseMessage],
stop: Optional[list[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
return ChatResult(
generations=[
ChatGeneration(
message=AIMessage(
content="",
additional_kwargs={
"function_call": {
"name": "accept",
"arguments": '{\n "draft": "turtles"\n}',
}
},
)
)
]
)
def test_openai_functions_router(
snapshot: SnapshotAssertion, mocker: MockerFixture
) -> None:
revise = mocker.Mock(
side_effect=lambda kw: f"Revised draft: no more {kw['notes']}!"
)
accept = mocker.Mock(side_effect=lambda kw: f"Accepted draft: {kw['draft']}!")
router = OpenAIFunctionsRouter(
{
"revise": revise,
"accept": accept,
},
functions=[
{
"name": "revise",
"description": "Sends the draft for revision.",
"parameters": {
"type": "object",
"properties": {
"notes": {
"type": "string",
"description": "The editor's notes to guide the revision.",
},
},
},
},
{
"name": "accept",
"description": "Accepts the draft.",
"parameters": {
"type": "object",
"properties": {
"draft": {
"type": "string",
"description": "The draft to accept.",
},
},
},
},
],
)
model = FakeChatOpenAI()
chain = model.bind(functions=router.functions) | router
assert router.functions == snapshot
assert chain.invoke("Something about turtles?") == "Accepted draft: turtles!"
revise.assert_not_called()
accept.assert_called_once_with({"draft": "turtles"})
|
from typing import Any, Optional
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult
from pytest_mock import MockerFixture
from syrupy import SnapshotAssertion
from langchain.runnables.openai_functions import OpenAIFunctionsRouter
class FakeChatOpenAI(BaseChatModel):
@property
def _llm_type(self) -> str:
return "fake-openai-chat-model"
def _generate(
self,
messages: list[BaseMessage],
stop: Optional[list[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
return ChatResult(
generations=[
ChatGeneration(
message=AIMessage(
content="",
additional_kwargs={
"function_call": {
"name": "accept",
"arguments": '{\n "draft": "turtles"\n}',
}
},
)
)
]
)
def test_openai_functions_router(
snapshot: SnapshotAssertion, mocker: MockerFixture
) -> None:
revise = mocker.Mock(
side_effect=lambda kw: f"Revised draft: no more {kw['notes']}!"
)
accept = mocker.Mock(side_effect=lambda kw: f"Accepted draft: {kw['draft']}!")
router = OpenAIFunctionsRouter(
{
"revise": revise,
"accept": accept,
},
functions=[
{
"name": "revise",
"description": "Sends the draft for revision.",
"parameters": {
"type": "object",
"properties": {
"notes": {
"type": "string",
"description": "The editor's notes to guide the revision.",
},
},
},
},
{
"name": "accept",
"description": "Accepts the draft.",
"parameters": {
"type": "object",
"properties": {
"draft": {
"type": "string",
"description": "The draft to accept.",
},
},
},
},
],
)
model = FakeChatOpenAI()
chain = model.bind(functions=router.functions) | router
assert router.functions == snapshot
assert chain.invoke("Something about turtles?") == "Accepted draft: turtles!"
revise.assert_not_called()
accept.assert_called_once_with({"draft": "turtles"})
|
from setuptools import find_packages, setup
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="3.0.0.dev0",
author="Nils Reimers",
author_email="info@nils-reimers.de",
description="Multilingual text embeddings",
long_description=readme,
long_description_content_type="text/markdown",
license="Apache License 2.0",
url="https://www.SBERT.net",
download_url="https://github.com/UKPLab/sentence-transformers/",
packages=find_packages(),
include_package_data=True,
python_requires=">=3.8.0",
install_requires=[
"transformers>=4.34.0,<5.0.0",
"tqdm",
"torch>=1.11.0",
"numpy",
"scikit-learn",
"scipy",
"huggingface-hub>=0.15.1",
"Pillow",
],
extras_require={
"train": [
"datasets",
"accelerate>=0.20.3",
],
"dev": [
"datasets",
"accelerate>=0.20.3",
"pre-commit",
"pytest",
"ruff>=0.3.0",
],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning",
)
|
from setuptools import setup, find_packages
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="2.8.0.dev0",
author="Nils Reimers",
author_email="info@nils-reimers.de",
description="Multilingual text embeddings",
long_description=readme,
long_description_content_type="text/markdown",
license="Apache License 2.0",
url="https://www.SBERT.net",
download_url="https://github.com/UKPLab/sentence-transformers/",
packages=find_packages(),
python_requires=">=3.8.0",
install_requires=[
"transformers>=4.34.0,<5.0.0",
"tqdm",
"torch>=1.11.0",
"numpy",
"scikit-learn",
"scipy",
"huggingface-hub>=0.15.1",
"Pillow",
],
extras_require={
"dev": [
"pre-commit",
"pytest",
"ruff>=0.3.0",
],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning",
)
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from torchaudio_unittest.prototype.conv_emformer_test_impl import ConvEmformerTestImpl
@skipIfNoCuda
class ConvEmformerFloat32GPUTest(ConvEmformerTestImpl, PytorchTestCase):
dtype = torch.float32
device = torch.device("cuda")
@skipIfNoCuda
class ConvEmformerFloat64GPUTest(ConvEmformerTestImpl, PytorchTestCase):
dtype = torch.float64
device = torch.device("cuda")
|
import torch
from torchaudio_unittest.common_utils import skipIfNoCuda, PytorchTestCase
from torchaudio_unittest.prototype.conv_emformer_test_impl import ConvEmformerTestImpl
@skipIfNoCuda
class ConvEmformerFloat32GPUTest(ConvEmformerTestImpl, PytorchTestCase):
dtype = torch.float32
device = torch.device("cuda")
@skipIfNoCuda
class ConvEmformerFloat64GPUTest(ConvEmformerTestImpl, PytorchTestCase):
dtype = torch.float64
device = torch.device("cuda")
|
# Copyright (c) OpenMMLab. All rights reserved.
import importlib
import os.path as osp
from mmengine.config import Config
from mmengine.config.utils import (_get_cfg_metainfo,
_get_external_cfg_base_path,
_get_package_and_cfg_path)
from mmengine.registry import MODELS, DefaultScope
from mmengine.runner import load_checkpoint
from mmengine.utils import check_install_package, get_installed_path
def get_config(cfg_path: str, pretrained: bool = False) -> Config:
"""Get config from external package.
Args:
cfg_path (str): External relative config path.
pretrained (bool): Whether to save pretrained model path. If
``pretrained==True``, the url of pretrained model can be accessed
by ``cfg.model_path``. Defaults to False.
Examples:
>>> cfg = get_config('mmdet::faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py', pretrained=True)
>>> # Equivalent to
>>> # cfg = Config.fromfile('/path/to/faster_rcnn_r50_fpn_1x_coco.py')
>>> cfg.model_path
https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth
Returns:
Config: A `Config` parsed from external package.
""" # noqa E301
# Get package name and relative config path.
package, cfg_path = _get_package_and_cfg_path(cfg_path)
# Check package is installed.
check_install_package(package)
package_path = get_installed_path(package)
try:
# Use `cfg_path` to search target config file.
cfg_meta = _get_cfg_metainfo(package_path, cfg_path)
cfg_path = osp.join(package_path, '.mim', cfg_meta['Config'])
cfg = Config.fromfile(cfg_path)
if pretrained:
assert 'Weights' in cfg_meta, ('Cannot find `Weights` in cfg_file'
'.metafile.yml, please check the'
'metafile')
cfg.model_path = cfg_meta['Weights']
except ValueError:
# Since the base config does not contain a metafile, the absolute
# config is `osp.join(package_path, cfg_path_prefix, cfg_name)`
cfg_path = _get_external_cfg_base_path(package_path, cfg_path)
cfg = Config.fromfile(cfg_path)
except Exception as e:
raise e
return cfg
def get_model(cfg_path: str, pretrained: bool = False, **kwargs):
"""Get built model from external package.
Args:
cfg_path (str): External relative config path with prefix
'package::' and without suffix.
pretrained (bool): Whether to load pretrained model. Defaults to False.
kwargs (dict): Default arguments to build model.
Examples:
>>> model = get_model('mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py', pretrained=True)
>>> type(model)
<class 'mmdet.models.detectors.faster_rcnn.FasterRCNN'>
Returns:
nn.Module: Built model.
""" # noqa E301
package = cfg_path.split('::')[0]
with DefaultScope.overwrite_default_scope(package): # type: ignore
cfg = get_config(cfg_path, pretrained)
models_module = importlib.import_module(f'{package}.utils')
models_module.register_all_modules() # type: ignore
model = MODELS.build(cfg.model, default_args=kwargs)
if pretrained:
load_checkpoint(model, cfg.model_path)
return model
|
# Copyright (c) OpenMMLab. All rights reserved.
import importlib
import os.path as osp
from mmengine.config import Config
from mmengine.config.utils import (_get_cfg_metainfo,
_get_external_cfg_base_path,
_get_package_and_cfg_path)
from mmengine.registry import MODELS, DefaultScope
from mmengine.runner import load_checkpoint
from mmengine.utils import check_install_package, get_installed_path
def get_config(cfg_path: str, pretrained: bool = False) -> Config:
"""Get config from external package.
Args:
cfg_path (str): External relative config path.
pretrained (bool): Whether to save pretrained model path. If
``pretrained==True``, the url of pretrained model can be accessed
by ``cfg.model_path``. Defaults to False.
Examples:
>>> cfg = get_config('mmdet::faster_rcnn/faster_rcnn_r50_fpn_1x_coco',
>>> pretrained=True)
>>> # Equivalent to
>>> Config.fromfile('/path/to/faster_rcnn_r50_fpn_1x_coco.py')
>>> cfg.model_path
https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth
Returns:
Config: A `Config` parsed from external package.
""" # noqa E301
# Get package name and relative config path.
package, cfg_path = _get_package_and_cfg_path(cfg_path)
# Check package is installed.
check_install_package(package)
package_path = get_installed_path(package)
try:
# Use `cfg_path` to search target config file.
cfg_meta = _get_cfg_metainfo(package_path, cfg_path)
cfg_path = osp.join(package_path, '.mim', cfg_meta['Config'])
cfg = Config.fromfile(cfg_path)
if pretrained:
assert 'Weights' in cfg_meta, ('Cannot find `Weights` in cfg_file'
'.metafile.yml, please check the'
'metafile')
cfg.model_path = cfg_meta['Weights']
except ValueError:
# Since the base config does not contain a metafile, the absolute
# config is `osp.join(package_path, cfg_path_prefix, cfg_name)`
cfg_path = _get_external_cfg_base_path(package_path, cfg_path)
cfg = Config.fromfile(cfg_path)
except Exception as e:
raise e
return cfg
def get_model(cfg_path: str, pretrained: bool = False, **kwargs):
"""Get built model from external package.
Args:
cfg_path (str): External relative config path with prefix
'package::' and without suffix.
pretrained (bool): Whether to load pretrained model. Defaults to False.
kwargs (dict): Default arguments to build model.
Returns:
nn.Module: Built model.
"""
package = cfg_path.split('::')[0]
with DefaultScope.overwrite_default_scope(package): # type: ignore
cfg = get_config(cfg_path, pretrained)
models_module = importlib.import_module(f'{package}.utils')
models_module.register_all_modules() # type: ignore
model = MODELS.build(cfg.model, default_args=kwargs)
if pretrained:
load_checkpoint(model, cfg.model_path)
return model
|
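A minimal usage sketch for the `get_config`/`get_model` helpers defined in the records above. The import path (`mmengine.hub`) and the `mmdet::` config name are assumptions for illustration, and the snippet presumes the `mmdet` package is installed.

# Hypothetical usage of the helpers above; assumes mmengine and mmdet are installed.
from mmengine.hub import get_config, get_model  # assumed public import location

cfg = get_config('mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py', pretrained=True)
print(cfg.model_path)  # URL of the checkpoint recorded in the package metafile
model = get_model('mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py', pretrained=True)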
import os
from source_separation.utils.dataset import wsj0mix
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
_FILENAMES = [
"012c0207_1.9952_01cc0202_-1.9952.wav",
"01co0302_1.63_014c020q_-1.63.wav",
"01do0316_0.24011_205a0104_-0.24011.wav",
"01lc020x_1.1301_027o030r_-1.1301.wav",
"01mc0202_0.34056_205o0106_-0.34056.wav",
"01nc020t_0.53821_018o030w_-0.53821.wav",
"01po030f_2.2136_40ko031a_-2.2136.wav",
"01ra010o_2.4098_403a010f_-2.4098.wav",
"01xo030b_0.22377_016o031a_-0.22377.wav",
"02ac020x_0.68566_01ec020b_-0.68566.wav",
"20co010m_0.82801_019c0212_-0.82801.wav",
"20da010u_1.2483_017c0211_-1.2483.wav",
"20oo010d_1.0631_01ic020s_-1.0631.wav",
"20sc0107_2.0222_20fo010h_-2.0222.wav",
"20tc010f_0.051456_404a0110_-0.051456.wav",
"407c0214_1.1712_02ca0113_-1.1712.wav",
"40ao030w_2.4697_20vc010a_-2.4697.wav",
"40pa0101_1.1087_40ea0107_-1.1087.wav",
]
def _mock_dataset(root_dir, num_speaker):
dirnames = ["mix"] + [f"s{i+1}" for i in range(num_speaker)]
for dirname in dirnames:
os.makedirs(os.path.join(root_dir, dirname), exist_ok=True)
seed = 0
sample_rate = 8000
expected = []
for filename in _FILENAMES:
mix = None
src = []
for dirname in dirnames:
waveform = get_whitenoise(sample_rate=8000, duration=1, n_channels=1, dtype="int16", seed=seed)
seed += 1
path = os.path.join(root_dir, dirname, filename)
save_wav(path, waveform, sample_rate)
waveform = normalize_wav(waveform)
if dirname == "mix":
mix = waveform
else:
src.append(waveform)
expected.append((sample_rate, mix, src))
return expected
class TestWSJ0Mix2(TempDirMixin, TorchaudioTestCase):
root_dir = None
expected = None
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.expected = _mock_dataset(cls.root_dir, 2)
def test_wsj0mix(self):
dataset = wsj0mix.WSJ0Mix(self.root_dir, num_speakers=2, sample_rate=8000)
n_ite = 0
for i, sample in enumerate(dataset):
(_, sample_mix, sample_src) = sample
(_, expected_mix, expected_src) = self.expected[i]
self.assertEqual(sample_mix, expected_mix, atol=5e-5, rtol=1e-8)
self.assertEqual(sample_src[0], expected_src[0], atol=5e-5, rtol=1e-8)
self.assertEqual(sample_src[1], expected_src[1], atol=5e-5, rtol=1e-8)
n_ite += 1
assert n_ite == len(self.expected)
class TestWSJ0Mix3(TempDirMixin, TorchaudioTestCase):
root_dir = None
expected = None
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.expected = _mock_dataset(cls.root_dir, 3)
def test_wsj0mix(self):
dataset = wsj0mix.WSJ0Mix(self.root_dir, num_speakers=3, sample_rate=8000)
n_ite = 0
for i, sample in enumerate(dataset):
(_, sample_mix, sample_src) = sample
(_, expected_mix, expected_src) = self.expected[i]
self.assertEqual(sample_mix, expected_mix, atol=5e-5, rtol=1e-8)
self.assertEqual(sample_src[0], expected_src[0], atol=5e-5, rtol=1e-8)
self.assertEqual(sample_src[1], expected_src[1], atol=5e-5, rtol=1e-8)
self.assertEqual(sample_src[2], expected_src[2], atol=5e-5, rtol=1e-8)
n_ite += 1
assert n_ite == len(self.expected)
|
import os
from source_separation.utils.dataset import wsj0mix
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
_FILENAMES = [
"012c0207_1.9952_01cc0202_-1.9952.wav",
"01co0302_1.63_014c020q_-1.63.wav",
"01do0316_0.24011_205a0104_-0.24011.wav",
"01lc020x_1.1301_027o030r_-1.1301.wav",
"01mc0202_0.34056_205o0106_-0.34056.wav",
"01nc020t_0.53821_018o030w_-0.53821.wav",
"01po030f_2.2136_40ko031a_-2.2136.wav",
"01ra010o_2.4098_403a010f_-2.4098.wav",
"01xo030b_0.22377_016o031a_-0.22377.wav",
"02ac020x_0.68566_01ec020b_-0.68566.wav",
"20co010m_0.82801_019c0212_-0.82801.wav",
"20da010u_1.2483_017c0211_-1.2483.wav",
"20oo010d_1.0631_01ic020s_-1.0631.wav",
"20sc0107_2.0222_20fo010h_-2.0222.wav",
"20tc010f_0.051456_404a0110_-0.051456.wav",
"407c0214_1.1712_02ca0113_-1.1712.wav",
"40ao030w_2.4697_20vc010a_-2.4697.wav",
"40pa0101_1.1087_40ea0107_-1.1087.wav",
]
def _mock_dataset(root_dir, num_speaker):
dirnames = ["mix"] + [f"s{i+1}" for i in range(num_speaker)]
for dirname in dirnames:
os.makedirs(os.path.join(root_dir, dirname), exist_ok=True)
seed = 0
sample_rate = 8000
expected = []
for filename in _FILENAMES:
mix = None
src = []
for dirname in dirnames:
waveform = get_whitenoise(sample_rate=8000, duration=1, n_channels=1, dtype="int16", seed=seed)
seed += 1
path = os.path.join(root_dir, dirname, filename)
save_wav(path, waveform, sample_rate)
waveform = normalize_wav(waveform)
if dirname == "mix":
mix = waveform
else:
src.append(waveform)
expected.append((sample_rate, mix, src))
return expected
class TestWSJ0Mix2(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
expected = None
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.expected = _mock_dataset(cls.root_dir, 2)
def test_wsj0mix(self):
dataset = wsj0mix.WSJ0Mix(self.root_dir, num_speakers=2, sample_rate=8000)
n_ite = 0
for i, sample in enumerate(dataset):
(_, sample_mix, sample_src) = sample
(_, expected_mix, expected_src) = self.expected[i]
self.assertEqual(sample_mix, expected_mix, atol=5e-5, rtol=1e-8)
self.assertEqual(sample_src[0], expected_src[0], atol=5e-5, rtol=1e-8)
self.assertEqual(sample_src[1], expected_src[1], atol=5e-5, rtol=1e-8)
n_ite += 1
assert n_ite == len(self.expected)
class TestWSJ0Mix3(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
expected = None
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.expected = _mock_dataset(cls.root_dir, 3)
def test_wsj0mix(self):
dataset = wsj0mix.WSJ0Mix(self.root_dir, num_speakers=3, sample_rate=8000)
n_ite = 0
for i, sample in enumerate(dataset):
(_, sample_mix, sample_src) = sample
(_, expected_mix, expected_src) = self.expected[i]
self.assertEqual(sample_mix, expected_mix, atol=5e-5, rtol=1e-8)
self.assertEqual(sample_src[0], expected_src[0], atol=5e-5, rtol=1e-8)
self.assertEqual(sample_src[1], expected_src[1], atol=5e-5, rtol=1e-8)
self.assertEqual(sample_src[2], expected_src[2], atol=5e-5, rtol=1e-8)
n_ite += 1
assert n_ite == len(self.expected)
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.image.image_tensor import ImageTensor
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
tf = import_library('tensorflow', raise_error=False)
torch = import_library('torch', raise_error=False)
T = TypeVar('T', bound='ImageDoc')
class ImageDoc(BaseDoc):
"""
Document for handling images.
It can contain:
- an [`ImageUrl`][docarray.typing.url.ImageUrl] (`Image.url`)
- an [`ImageTensor`](../../../api_references/typing/tensor/image) (`Image.tensor`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`Image.embedding`)
- an [`ImageBytes`][docarray.typing.bytes.ImageBytes] object (`ImageDoc.bytes_`)
You can use this Document directly:
```python
from docarray.documents import ImageDoc
# use it directly
image = ImageDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
)
image.tensor = image.url.load()
# model = MyEmbeddingModel()
# image.embedding = model(image.tensor)
```
You can extend this Document:
```python
from docarray.documents import ImageDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyImage(ImageDoc):
second_embedding: Optional[AnyEmbedding]
image = MyImage(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
)
image.tensor = image.url.load()
# model = MyEmbeddingModel()
# image.embedding = model(image.tensor)
# image.second_embedding = model(image.tensor)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
image: ImageDoc
text: TextDoc
mmdoc = MultiModalDoc(
image=ImageDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.image.tensor = mmdoc.image.url.load()
# or
mmdoc.image.bytes_ = mmdoc.image.url.load_bytes()
mmdoc.image.tensor = mmdoc.image.bytes_.load()
```
"""
url: Optional[ImageUrl] = None
tensor: Optional[ImageTensor] = None
embedding: Optional[AnyEmbedding] = None
bytes_: Optional[ImageBytes] = None
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif (
isinstance(value, (AbstractTensor, np.ndarray))
or (torch is not None and isinstance(value, torch.Tensor))
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
elif isinstance(value, bytes):
            value = cls(bytes_=value)  # the field defined above is `bytes_`, not `byte`
return super().validate(value)
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.image.image_tensor import ImageTensor
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
tf = import_library('tensorflow', raise_error=False)
torch = import_library('torch', raise_error=False)
T = TypeVar('T', bound='ImageDoc')
class ImageDoc(BaseDoc):
"""
Document for handling images.
It can contain:
- an [`ImageUrl`][docarray.typing.url.ImageUrl] (`Image.url`)
- an [`ImageTensor`](../../../api_references/typing/tensor/image) (`Image.tensor`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`Image.embedding`)
- an [`ImageBytes`][docarray.typing.bytes.ImageBytes] object (`ImageDoc.bytes_`)
You can use this Document directly:
```python
from docarray.documents import ImageDoc
# use it directly
image = ImageDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
)
image.tensor = image.url.load()
# model = MyEmbeddingModel()
# image.embedding = model(image.tensor)
```
You can extend this Document:
```python
from docarray.documents import ImageDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyImage(ImageDoc):
second_embedding: Optional[AnyEmbedding]
image = MyImage(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
)
image.tensor = image.url.load()
# model = MyEmbeddingModel()
# image.embedding = model(image.tensor)
# image.second_embedding = model(image.tensor)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
image: ImageDoc
text: TextDoc
mmdoc = MultiModalDoc(
image=ImageDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.image.tensor = mmdoc.image.url.load()
# or
mmdoc.image.bytes_ = mmdoc.image.url.load_bytes()
mmdoc.image.tensor = mmdoc.image.bytes_.load()
```
"""
url: Optional[ImageUrl]
tensor: Optional[ImageTensor]
embedding: Optional[AnyEmbedding]
bytes_: Optional[ImageBytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif (
isinstance(value, (AbstractTensor, np.ndarray))
or (torch is not None and isinstance(value, torch.Tensor))
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
elif isinstance(value, bytes):
            value = cls(bytes_=value)  # the field defined above is `bytes_`, not `byte`
return super().validate(value)
|
import numpy as np
from docarray.base_document import AnyDocument, BaseDocument
from docarray.typing import NdArray
def test_any_doc():
class InnerDocument(BaseDocument):
text: str
tensor: NdArray
class CustomDoc(BaseDocument):
inner: InnerDocument
text: str
doc = CustomDoc(
text='bye', inner=InnerDocument(text='hello', tensor=np.zeros((3, 224, 224)))
)
any_doc = AnyDocument(**doc.__dict__)
assert any_doc.text == doc.text
assert any_doc.inner.text == doc.inner.text
assert (any_doc.inner.tensor == doc.inner.tensor).all()
|
import numpy as np
from docarray.document import AnyDocument, BaseDocument
from docarray.typing import NdArray
def test_any_doc():
class InnerDocument(BaseDocument):
text: str
tensor: NdArray
class CustomDoc(BaseDocument):
inner: InnerDocument
text: str
doc = CustomDoc(
text='bye', inner=InnerDocument(text='hello', tensor=np.zeros((3, 224, 224)))
)
any_doc = AnyDocument(**doc.__dict__)
assert any_doc.text == doc.text
assert any_doc.inner.text == doc.inner.text
assert (any_doc.inner.tensor == doc.inner.tensor).all()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead'
]
|
import json
from typing import Any, Type, TypeVar, overload
import jsonschema
from fastapi.encoders import jsonable_encoder
from .type import type_match
def to_dict(data) -> dict:
return jsonable_encoder(data)
def dumps(data) -> str:
return json.dumps(jsonable_encoder(data))
T = TypeVar("T")
@overload
def loads(data: str, *args, target_type: Type[T], **kwargs) -> T: ...
@overload
def loads(data: str, *args, **kwargs) -> Any: ...
def loads(data: str, *args, target_type: Type[T] | None = None, **kwargs) -> Any:
parsed = json.loads(data, *args, **kwargs)
if target_type:
return type_match(parsed, target_type)
return parsed
def validate_with_jsonschema(
schema: dict[str, Any], data: dict[str, Any]
) -> str | None:
"""
Validate the data against the schema.
Returns the validation error message if the data does not match the schema.
"""
try:
jsonschema.validate(data, schema)
return None
except jsonschema.ValidationError as e:
return str(e)
|
import json
from typing import Any, Type, TypeVar, overload
from fastapi.encoders import jsonable_encoder
from .type import type_match
def to_dict(data) -> dict:
return jsonable_encoder(data)
def dumps(data) -> str:
return json.dumps(jsonable_encoder(data))
T = TypeVar("T")
@overload
def loads(data: str, *args, target_type: Type[T], **kwargs) -> T: ...
@overload
def loads(data: str, *args, **kwargs) -> Any: ...
def loads(data: str, *args, target_type: Type[T] | None = None, **kwargs) -> Any:
parsed = json.loads(data, *args, **kwargs)
if target_type:
return type_match(parsed, target_type)
return parsed
|
"""String output parser."""
from langchain_core.output_parsers.transform import BaseTransformOutputParser
class StrOutputParser(BaseTransformOutputParser[str]):
"""OutputParser that parses LLMResult into the top likely string."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""StrOutputParser is serializable.
Returns:
True
"""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
Default is ["langchain", "schema", "output_parser"].
"""
return ["langchain", "schema", "output_parser"]
@property
def _type(self) -> str:
"""Return the output parser type for serialization."""
return "default"
def parse(self, text: str) -> str:
"""Returns the input text with no changes."""
return text
StrOutputParser.model_rebuild()
|
"""String output parser."""
from typing import Optional as Optional
from langchain_core.output_parsers.transform import BaseTransformOutputParser
class StrOutputParser(BaseTransformOutputParser[str]):
"""OutputParser that parses LLMResult into the top likely string."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""StrOutputParser is serializable.
Returns:
True
"""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
Default is ["langchain", "schema", "output_parser"].
"""
return ["langchain", "schema", "output_parser"]
@property
def _type(self) -> str:
"""Return the output parser type for serialization."""
return "default"
def parse(self, text: str) -> str:
"""Returns the input text with no changes."""
return text
StrOutputParser.model_rebuild()
|
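A short usage sketch for the parser above; `StrOutputParser` simply echoes the model's text output, so `parse` behaves as an identity function on strings.

# Minimal sketch: StrOutputParser.parse returns the input text unchanged.
parser = StrOutputParser()
assert parser.parse("hello world") == "hello world"
assert parser._type == "default"  # serialization type reported by the parser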
import inspect
import re
from typing import Dict, List, Tuple
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql
from .text import text
from .webdataset import webdataset
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return insecure_hashlib.sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
"webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
}
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES_2_15_HASHES = {
"csv": "eea64c71ca8b46dd3f537ed218fc9bf495d5707789152eb2764f5c78fa66d59d",
"json": "8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96",
"pandas": "3ac4ffc4563c796122ef66899b9485a3f1a977553e2d2a8a318c72b8cc6f2202",
"parquet": "ca31c69184d9832faed373922c2acccec0b13a0bb5bbbe19371385c3ff26f1d1",
"arrow": "74f69db2c14c2860059d39860b1f400a03d11bf7fb5a8258ca38c501c878c137",
"text": "c4a140d10f020282918b5dd1b8a49f0104729c6177f60a6b49ec2a365ec69f34",
"imagefolder": "7b7ce5247a942be131d49ad4f3de5866083399a0f250901bd8dc202f8c5f7ce5",
"audiofolder": "d3c1655c66c8f72e4efb5c79e952975fa6e2ce538473a6890241ddbddee9071c",
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE: Dict[str, Tuple[str, dict]] = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
# ndjson is no longer maintained (see: https://github.com/ndjson/ndjson-spec/issues/35#issuecomment-1285673417)
".ndjson": ("json", {}),
".parquet": ("parquet", {}),
".geoparquet": ("parquet", {}),
".gpq": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
".tar": ("webdataset", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
|
import inspect
import re
from typing import Dict, List, Tuple
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql
from .text import text
from .webdataset import webdataset
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return insecure_hashlib.sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
"webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
}
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES_2_15_HASHES = {
"csv": "eea64c71ca8b46dd3f537ed218fc9bf495d5707789152eb2764f5c78fa66d59d",
"json": "8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96",
"pandas": "3ac4ffc4563c796122ef66899b9485a3f1a977553e2d2a8a318c72b8cc6f2202",
"parquet": "ca31c69184d9832faed373922c2acccec0b13a0bb5bbbe19371385c3ff26f1d1",
"arrow": "74f69db2c14c2860059d39860b1f400a03d11bf7fb5a8258ca38c501c878c137",
"text": "c4a140d10f020282918b5dd1b8a49f0104729c6177f60a6b49ec2a365ec69f34",
"imagefolder": "7b7ce5247a942be131d49ad4f3de5866083399a0f250901bd8dc202f8c5f7ce5",
"audiofolder": "d3c1655c66c8f72e4efb5c79e952975fa6e2ce538473a6890241ddbddee9071c",
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE: Dict[str, Tuple[str, dict]] = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".geoparquet": ("parquet", {}),
".gpq": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
".tar": ("webdataset", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
|
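An illustrative lookup against the tables defined above, showing how a data file extension resolves to a packaged builder module and its default reader kwargs; the asserted values follow directly from the dictionaries as written.

# Illustrative only: resolve an extension to (module_name, default_kwargs).
module_name, default_kwargs = _EXTENSION_TO_MODULE[".tsv"]
assert module_name == "csv" and default_kwargs == {"sep": "\t"}
# Every module also accepts zipped archives of its files:
assert ".zip" in _MODULE_TO_EXTENSIONS["csv"]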
import requests
from packaging import version
from typing import Sequence, Union, List, Optional
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
)
from text_generation.types import (
Message,
)
def resolve_tgi_function_call(url: str) -> bool:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
tgi_version = model_info.get("version", None)
if version.parse(tgi_version) >= version.parse("2.0.1"):
return True
else:
        raise ValueError(
            "'text-generation-inference' version "
            f"incompatible with function call: {tgi_version}. "
            "Function call support was added in v2.0.1"
        )
def get_max_input_tokens(url: str) -> Union[int, None]:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
tgi_version = model_info.get("version", None)
if version.parse(tgi_version) >= version.parse("2.1.0"):
return model_info.get("max_input_tokens", None)
else:
return model_info.get("max_input_length", None)
def get_max_total_tokens(url: str) -> Union[int, None]:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
return model_info.get("max_total_tokens", None)
def get_model_name(url: str) -> Union[str, None]:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
return model_info.get("model_id", None)
def to_tgi_messages(messages: Sequence[ChatMessage]) -> Sequence[Message]:
out_messages = []
for m in messages:
tool_calls = m.additional_kwargs.get("tool_calls")
out_messages.append(
Message(role=m.role.value, content=m.content, tool_calls=tool_calls)
)
return out_messages
def force_single_tool_call(response: ChatResponse) -> None:
tool_calls = response.message.additional_kwargs.get("tool_calls", [])
if len(tool_calls) > 1:
response.message.additional_kwargs["tool_calls"] = [tool_calls[0]]
def resolve_tool_choice(
tools: Optional[List[dict]] = None, tool_choice: str = "none"
) -> Union[str, dict]:
"""Resolve tool choice.
Check if tool_name exists in tools.
Note that unlike in OpenAI specification, 'auto' will ALWAYS choose the tool for you.
    Set to 'none' explicitly if you do not wish to use a tool.
"""
valid_tool_choices = ["none", "auto"] + [t["function"]["name"] for t in tools or []]
if tool_choice not in valid_tool_choices:
raise ValueError(
f"{tool_choice} is not a valid tool_choice. Must be one of {valid_tool_choices}"
)
return tool_choice
|
import requests
from packaging import version
from typing import Sequence, Union, List, Optional
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
)
from text_generation.types import (
Message,
)
def resolve_tgi_function_call(url: str) -> bool:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
tgi_version = model_info.get("version", None)
if version.parse(tgi_version) >= version.parse("2.0.1"):
return True
else:
        raise ValueError(
            "'text-generation-inference' version "
            f"incompatible with function call: {tgi_version}. "
            "Function call support was added in v2.0.1"
        )
def get_max_input_tokens(url: str) -> Union[int, None]:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
tgi_version = model_info.get("version", None)
if version.parse(tgi_version) >= version.parse("2.1.0"):
return model_info.get("max_input_tokens", None)
else:
return model_info.get("max_input_length", None)
def get_max_total_tokens(url: str) -> Union[int, None]:
url = f"{url}/info"
model_info = dict(requests.get(url).json())
return model_info.get("max_total_tokens", None)
def to_tgi_messages(messages: Sequence[ChatMessage]) -> Sequence[Message]:
out_messages = []
for m in messages:
tool_calls = m.additional_kwargs.get("tool_calls")
out_messages.append(
Message(role=m.role.value, content=m.content, tool_calls=tool_calls)
)
return out_messages
def force_single_tool_call(response: ChatResponse) -> None:
tool_calls = response.message.additional_kwargs.get("tool_calls", [])
if len(tool_calls) > 1:
response.message.additional_kwargs["tool_calls"] = [tool_calls[0]]
def resolve_tool_choice(
tools: Optional[List[dict]] = None, tool_choice: str = "none"
) -> Union[str, dict]:
"""Resolve tool choice.
Check if tool_name exists in tools.
Note that unlike in OpenAI specification, 'auto' will ALWAYS choose the tool for you.
    Set to 'none' explicitly if you do not wish to use a tool.
"""
valid_tool_choices = ["none", "auto"] + [t["function"]["name"] for t in tools or []]
if tool_choice not in valid_tool_choices:
raise ValueError(
f"{tool_choice} is not a valid tool_choice. Must be one of {valid_tool_choices}"
)
return tool_choice
|
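A hedged usage sketch for the TGI helper functions above; the message content is illustrative, and the snippet assumes the `llama-index-core` and `text-generation` packages are available in the environment.

# Illustrative only: convert chat messages and validate a tool choice.
from llama_index.core.base.llms.types import ChatMessage, MessageRole

messages = [ChatMessage(role=MessageRole.USER, content="What is the capital of France?")]
tgi_messages = to_tgi_messages(messages)  # text_generation Message objects
tool_choice = resolve_tool_choice(tools=None, tool_choice="auto")  # "auto" is always a valid choice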
"""IndexStructType class."""
from enum import Enum
class IndexStructType(str, Enum):
"""
Index struct type. Identifier for a "type" of index.
Attributes:
TREE ("tree"): Tree index. See :ref:`Ref-Indices-Tree` for tree indices.
LIST ("list"): Summary index. See :ref:`Ref-Indices-List` for summary indices.
KEYWORD_TABLE ("keyword_table"): Keyword table index. See
:ref:`Ref-Indices-Table`
for keyword table indices.
DICT ("dict"): Faiss Vector Store Index. See
:ref:`Ref-Indices-VectorStore`
for more information on the faiss vector store index.
SIMPLE_DICT ("simple_dict"): Simple Vector Store Index. See
:ref:`Ref-Indices-VectorStore`
for more information on the simple vector store index.
WEAVIATE ("weaviate"): Weaviate Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
for more information on the Weaviate vector store index.
PINECONE ("pinecone"): Pinecone Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
for more information on the Pinecone vector store index.
DEEPLAKE ("deeplake"): DeepLake Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
            for more information on the DeepLake vector store index.
QDRANT ("qdrant"): Qdrant Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
for more information on the Qdrant vector store index.
LANCEDB ("lancedb"): LanceDB Vector Store Index
See :ref:`Ref-Indices-VectorStore`
for more information on the LanceDB vector store index.
MILVUS ("milvus"): Milvus Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
for more information on the Milvus vector store index.
CHROMA ("chroma"): Chroma Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
for more information on the Chroma vector store index.
OPENSEARCH ("opensearch"): Opensearch Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
for more information on the Opensearch vector store index.
MYSCALE ("myscale"): MyScale Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
for more information on the MyScale vector store index.
CLICKHOUSE ("clickhouse"): ClickHouse Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
for more information on the ClickHouse vector store index.
EPSILLA ("epsilla"): Epsilla Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
for more information on the Epsilla vector store index.
CHATGPT_RETRIEVAL_PLUGIN ("chatgpt_retrieval_plugin"): ChatGPT
retrieval plugin index.
SQL ("SQL"): SQL Structured Store Index.
See :ref:`Ref-Indices-StructStore`
for more information on the SQL vector store index.
DASHVECTOR ("dashvector"): DashVector Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
            for more information on the DashVector vector store index.
KG ("kg"): Knowledge Graph index.
See :ref:`Ref-Indices-Knowledge-Graph` for KG indices.
DOCUMENT_SUMMARY ("document_summary"): Document Summary Index.
See :ref:`Ref-Indices-Document-Summary` for Summary Indices.
"""
# TODO: refactor so these are properties on the base class
NODE = "node"
TREE = "tree"
LIST = "list"
KEYWORD_TABLE = "keyword_table"
# faiss
DICT = "dict"
# simple
SIMPLE_DICT = "simple_dict"
WEAVIATE = "weaviate"
PINECONE = "pinecone"
QDRANT = "qdrant"
LANCEDB = "lancedb"
MILVUS = "milvus"
CHROMA = "chroma"
MYSCALE = "myscale"
CLICKHOUSE = "clickhouse"
VECTOR_STORE = "vector_store"
OPENSEARCH = "opensearch"
DASHVECTOR = "dashvector"
CHATGPT_RETRIEVAL_PLUGIN = "chatgpt_retrieval_plugin"
DEEPLAKE = "deeplake"
EPSILLA = "epsilla"
# multimodal
MULTIMODAL_VECTOR_STORE = "multimodal"
# for SQL index
SQL = "sql"
# for KG index
KG = "kg"
SIMPLE_KG = "simple_kg"
SIMPLE_LPG = "simple_lpg"
NEBULAGRAPH = "nebulagraph"
FALKORDB = "falkordb"
# EMPTY
EMPTY = "empty"
COMPOSITE = "composite"
PANDAS = "pandas"
DOCUMENT_SUMMARY = "document_summary"
# Managed
VECTARA = "vectara"
POSTGRESML = "postgresml"
|
"""IndexStructType class."""
from enum import Enum
class IndexStructType(str, Enum):
"""Index struct type. Identifier for a "type" of index.
Attributes:
TREE ("tree"): Tree index. See :ref:`Ref-Indices-Tree` for tree indices.
LIST ("list"): Summary index. See :ref:`Ref-Indices-List` for summary indices.
KEYWORD_TABLE ("keyword_table"): Keyword table index. See
:ref:`Ref-Indices-Table`
for keyword table indices.
DICT ("dict"): Faiss Vector Store Index. See
:ref:`Ref-Indices-VectorStore`
for more information on the faiss vector store index.
SIMPLE_DICT ("simple_dict"): Simple Vector Store Index. See
:ref:`Ref-Indices-VectorStore`
for more information on the simple vector store index.
WEAVIATE ("weaviate"): Weaviate Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
for more information on the Weaviate vector store index.
PINECONE ("pinecone"): Pinecone Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
for more information on the Pinecone vector store index.
DEEPLAKE ("deeplake"): DeepLake Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
            for more information on the DeepLake vector store index.
QDRANT ("qdrant"): Qdrant Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
for more information on the Qdrant vector store index.
LANCEDB ("lancedb"): LanceDB Vector Store Index
See :ref:`Ref-Indices-VectorStore`
for more information on the LanceDB vector store index.
MILVUS ("milvus"): Milvus Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
for more information on the Milvus vector store index.
CHROMA ("chroma"): Chroma Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
for more information on the Chroma vector store index.
OPENSEARCH ("opensearch"): Opensearch Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
for more information on the Opensearch vector store index.
MYSCALE ("myscale"): MyScale Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
for more information on the MyScale vector store index.
CLICKHOUSE ("clickhouse"): ClickHouse Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
for more information on the ClickHouse vector store index.
EPSILLA ("epsilla"): Epsilla Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
for more information on the Epsilla vector store index.
CHATGPT_RETRIEVAL_PLUGIN ("chatgpt_retrieval_plugin"): ChatGPT
retrieval plugin index.
SQL ("SQL"): SQL Structured Store Index.
See :ref:`Ref-Indices-StructStore`
for more information on the SQL vector store index.
DASHVECTOR ("dashvector"): DashVector Vector Store Index.
See :ref:`Ref-Indices-VectorStore`
            for more information on the DashVector vector store index.
KG ("kg"): Knowledge Graph index.
See :ref:`Ref-Indices-Knowledge-Graph` for KG indices.
DOCUMENT_SUMMARY ("document_summary"): Document Summary Index.
See :ref:`Ref-Indices-Document-Summary` for Summary Indices.
"""
# TODO: refactor so these are properties on the base class
NODE = "node"
TREE = "tree"
LIST = "list"
KEYWORD_TABLE = "keyword_table"
# faiss
DICT = "dict"
# simple
SIMPLE_DICT = "simple_dict"
WEAVIATE = "weaviate"
PINECONE = "pinecone"
QDRANT = "qdrant"
LANCEDB = "lancedb"
MILVUS = "milvus"
CHROMA = "chroma"
MYSCALE = "myscale"
CLICKHOUSE = "clickhouse"
VECTOR_STORE = "vector_store"
OPENSEARCH = "opensearch"
DASHVECTOR = "dashvector"
CHATGPT_RETRIEVAL_PLUGIN = "chatgpt_retrieval_plugin"
DEEPLAKE = "deeplake"
EPSILLA = "epsilla"
# multimodal
MULTIMODAL_VECTOR_STORE = "multimodal"
# for SQL index
SQL = "sql"
# for KG index
KG = "kg"
SIMPLE_KG = "simple_kg"
SIMPLE_LPG = "simple_lpg"
NEBULAGRAPH = "nebulagraph"
FALKORDB = "falkordb"
# EMPTY
EMPTY = "empty"
COMPOSITE = "composite"
PANDAS = "pandas"
DOCUMENT_SUMMARY = "document_summary"
# Managed
VECTARA = "vectara"
POSTGRESML = "postgresml"
|
"""Run smoke tests"""
import os
from pathlib import Path
import torch
import torchvision
from torchvision.io import read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.ndim != 3 or img_jpg.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.ndim != 3 or img_png.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms()
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
if torch.cuda.is_available():
smoke_test_torchvision_resnet50_classify("cuda")
if __name__ == "__main__":
main()
|
"""Run smoke tests"""
import os
from pathlib import Path
import torch
import torchvision
from torchvision.io import read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision useable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.ndim != 3 or img_jpg.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.ndim != 3 or img_png.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms()
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
if torch.cuda.is_available():
smoke_test_torchvision_resnet50_classify("cuda")
if __name__ == "__main__":
main()
|
from typing import Any, Dict, Optional
from llama_index.core.storage.kvstore.types import BaseKVStore
from llama_index.storage.kvstore.azurecosmosnosql import AzureCosmosNoSqlKVStore
DEFAULT_DOCUMENT_DATABASE = "DocumentStoreDB"
DEFAULT_DOCUMENT_CONTAINER = "DocumentStoreContainer"
class AzureCosmosNoSqlDocumentStore(BaseKVStore):
"""Creates an AzureCosmosNoSqlDocumentStore."""
def __init__(
self,
azure_cosmos_nosql_kvstore: AzureCosmosNoSqlKVStore,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> None:
"""Initializes the AzureCosmosNoSqlDocumentStore."""
super().__init__(azure_cosmos_nosql_kvstore, namespace, collection_suffix)
@classmethod
def from_connection_string(
cls,
connection_string: str,
document_db_name: str = DEFAULT_DOCUMENT_DATABASE,
document_container_name: str = DEFAULT_DOCUMENT_CONTAINER,
cosmos_container_properties: Dict[str, Any] = None,
cosmos_database_properties: Dict[str, Any] = None,
) -> "AzureCosmosNoSqlDocumentStore":
"""Creates an instance of AzureCosmosNoSqlDocumentStore using a connection string."""
azure_cosmos_nosql_kvstore = AzureCosmosNoSqlKVStore.from_connection_string(
connection_string,
document_db_name,
document_container_name,
cosmos_container_properties,
cosmos_database_properties,
)
namespace = document_db_name + "." + document_container_name
return cls(azure_cosmos_nosql_kvstore, namespace)
@classmethod
def from_account_and_key(
cls,
endpoint: str,
key: str,
document_db_name: str = DEFAULT_DOCUMENT_DATABASE,
document_container_name: str = DEFAULT_DOCUMENT_CONTAINER,
cosmos_container_properties: Dict[str, Any] = None,
cosmos_database_properties: Dict[str, Any] = None,
) -> "AzureCosmosNoSqlDocumentStore":
"""Creates an instance of AzureCosmosNoSqlDocumentStore using an account endpoint and key."""
azure_cosmos_nosql_kvstore = AzureCosmosNoSqlKVStore.from_account_and_key(
endpoint,
key,
document_db_name,
document_container_name,
cosmos_container_properties,
cosmos_database_properties,
)
namespace = document_db_name + "." + document_container_name
return cls(azure_cosmos_nosql_kvstore, namespace)
@classmethod
def from_aad_token(
cls,
endpoint: str,
document_db_name: str = DEFAULT_DOCUMENT_DATABASE,
document_container_name: str = DEFAULT_DOCUMENT_CONTAINER,
cosmos_container_properties: Dict[str, Any] = None,
cosmos_database_properties: Dict[str, Any] = None,
) -> "AzureCosmosNoSqlDocumentStore":
"""Creates an instance of AzureCosmosNoSqlDocumentStore using an aad token."""
azure_cosmos_nosql_kvstore = AzureCosmosNoSqlKVStore.from_aad_token(
endpoint,
document_db_name,
document_container_name,
cosmos_container_properties,
cosmos_database_properties,
)
namespace = document_db_name + "." + document_container_name
return cls(azure_cosmos_nosql_kvstore, namespace)
|
from typing import Any, Dict, Optional
from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore
from llama_index.storage.kvstore.azurecosmosnosql import AzureCosmosNoSqlKVStore
DEFAULT_DOCUMENT_DATABASE = "DocumentStoreDB"
DEFAULT_DOCUMENT_CONTAINER = "DocumentStoreContainer"
class AzureCosmosNoSqlDocumentStore(KVDocumentStore):
"""Creates an AzureCosmosNoSqlDocumentStore."""
def __init__(
self,
azure_cosmos_nosql_kvstore: AzureCosmosNoSqlKVStore,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> None:
"""Initializes the AzureCosmosNoSqlDocumentStore."""
super().__init__(azure_cosmos_nosql_kvstore, namespace, collection_suffix)
@classmethod
def from_connection_string(
cls,
connection_string: str,
document_db_name: str = DEFAULT_DOCUMENT_DATABASE,
document_container_name: str = DEFAULT_DOCUMENT_CONTAINER,
cosmos_container_properties: Dict[str, Any] = None,
cosmos_database_properties: Dict[str, Any] = None,
) -> "AzureCosmosNoSqlDocumentStore":
"""Creates an instance of AzureCosmosNoSqlDocumentStore using a connection string."""
azure_cosmos_nosql_kvstore = AzureCosmosNoSqlKVStore.from_connection_string(
connection_string,
document_db_name,
document_container_name,
cosmos_container_properties,
cosmos_database_properties,
)
namespace = document_db_name + "." + document_container_name
return cls(azure_cosmos_nosql_kvstore, namespace)
@classmethod
def from_account_and_key(
cls,
endpoint: str,
key: str,
document_db_name: str = DEFAULT_DOCUMENT_DATABASE,
document_container_name: str = DEFAULT_DOCUMENT_CONTAINER,
cosmos_container_properties: Dict[str, Any] = None,
cosmos_database_properties: Dict[str, Any] = None,
) -> "AzureCosmosNoSqlDocumentStore":
"""Creates an instance of AzureCosmosNoSqlDocumentStore using an account endpoint and key."""
azure_cosmos_nosql_kvstore = AzureCosmosNoSqlKVStore.from_account_and_key(
endpoint,
key,
document_db_name,
document_container_name,
cosmos_container_properties,
cosmos_database_properties,
)
namespace = document_db_name + "." + document_container_name
return cls(azure_cosmos_nosql_kvstore, namespace)
@classmethod
def from_aad_token(
cls,
endpoint: str,
document_db_name: str = DEFAULT_DOCUMENT_DATABASE,
document_container_name: str = DEFAULT_DOCUMENT_CONTAINER,
cosmos_container_properties: Dict[str, Any] = None,
cosmos_database_properties: Dict[str, Any] = None,
) -> "AzureCosmosNoSqlDocumentStore":
"""Creates an instance of AzureCosmosNoSqlDocumentStore using an aad token."""
azure_cosmos_nosql_kvstore = AzureCosmosNoSqlKVStore.from_aad_token(
endpoint,
document_db_name,
document_container_name,
cosmos_container_properties,
cosmos_database_properties,
)
namespace = document_db_name + "." + document_container_name
return cls(azure_cosmos_nosql_kvstore, namespace)
|
from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`, :class:`~sentence_transformers.sampler.DefaultBatchSampler`, Callable[[...], :class:`~sentence_transformers.sampler.DefaultBatchSampler`]], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`, :class:`~sentence_transformers.sampler.MultiDatasetDefaultBatchSampler`, Callable[[...], :class:`~sentence_transformers.sampler.MultiDatasetDefaultBatchSampler`]], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
learning_rate_mapping (`Optional[Dict[str, float]]`, *optional*):
A mapping of parameter name regular expressions to learning rates. This allows you to set different
learning rates for different parts of the model, e.g., `{'SparseStaticEmbedding\.*': 1e-3}` for the
SparseStaticEmbedding module. This is useful when you want to fine-tune specific parts of the model
with different learning rates.
"""
|
from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`, :class:`~sentence_transformers.sampler.DefaultBatchSampler`, Callable[[...], :class:`~sentence_transformers.sampler.DefaultBatchSampler`]], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`, :class:`~sentence_transformers.sampler.MultiDatasetDefaultBatchSampler`, Callable[[...], :class:`~sentence_transformers.sampler.MultiDatasetDefaultBatchSampler`]], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
learning_rate_mapping (`Optional[Dict[str, float]]`, *optional*):
A mapping of parameter name regular expressions to learning rates. This allows you to set different
learning rates for different parts of the model, e.g., `{'IDF\.*': 1e-3}` for the IDF module. This is
useful when you want to fine-tune specific parts of the model with different learning rates.
"""
|
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
# old versions of hfh don't url-encode the file path
path = quote(path)
return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
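# --- Hedged usage sketch (editor's addition) ---
# The repo id and file path are placeholders; the helper simply delegates to
# huggingface_hub with repo_type="dataset", url-encoding the path on old versions.
#
#   url = hf_hub_url("user/my-dataset", "data/train.csv", revision="main")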
|
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
if version.parse(hfh.__version__) < version.parse("0.11.0"):
# old versions of hfh don't url-encode the file path
path = quote(path)
return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
|
import re
from typing import TYPE_CHECKING, Any, Dict, Union
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
"""
self.greater_is_better = True
self.primary_metric = None
def __call__(
self, model: "SentenceTransformer", output_path: str = None, epoch: int = -1, steps: int = -1
) -> Union[float, Dict[str, float]]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: Dict[str, float], name: str) -> Dict[str, float]:
if not name:
return metrics
metrics = {name + "_" + key: value for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(self, model: "SentenceTransformer", metrics: Dict[str, Any]) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Remove "Evaluator" from the class name
2. Add a space before every capital letter
"""
class_name = self.__class__.__name__
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
        except ValueError:  # str.index raises ValueError if "Evaluator" is absent
            pass
        return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
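# --- Hedged sketch of a custom evaluator (editor's addition) ---
# Shows the contract described in the docstrings above: set `primary_metric` when a
# dict of scores is returned. The metric name and the constant score are invented
# placeholders.
#
#   class MyAccuracyEvaluator(SentenceEvaluator):
#       def __init__(self, name: str = ""):
#           super().__init__()
#           self.name = name
#           self.primary_metric = "accuracy"
#
#       def __call__(self, model, output_path=None, epoch=-1, steps=-1):
#           metrics = {"accuracy": 0.5}  # placeholder score
#           metrics = self.prefix_name_to_metrics(metrics, self.name)
#           self.store_metrics_in_model_card_data(model, metrics)
#           return metrics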
|
import re
from typing import TYPE_CHECKING, Any, Dict, Union
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
"""
self.greater_is_better = True
self.primary_metric = None
def __call__(
self, model: "SentenceTransformer", output_path: str = None, epoch: int = -1, steps: int = -1
) -> Union[float, Dict[str, float]]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: Dict[str, float], name: str):
if not name:
return metrics
metrics = {name + "_" + key: value for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(self, model: "SentenceTransformer", metrics: Dict[str, Any]) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Remove "Evaluator" from the class name
2. Add a space before every capital letter
"""
class_name = self.__class__.__name__
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
        except ValueError:  # str.index raises ValueError if "Evaluator" is absent
            pass
        return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
|
"""
This example trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_nli.py
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CrossEncoderClassificationEvaluator
from sentence_transformers.cross_encoder.losses.CrossEntropyLoss import CrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 1
output_dir = "output/training_ce_allnli-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 3 labels
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, etc.
model_name = "distilroberta-base"
model = CrossEncoder(model_name, num_labels=3)
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
# We'll start with 100k training samples, but you can increase this to get a stronger model
logging.info("Read AllNLI train dataset")
train_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="train").select(range(100_000))
eval_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="dev").select(range(1000))
test_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="test")
logging.info(train_dataset)
# 3. Define our training loss:
loss = CrossEntropyLoss(model)
# 4. Before and during training, we use CrossEncoderClassificationEvaluator to measure the performance on the dev set
dev_cls_evaluator = CrossEncoderClassificationEvaluator(
sentence_pairs=list(zip(eval_dataset["premise"], eval_dataset["hypothesis"])),
labels=eval_dataset["label"],
name="AllNLI-dev",
)
dev_cls_evaluator(model)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"reranker-{short_model_name}-nli"
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=500,
save_strategy="steps",
save_steps=500,
save_total_limit=2,
logging_steps=100,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_cls_evaluator,
)
trainer.train()
# 7. Evaluate the final model on test dataset
test_cls_evaluator = CrossEncoderClassificationEvaluator(
list(zip(test_dataset["premise"], test_dataset["hypothesis"])),
test_dataset["label"],
name="AllNLI-test",
)
test_cls_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
|
"""
This example trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_nli.py
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEClassificationEvaluator
from sentence_transformers.cross_encoder.losses.CrossEntropyLoss import CrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 1
output_dir = "output/training_ce_allnli-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 3 labels
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, etc.
model_name = "distilroberta-base"
model = CrossEncoder(model_name, num_labels=3)
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
# We'll start with 100k training samples, but you can increase this to get a stronger model
logging.info("Read AllNLI train dataset")
train_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="train").select(range(100_000))
eval_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="dev").select(range(1000))
test_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="test")
logging.info(train_dataset)
# 3. Define our training loss:
loss = CrossEntropyLoss(model)
# 4. Before and during training, we use CEClassificationEvaluator to measure the performance on the dev set
dev_cls_evaluator = CEClassificationEvaluator(
sentence_pairs=list(zip(eval_dataset["premise"], eval_dataset["hypothesis"])),
labels=eval_dataset["label"],
name="AllNLI-dev",
)
dev_cls_evaluator(model)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"reranker-{short_model_name}-nli"
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=500,
save_strategy="steps",
save_steps=500,
save_total_limit=2,
logging_steps=100,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_cls_evaluator,
)
trainer.train()
# 7. Evaluate the final model on test dataset
test_cls_evaluator = CEClassificationEvaluator(
list(zip(test_dataset["premise"], test_dataset["hypothesis"])),
test_dataset["label"],
name="AllNLI-test",
)
test_cls_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
|
"""Chat Message."""
from typing import Any, Literal
from typing_extensions import override
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
)
from langchain_core.utils._merge import merge_dicts
class ChatMessage(BaseMessage):
"""Message that can be assigned an arbitrary speaker (i.e. role)."""
role: str
"""The speaker / role of the Message."""
type: Literal["chat"] = "chat"
"""The type of the message (used during serialization). Defaults to "chat"."""
class ChatMessageChunk(ChatMessage, BaseMessageChunk):
"""Chat Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore[assignment]
"""The type of the message (used during serialization).
Defaults to "ChatMessageChunk"."""
@override
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override]
if isinstance(other, ChatMessageChunk):
if self.role != other.role:
msg = "Cannot concatenate ChatMessageChunks with different roles."
raise ValueError(msg)
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
id=self.id,
)
if isinstance(other, BaseMessageChunk):
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
id=self.id,
)
return super().__add__(other)
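# --- Hedged usage sketch (editor's addition) ---
# Demonstrates chunk concatenation as implemented in __add__ above; the role and
# content strings are placeholders.
#
#   chunk = ChatMessageChunk(role="narrator", content="Hello ") + ChatMessageChunk(
#       role="narrator", content="world"
#   )
#   assert chunk.content == "Hello world"
#   # Adding chunks with different roles raises ValueError, per the check above.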
|
"""Chat Message."""
from typing import Any, Literal
from typing_extensions import override
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
)
from langchain_core.utils._merge import merge_dicts
class ChatMessage(BaseMessage):
"""Message that can be assigned an arbitrary speaker (i.e. role)."""
role: str
"""The speaker / role of the Message."""
type: Literal["chat"] = "chat"
"""The type of the message (used during serialization). Defaults to "chat"."""
ChatMessage.model_rebuild()
class ChatMessageChunk(ChatMessage, BaseMessageChunk):
"""Chat Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore[assignment]
"""The type of the message (used during serialization).
Defaults to "ChatMessageChunk"."""
@override
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override]
if isinstance(other, ChatMessageChunk):
if self.role != other.role:
msg = "Cannot concatenate ChatMessageChunks with different roles."
raise ValueError(msg)
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
id=self.id,
)
if isinstance(other, BaseMessageChunk):
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
id=self.id,
)
return super().__add__(other)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import MochiTransformer3DModel
from diffusers.utils.testing_utils import enable_full_determinism, torch_device
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class MochiTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = MochiTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
# Overriding it because of the transformer size.
model_split_percents = [0.7, 0.6, 0.6]
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 2
height = 16
width = 16
embedding_dim = 16
sequence_length = 16
hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
encoder_attention_mask = torch.ones((batch_size, sequence_length)).bool().to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
"encoder_attention_mask": encoder_attention_mask,
}
@property
def input_shape(self):
return (4, 2, 16, 16)
@property
def output_shape(self):
return (4, 2, 16, 16)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"patch_size": 2,
"num_attention_heads": 2,
"attention_head_dim": 8,
"num_layers": 2,
"pooled_projection_dim": 16,
"in_channels": 4,
"out_channels": None,
"qk_norm": "rms_norm",
"text_embed_dim": 16,
"time_embed_dim": 4,
"activation_fn": "swiglu",
"max_sequence_length": 16,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"MochiTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import MochiTransformer3DModel
from diffusers.utils.testing_utils import enable_full_determinism, torch_device
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class MochiTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = MochiTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 2
height = 16
width = 16
embedding_dim = 16
sequence_length = 16
hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
encoder_attention_mask = torch.ones((batch_size, sequence_length)).bool().to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
"encoder_attention_mask": encoder_attention_mask,
}
@property
def input_shape(self):
return (4, 2, 16, 16)
@property
def output_shape(self):
return (4, 2, 16, 16)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"patch_size": 2,
"num_attention_heads": 2,
"attention_head_dim": 8,
"num_layers": 2,
"pooled_projection_dim": 16,
"in_channels": 4,
"out_channels": None,
"qk_norm": "rms_norm",
"text_embed_dim": 16,
"time_embed_dim": 4,
"activation_fn": "swiglu",
"max_sequence_length": 16,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"MochiTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
from unittest.mock import MagicMock
import torch
import torch.distributed as torch_dist
import torch.nn as nn
from mmengine.dist import all_gather
from mmengine.hooks import SyncBuffersHook
from mmengine.registry import MODELS
from mmengine.testing._internal import MultiProcessTestCase
from mmengine.testing.runner_test_case import RunnerTestCase, ToyModel
class ToyModuleWithNorm(ToyModel):
def __init__(self, data_preprocessor=None):
super().__init__(data_preprocessor=data_preprocessor)
bn = nn.BatchNorm1d(2)
self.linear1 = nn.Sequential(self.linear1, bn)
def init_weights(self):
for buffer in self.buffers():
buffer.fill_(
torch.tensor(int(os.environ['RANK']), dtype=torch.float32))
return super().init_weights()
class TestSyncBuffersHook(MultiProcessTestCase, RunnerTestCase):
def setUp(self) -> None:
super().setUp()
self._spawn_processes()
def prepare_subprocess(self):
MODELS.register_module(module=ToyModuleWithNorm, force=True)
super(MultiProcessTestCase, self).setUp()
def test_sync_buffers_hook(self):
self.setup_dist_env()
runner = MagicMock()
runner.model = ToyModuleWithNorm()
runner.model.init_weights()
for buffer in runner.model.buffers():
buffer1, buffer2 = all_gather(buffer)
self.assertFalse(torch.allclose(buffer1, buffer2))
hook = SyncBuffersHook()
hook.after_train_epoch(runner)
for buffer in runner.model.buffers():
buffer1, buffer2 = all_gather(buffer)
self.assertTrue(torch.allclose(buffer1, buffer2))
def test_with_runner(self):
self.setup_dist_env()
cfg = self.epoch_based_cfg
cfg.model = dict(type='ToyModuleWithNorm')
cfg.launch = 'pytorch'
cfg.custom_hooks = [dict(type='SyncBuffersHook')]
runner = self.build_runner(cfg)
runner.train()
for buffer in runner.model.buffers():
buffer1, buffer2 = all_gather(buffer)
self.assertTrue(torch.allclose(buffer1, buffer2))
def setup_dist_env(self):
super().setup_dist_env()
os.environ['RANK'] = str(self.rank)
torch_dist.init_process_group(
backend='gloo', rank=self.rank, world_size=self.world_size)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import SyncBuffersHook
class TestSyncBuffersHook:
def test_sync_buffers_hook(self):
runner = Mock()
runner.model = Mock()
hook = SyncBuffersHook()
hook._after_epoch(runner)
|
import os
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor
@pytest.mark.parametrize(
'tensor,cls_audio_tensor,cls_tensor',
[
(torch.zeros(1000, 2), AudioTorchTensor, torch.Tensor),
(np.zeros((1000, 2)), AudioNdArray, np.ndarray),
],
)
def test_set_audio_tensor(tensor, cls_audio_tensor, cls_tensor):
class MyAudioDoc(BaseDocument):
tensor: cls_audio_tensor
doc = MyAudioDoc(tensor=tensor)
assert isinstance(doc.tensor, cls_audio_tensor)
assert isinstance(doc.tensor, cls_tensor)
assert (doc.tensor == tensor).all()
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioNdArray, np.zeros((1000, 2))),
(AudioTorchTensor, torch.zeros(1000, 2)),
(AudioTorchTensor, np.zeros((1000, 2))),
],
)
def test_validation(cls_tensor, tensor):
arr = parse_obj_as(cls_tensor, tensor)
assert isinstance(arr, cls_tensor)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioNdArray, torch.zeros(1000, 2)),
(AudioNdArray, 'hello'),
(AudioTorchTensor, 'hello'),
],
)
def test_illegal_validation(cls_tensor, tensor):
match = str(cls_tensor).split('.')[-1][:-2]
with pytest.raises(ValueError, match=match):
parse_obj_as(cls_tensor, tensor)
@pytest.mark.proto
@pytest.mark.parametrize(
'cls_tensor,tensor,proto_key',
[
(AudioTorchTensor, torch.zeros(1000, 2), AudioTorchTensor._proto_type_name),
(AudioNdArray, np.zeros((1000, 2)), AudioNdArray._proto_type_name),
],
)
def test_proto_tensor(cls_tensor, tensor, proto_key):
tensor = parse_obj_as(cls_tensor, tensor)
proto = tensor._to_node_protobuf()
assert proto_key in str(proto)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioTorchTensor, torch.zeros(1000, 2)),
(AudioNdArray, np.zeros((1000, 2))),
],
)
def test_save_audio_tensor_to_wav_file(cls_tensor, tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio_tensor = parse_obj_as(cls_tensor, tensor)
audio_tensor.save_to_wav_file(tmp_file)
assert os.path.isfile(tmp_file)
|
import os
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor
@pytest.mark.parametrize(
'tensor,cls_audio_tensor,cls_tensor',
[
(torch.zeros(1000, 2), AudioTorchTensor, torch.Tensor),
(np.zeros((1000, 2)), AudioNdArray, np.ndarray),
],
)
def test_set_audio_tensor(tensor, cls_audio_tensor, cls_tensor):
class MyAudioDoc(BaseDocument):
tensor: cls_audio_tensor
doc = MyAudioDoc(tensor=tensor)
assert isinstance(doc.tensor, cls_audio_tensor)
assert isinstance(doc.tensor, cls_tensor)
assert (doc.tensor == tensor).all()
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioNdArray, np.zeros((1000, 2))),
(AudioTorchTensor, torch.zeros(1000, 2)),
(AudioTorchTensor, np.zeros((1000, 2))),
],
)
def test_validation(cls_tensor, tensor):
arr = parse_obj_as(cls_tensor, tensor)
assert isinstance(arr, cls_tensor)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioNdArray, torch.zeros(1000, 2)),
(AudioNdArray, 'hello'),
(AudioTorchTensor, 'hello'),
],
)
def test_illegal_validation(cls_tensor, tensor):
match = str(cls_tensor).split('.')[-1][:-2]
with pytest.raises(ValueError, match=match):
parse_obj_as(cls_tensor, tensor)
@pytest.mark.parametrize(
'cls_tensor,tensor,proto_key',
[
(AudioTorchTensor, torch.zeros(1000, 2), AudioTorchTensor._proto_type_name),
(AudioNdArray, np.zeros((1000, 2)), AudioNdArray._proto_type_name),
],
)
def test_proto_tensor(cls_tensor, tensor, proto_key):
tensor = parse_obj_as(cls_tensor, tensor)
proto = tensor._to_node_protobuf()
assert proto_key in str(proto)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioTorchTensor, torch.zeros(1000, 2)),
(AudioNdArray, np.zeros((1000, 2))),
],
)
def test_save_audio_tensor_to_wav_file(cls_tensor, tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio_tensor = parse_obj_as(cls_tensor, tensor)
audio_tensor.save_to_wav_file(tmp_file)
assert os.path.isfile(tmp_file)
|
from abc import ABC
import pytest
from docarray import DocumentArray
from docarray.array.storage.memory import GetSetDelMixin, SequenceLikeMixin
from docarray.array.storage.redis.backend import BackendMixin, RedisConfig
class StorageMixins(BackendMixin, GetSetDelMixin, SequenceLikeMixin, ABC):
...
class DocumentArrayDummy(StorageMixins, DocumentArray):
def __new__(cls, *args, **kwargs):
return super().__new__(cls)
def _load_offset2ids(self):
pass
def _save_offset2ids(self):
pass
@pytest.mark.parametrize('distance', ['L2', 'IP', 'COSINE'])
@pytest.mark.parametrize(
'method,initial_cap,ef_construction,block_size',
[
('HNSW', 10, 250, 1000000),
('FLAT', 10, 250, 1000000),
],
)
@pytest.mark.parametrize(
'columns',
[
{'attr1': 'str', 'attr2': 'bytes'},
{'attr1': 'int', 'attr2': 'float'},
{'attr1': 'double', 'attr2': 'long', 'attr3': 'geo'},
],
)
@pytest.mark.parametrize(
'redis_config',
[
{'decode_responses': True},
{'decode_responses': False},
{'retry_on_timeout': True},
{'decode_responses': True, 'retry_on_timeout': True},
{},
],
)
def test_init_storage(
distance,
columns,
method,
initial_cap,
ef_construction,
block_size,
redis_config,
start_storage,
):
cfg = RedisConfig(
n_dim=128,
distance=distance,
columns=columns,
method=method,
initial_cap=initial_cap,
ef_construction=ef_construction,
block_size=block_size,
redis_config=redis_config,
)
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert redis_da._client.info()['tcp_port'] == redis_da._config.port
assert (
redis_da._client.ft(index_name=redis_da._config.index_name).info()[
'attributes'
][0][1]
== b'embedding'
)
assert (
redis_da._client.ft(index_name=redis_da._config.index_name).info()[
'attributes'
][0][5]
== b'VECTOR'
)
def test_init_storage_update_schema(start_storage):
cfg = RedisConfig(n_dim=128, columns={'attr1': 'str'}, index_name="idx")
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert (
redis_da._client.ft(index_name=redis_da._config.index_name).info()[
'attributes'
][1][1]
== b'attr1'
)
cfg = RedisConfig(
n_dim=128, columns={'attr2': 'str'}, index_name="idx", update_schema=False
)
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert (
redis_da._client.ft(index_name=redis_da._config.index_name).info()[
'attributes'
][1][1]
== b'attr1'
)
cfg = RedisConfig(
n_dim=128, columns={'attr2': 'str'}, index_name="idx", update_schema=True
)
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert (
redis_da._client.ft(index_name=redis_da._config.index_name).info()[
'attributes'
][1][1]
== b'attr2'
)
|
from abc import ABC
import pytest
from docarray import DocumentArray
from docarray.array.storage.memory import GetSetDelMixin, SequenceLikeMixin
from docarray.array.storage.redis.backend import BackendMixin, RedisConfig
class StorageMixins(BackendMixin, GetSetDelMixin, SequenceLikeMixin, ABC):
...
class DocumentArrayDummy(StorageMixins, DocumentArray):
def __new__(cls, *args, **kwargs):
return super().__new__(cls)
def _load_offset2ids(self):
pass
def _save_offset2ids(self):
pass
type_convert = {
'int': b'NUMERIC',
'float': b'NUMERIC',
'double': b'NUMERIC',
'long': b'NUMERIC',
'str': b'TEXT',
'bytes': b'TEXT',
'bool': b'NUMERIC',
}
@pytest.mark.parametrize('distance', ['L2', 'IP', 'COSINE'])
@pytest.mark.parametrize(
'method,initial_cap,ef_construction,block_size',
[
('HNSW', 10, 250, 1000000),
('FLAT', 10, 250, 1000000),
],
)
@pytest.mark.parametrize(
'columns',
[
[('attr1', 'str'), ('attr2', 'bytes')],
[('attr1', 'int'), ('attr2', 'float')],
[('attr1', 'double'), ('attr2', 'long'), ('attr3', 'int')],
{'attr1': 'str', 'attr2': 'bytes'},
{'attr1': 'int', 'attr2': 'float'},
{'attr1': 'double', 'attr2': 'long', 'attr3': 'int'},
],
)
@pytest.mark.parametrize(
'redis_config',
[
{'decode_responses': True},
{'decode_responses': False},
{'retry_on_timeout': True},
{'decode_responses': True, 'retry_on_timeout': True},
{},
],
)
def test_init_storage(
distance,
columns,
method,
initial_cap,
ef_construction,
block_size,
redis_config,
start_storage,
):
cfg = RedisConfig(
n_dim=128,
distance=distance,
columns=columns,
method=method,
initial_cap=initial_cap,
ef_construction=ef_construction,
block_size=block_size,
redis_config=redis_config,
)
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert redis_da._client.info()['tcp_port'] == redis_da._config.port
assert (
redis_da._client.ft(index_name=redis_da._config.index_name).info()[
'attributes'
][0][1]
== b'embedding'
)
assert (
redis_da._client.ft(index_name=redis_da._config.index_name).info()[
'attributes'
][0][5]
== b'VECTOR'
)
def test_init_storage_update_schema(start_storage):
cfg = RedisConfig(n_dim=128, columns={'attr1': 'str'}, index_name="idx")
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert (
redis_da._client.ft(index_name=redis_da._config.index_name).info()[
'attributes'
][1][1]
== b'attr1'
)
cfg = RedisConfig(
n_dim=128, columns={'attr2': 'str'}, index_name="idx", update_schema=False
)
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert (
redis_da._client.ft(index_name=redis_da._config.index_name).info()[
'attributes'
][1][1]
== b'attr1'
)
cfg = RedisConfig(
n_dim=128, columns={'attr2': 'str'}, index_name="idx", update_schema=True
)
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert (
redis_da._client.ft(index_name=redis_da._config.index_name).info()[
'attributes'
][1][1]
== b'attr2'
)
|
# Copyright (c) OpenMMLab. All rights reserved.
# This script consists of several convert functions which
# modify the weights of a model from the original repo so that
# they can be loaded as pre-trained weights here.
from collections import OrderedDict
def swin_converter(ckpt):
new_ckpt = OrderedDict()
def correct_unfold_reduction_order(x):
out_channel, in_channel = x.shape
x = x.reshape(out_channel, 4, in_channel // 4)
x = x[:, [0, 2, 1, 3], :].transpose(1,
2).reshape(out_channel, in_channel)
return x
def correct_unfold_norm_order(x):
in_channel = x.shape[0]
x = x.reshape(4, in_channel // 4)
x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
return x
for k, v in ckpt.items():
if k.startswith('head'):
continue
elif k.startswith('layers'):
new_v = v
if 'attn.' in k:
new_k = k.replace('attn.', 'attn.w_msa.')
elif 'mlp.' in k:
if 'mlp.fc1.' in k:
new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.')
elif 'mlp.fc2.' in k:
new_k = k.replace('mlp.fc2.', 'ffn.layers.1.')
else:
new_k = k.replace('mlp.', 'ffn.')
elif 'downsample' in k:
new_k = k
if 'reduction.' in k:
new_v = correct_unfold_reduction_order(v)
elif 'norm.' in k:
new_v = correct_unfold_norm_order(v)
else:
new_k = k
new_k = new_k.replace('layers', 'stages', 1)
elif k.startswith('patch_embed'):
new_v = v
if 'proj' in k:
new_k = k.replace('proj', 'projection')
else:
new_k = k
else:
new_v = v
new_k = k
new_ckpt['backbone.' + new_k] = new_v
return new_ckpt
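# --- Hedged usage sketch (editor's addition) ---
# The checkpoint path is a placeholder, and whether the raw state dict sits under a
# "model" key depends on how the original Swin checkpoint was saved.
#
#   import torch
#   ckpt = torch.load("swin_tiny_patch4_window7_224.pth", map_location="cpu")
#   state_dict = swin_converter(ckpt.get("model", ckpt))
#   torch.save(state_dict, "swin_tiny_converted.pth")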
|
# Copyright (c) OpenMMLab. All rights reserved.
# This script consists of several convert functions which
# modify the weights of a model from the original repo so that
# they can be loaded as pre-trained weights here.
from collections import OrderedDict
def swin_converter(ckpt):
new_ckpt = OrderedDict()
def correct_unfold_reduction_order(x):
out_channel, in_channel = x.shape
x = x.reshape(out_channel, 4, in_channel // 4)
x = x[:, [0, 2, 1, 3], :].transpose(1,
2).reshape(out_channel, in_channel)
return x
def correct_unfold_norm_order(x):
in_channel = x.shape[0]
x = x.reshape(4, in_channel // 4)
x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
return x
for k, v in ckpt.items():
if k.startswith('head'):
continue
elif k.startswith('layers'):
new_v = v
if 'attn.' in k:
new_k = k.replace('attn.', 'attn.w_msa.')
elif 'mlp.' in k:
if 'mlp.fc1.' in k:
new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.')
elif 'mlp.fc2.' in k:
new_k = k.replace('mlp.fc2.', 'ffn.layers.1.')
else:
new_k = k.replace('mlp.', 'ffn.')
elif 'downsample' in k:
new_k = k
if 'reduction.' in k:
new_v = correct_unfold_reduction_order(v)
elif 'norm.' in k:
new_v = correct_unfold_norm_order(v)
else:
new_k = k
new_k = new_k.replace('layers', 'stages', 1)
elif k.startswith('patch_embed'):
new_v = v
if 'proj' in k:
new_k = k.replace('proj', 'projection')
else:
new_k = k
else:
new_v = v
new_k = k
new_ckpt[new_k] = new_v
return new_ckpt
|
from __future__ import annotations
from typing import TYPE_CHECKING
from unittest.mock import MagicMock, patch
import pytest
from langchain_community.document_loaders import ArcGISLoader
if TYPE_CHECKING:
from collections.abc import Iterator
from arcgis.features import FeatureLayer
from arcgis.gis import GIS
@pytest.fixture
def arcgis_mocks(mock_feature_layer: FeatureLayer, mock_gis: GIS) -> Iterator[None]:
sys_modules = {
"arcgis": MagicMock(),
"arcgis.features.FeatureLayer": mock_feature_layer,
"arcgis.gis.GIS": mock_gis,
}
with patch.dict("sys.modules", sys_modules):
yield
@pytest.fixture
def mock_feature_layer() -> FeatureLayer:
feature_layer = MagicMock()
feature_layer.query.return_value = [
MagicMock(as_dict={"attributes": {"field": "value"}})
]
feature_layer.url = "https://example.com/layer_url"
feature_layer.properties = {
"description": "<html><body>Some HTML content</body></html>",
"name": "test",
"serviceItemId": "testItemId",
}
return feature_layer
@pytest.fixture
def mock_gis() -> GIS:
gis = MagicMock()
gis.content.get.return_value = MagicMock(description="Item description")
return gis
@pytest.mark.usefixtures("arcgis_mocks")
def test_lazy_load(mock_feature_layer: FeatureLayer, mock_gis: GIS) -> None:
loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis)
loader.BEAUTIFULSOUP = None
documents = list(loader.lazy_load())
assert len(documents) == 1
assert documents[0].metadata["url"] == "https://example.com/layer_url"
# Add more assertions based on your expected behavior
@pytest.mark.usefixtures("arcgis_mocks")
def test_initialization_with_string_layer(
mock_feature_layer: FeatureLayer, mock_gis: GIS
) -> None:
layer_url = "https://example.com/layer_url"
with patch("arcgis.features.FeatureLayer", return_value=mock_feature_layer):
loader = ArcGISLoader(layer=layer_url, gis=mock_gis)
assert loader.url == layer_url
@pytest.mark.usefixtures("arcgis_mocks")
def test_layer_description_provided_by_user(
mock_feature_layer: FeatureLayer, mock_gis: GIS
) -> None:
custom_description = "Custom Layer Description"
loader = ArcGISLoader(
layer=mock_feature_layer, gis=mock_gis, lyr_desc=custom_description
)
layer_properties = loader._get_layer_properties(lyr_desc=custom_description)
assert layer_properties["layer_description"] == custom_description
def test_initialization_without_arcgis(
mock_feature_layer: FeatureLayer, mock_gis: GIS
) -> None:
with patch.dict("sys.modules", {"arcgis": None}):
with pytest.raises(
ImportError, match="arcgis is required to use the ArcGIS Loader"
):
ArcGISLoader(layer=mock_feature_layer, gis=mock_gis)
@pytest.mark.usefixtures("arcgis_mocks")
def test_get_layer_properties_with_description(
mock_feature_layer: FeatureLayer, mock_gis: GIS
) -> None:
loader = ArcGISLoader(
layer=mock_feature_layer, gis=mock_gis, lyr_desc="Custom Description"
)
props = loader._get_layer_properties("Custom Description")
assert props["layer_description"] == "Custom Description"
@pytest.mark.usefixtures("arcgis_mocks")
def test_load_method(mock_feature_layer: FeatureLayer, mock_gis: GIS) -> None:
loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis)
documents = loader.load()
assert len(documents) == 1
@pytest.mark.usefixtures("arcgis_mocks")
def test_geometry_returned(mock_feature_layer: FeatureLayer, mock_gis: GIS) -> None:
mock_feature_layer.query.return_value = [
MagicMock(
as_dict={
"attributes": {"field": "value"},
"geometry": {"type": "point", "coordinates": [0, 0]},
}
)
]
loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis, return_geometry=True)
documents = list(loader.lazy_load())
assert "geometry" in documents[0].metadata
@pytest.mark.usefixtures("arcgis_mocks")
def test_geometry_not_returned(mock_feature_layer: FeatureLayer, mock_gis: GIS) -> None:
loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis, return_geometry=False)
documents = list(loader.lazy_load())
assert "geometry" not in documents[0].metadata
|
from unittest.mock import MagicMock, patch
import pytest
from langchain_community.document_loaders import ArcGISLoader
@pytest.fixture
def arcgis_mocks(mock_feature_layer, mock_gis): # type: ignore
sys_modules = {
"arcgis": MagicMock(),
"arcgis.features.FeatureLayer": mock_feature_layer,
"arcgis.gis.GIS": mock_gis,
}
with patch.dict("sys.modules", sys_modules):
yield
@pytest.fixture
def mock_feature_layer(): # type: ignore
feature_layer = MagicMock()
feature_layer.query.return_value = [
MagicMock(as_dict={"attributes": {"field": "value"}})
]
feature_layer.url = "https://example.com/layer_url"
feature_layer.properties = {
"description": "<html><body>Some HTML content</body></html>",
"name": "test",
"serviceItemId": "testItemId",
}
return feature_layer
@pytest.fixture
def mock_gis(): # type: ignore
gis = MagicMock()
gis.content.get.return_value = MagicMock(description="Item description")
return gis
def test_lazy_load(arcgis_mocks, mock_feature_layer, mock_gis): # type: ignore
loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis)
loader.BEAUTIFULSOUP = None
documents = list(loader.lazy_load())
assert len(documents) == 1
assert documents[0].metadata["url"] == "https://example.com/layer_url"
# Add more assertions based on your expected behavior
def test_initialization_with_string_layer( # type: ignore
arcgis_mocks, mock_feature_layer, mock_gis
):
layer_url = "https://example.com/layer_url"
with patch("arcgis.features.FeatureLayer", return_value=mock_feature_layer):
loader = ArcGISLoader(layer=layer_url, gis=mock_gis)
assert loader.url == layer_url
def test_layer_description_provided_by_user( # type: ignore
arcgis_mocks, mock_feature_layer, mock_gis
):
custom_description = "Custom Layer Description"
loader = ArcGISLoader(
layer=mock_feature_layer, gis=mock_gis, lyr_desc=custom_description
)
layer_properties = loader._get_layer_properties(lyr_desc=custom_description)
assert layer_properties["layer_description"] == custom_description
def test_initialization_without_arcgis(mock_feature_layer, mock_gis): # type: ignore
with patch.dict("sys.modules", {"arcgis": None}):
with pytest.raises(
ImportError, match="arcgis is required to use the ArcGIS Loader"
):
ArcGISLoader(layer=mock_feature_layer, gis=mock_gis)
def test_get_layer_properties_with_description( # type: ignore
arcgis_mocks, mock_feature_layer, mock_gis
):
loader = ArcGISLoader(
layer=mock_feature_layer, gis=mock_gis, lyr_desc="Custom Description"
)
props = loader._get_layer_properties("Custom Description")
assert props["layer_description"] == "Custom Description"
def test_load_method(arcgis_mocks, mock_feature_layer, mock_gis): # type: ignore
loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis)
documents = loader.load()
assert len(documents) == 1
def test_geometry_returned(arcgis_mocks, mock_feature_layer, mock_gis): # type: ignore
mock_feature_layer.query.return_value = [
MagicMock(
as_dict={
"attributes": {"field": "value"},
"geometry": {"type": "point", "coordinates": [0, 0]},
}
)
]
loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis, return_geometry=True)
documents = list(loader.lazy_load())
assert "geometry" in documents[0].metadata
def test_geometry_not_returned( # type: ignore
arcgis_mocks, mock_feature_layer, mock_gis
):
loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis, return_geometry=False)
documents = list(loader.lazy_load())
assert "geometry" not in documents[0].metadata
|
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .custom import CustomDataset
from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset,
MultiImageMixDataset, RepeatDataset)
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler
from .utils import (NumClassCheckHook, get_loading_pipeline,
replace_ImageToTensor)
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset',
'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset',
'LVISV1Dataset', 'GroupSampler', 'DistributedGroupSampler',
'DistributedSampler', 'build_dataloader', 'ConcatDataset', 'RepeatDataset',
'ClassBalancedDataset', 'WIDERFaceDataset', 'DATASETS', 'PIPELINES',
'build_dataset', 'replace_ImageToTensor', 'get_loading_pipeline',
'NumClassCheckHook', 'CocoPanopticDataset', 'MultiImageMixDataset',
'OpenImagesDataset', 'OpenImagesChallengeDataset'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .custom import CustomDataset
from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset,
MultiImageMixDataset, RepeatDataset)
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler
from .utils import (NumClassCheckHook, get_loading_pipeline,
replace_ImageToTensor)
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset',
'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset',
'LVISV1Dataset', 'GroupSampler', 'DistributedGroupSampler',
'DistributedSampler', 'build_dataloader', 'ConcatDataset', 'RepeatDataset',
'ClassBalancedDataset', 'WIDERFaceDataset', 'DATASETS', 'PIPELINES',
'build_dataset', 'replace_ImageToTensor', 'get_loading_pipeline',
'NumClassCheckHook', 'CocoPanopticDataset', 'MultiImageMixDataset'
]
|
"""Callback Handler that writes to a file."""
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional, TextIO, cast
from typing_extensions import override
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.utils.input import print_text
if TYPE_CHECKING:
from langchain_core.agents import AgentAction, AgentFinish
class FileCallbackHandler(BaseCallbackHandler):
"""Callback Handler that writes to a file.
Parameters:
filename: The file to write to.
mode: The mode to open the file in. Defaults to "a".
color: The color to use for the text.
"""
def __init__(
self, filename: str, mode: str = "a", color: Optional[str] = None
) -> None:
"""Initialize callback handler.
Args:
filename: The filename to write to.
mode: The mode to open the file in. Defaults to "a".
color: The color to use for the text. Defaults to None.
"""
self.file = cast("TextIO", Path(filename).open(mode, encoding="utf-8")) # noqa: SIM115
self.color = color
def __del__(self) -> None:
"""Destructor to cleanup when done."""
self.file.close()
@override
def on_chain_start(
self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any
) -> None:
"""Print out that we are entering a chain.
Args:
serialized (dict[str, Any]): The serialized chain.
inputs (dict[str, Any]): The inputs to the chain.
**kwargs (Any): Additional keyword arguments.
"""
if "name" in kwargs:
name = kwargs["name"]
elif serialized:
name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
else:
name = "<unknown>"
print_text(
f"\n\n\033[1m> Entering new {name} chain...\033[0m",
end="\n",
file=self.file,
)
@override
def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
"""Print out that we finished a chain.
Args:
outputs (dict[str, Any]): The outputs of the chain.
**kwargs (Any): Additional keyword arguments.
"""
print_text("\n\033[1m> Finished chain.\033[0m", end="\n", file=self.file)
@override
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
"""Run on agent action.
Args:
action (AgentAction): The agent action.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(action.log, color=color or self.color, file=self.file)
@override
def on_tool_end(
self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation.
Args:
output (str): The output to print.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
observation_prefix (Optional[str], optional): The observation prefix.
Defaults to None.
llm_prefix (Optional[str], optional): The LLM prefix.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
if observation_prefix is not None:
print_text(f"\n{observation_prefix}", file=self.file)
print_text(output, color=color or self.color, file=self.file)
if llm_prefix is not None:
print_text(f"\n{llm_prefix}", file=self.file)
@override
def on_text(
self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any
) -> None:
"""Run when the agent ends.
Args:
text (str): The text to print.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
end (str, optional): The end character. Defaults to "".
**kwargs (Any): Additional keyword arguments.
"""
print_text(text, color=color or self.color, end=end, file=self.file)
@override
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
"""Run on the agent end.
Args:
finish (AgentFinish): The agent finish.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(finish.log, color=color or self.color, end="\n", file=self.file)
|
"""Callback Handler that writes to a file."""
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional, TextIO, cast
from typing_extensions import override
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.utils.input import print_text
if TYPE_CHECKING:
from langchain_core.agents import AgentAction, AgentFinish
class FileCallbackHandler(BaseCallbackHandler):
"""Callback Handler that writes to a file.
Parameters:
filename: The file to write to.
mode: The mode to open the file in. Defaults to "a".
color: The color to use for the text.
"""
def __init__(
self, filename: str, mode: str = "a", color: Optional[str] = None
) -> None:
"""Initialize callback handler.
Args:
filename: The filename to write to.
mode: The mode to open the file in. Defaults to "a".
color: The color to use for the text. Defaults to None.
"""
self.file = cast("TextIO", Path(filename).open(mode, encoding="utf-8")) # noqa: SIM115
self.color = color
def __del__(self) -> None:
"""Destructor to cleanup when done."""
self.file.close()
@override
def on_chain_start(
self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any
) -> None:
"""Print out that we are entering a chain.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Dict[str, Any]): The inputs to the chain.
**kwargs (Any): Additional keyword arguments.
"""
if "name" in kwargs:
name = kwargs["name"]
elif serialized:
name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
else:
name = "<unknown>"
print_text(
f"\n\n\033[1m> Entering new {name} chain...\033[0m",
end="\n",
file=self.file,
)
@override
def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
"""Print out that we finished a chain.
Args:
outputs (Dict[str, Any]): The outputs of the chain.
**kwargs (Any): Additional keyword arguments.
"""
print_text("\n\033[1m> Finished chain.\033[0m", end="\n", file=self.file)
@override
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
"""Run on agent action.
Args:
action (AgentAction): The agent action.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(action.log, color=color or self.color, file=self.file)
@override
def on_tool_end(
self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation.
Args:
output (str): The output to print.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
observation_prefix (Optional[str], optional): The observation prefix.
Defaults to None.
llm_prefix (Optional[str], optional): The LLM prefix.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
if observation_prefix is not None:
print_text(f"\n{observation_prefix}", file=self.file)
print_text(output, color=color or self.color, file=self.file)
if llm_prefix is not None:
print_text(f"\n{llm_prefix}", file=self.file)
@override
def on_text(
self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any
) -> None:
"""Run when the agent ends.
Args:
text (str): The text to print.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
end (str, optional): The end character. Defaults to "".
**kwargs (Any): Additional keyword arguments.
"""
print_text(text, color=color or self.color, end=end, file=self.file)
@override
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
"""Run on the agent end.
Args:
finish (AgentFinish): The agent finish.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(finish.log, color=color or self.color, end="\n", file=self.file)
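# Minimal usage sketch (illustrative only; the filename and texts are hypothetical).
# The handler simply mirrors callback events into the file it owns:
#
#   handler = FileCallbackHandler("chain_output.log", mode="w", color="green")
#   handler.on_chain_start({"name": "my_chain"}, {"question": "hi"})
#   handler.on_text("intermediate reasoning text")
#   handler.on_chain_end({"answer": "hello"})
#   del handler  # __del__ closes the underlying file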
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class FastRCNN(TwoStageDetector):
"""Implementation of `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_"""
def __init__(self,
backbone,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(FastRCNN, self).__init__(
backbone=backbone,
neck=neck,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
def forward_test(self, imgs, img_metas, proposals, **kwargs):
"""
Args:
imgs (List[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_metas (List[List[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch.
proposals (List[List[Tensor]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch. The Tensor should have a shape Px4, where
P is the number of proposals.
"""
for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError(f'{name} must be a list, but got {type(var)}')
num_augs = len(imgs)
if num_augs != len(img_metas):
raise ValueError(f'num of augmentations ({len(imgs)}) '
f'!= num of image meta ({len(img_metas)})')
if num_augs == 1:
return self.simple_test(imgs[0], img_metas[0], proposals[0],
**kwargs)
else:
# TODO: support test-time augmentation
            raise NotImplementedError
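# Illustrative sketch of the nested input structure forward_test expects
# (single test-time augmentation, batch of two images; shapes and names are hypothetical):
#
#   imgs = [torch.randn(2, 3, 800, 1333)]                        # one aug, NxCxHxW batch
#   img_metas = [[dict(img_shape=(800, 1333, 3)),
#                 dict(img_shape=(800, 1333, 3))]]                # one aug, per-image meta
#   proposals = [[torch.rand(100, 4), torch.rand(100, 4)]]        # one aug, Px4 per image
#   results = detector.forward_test(imgs, img_metas, proposals)   # detector built from a config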
|
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class FastRCNN(TwoStageDetector):
"""Implementation of `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_"""
def __init__(self,
backbone,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(FastRCNN, self).__init__(
backbone=backbone,
neck=neck,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
def forward_test(self, imgs, img_metas, proposals, **kwargs):
"""
Args:
imgs (List[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_metas (List[List[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch.
proposals (List[List[Tensor]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch. The Tensor should have a shape Px4, where
P is the number of proposals.
"""
for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError(f'{name} must be a list, but got {type(var)}')
num_augs = len(imgs)
if num_augs != len(img_metas):
raise ValueError(f'num of augmentations ({len(imgs)}) '
f'!= num of image meta ({len(img_metas)})')
if num_augs == 1:
return self.simple_test(imgs[0], img_metas[0], proposals[0],
**kwargs)
else:
# TODO: support test-time augmentation
            raise NotImplementedError
|
import importlib.machinery
import os
from torch.hub import _get_torch_home
_HOME = os.path.join(_get_torch_home(), "datasets", "vision")
_USE_SHARDED_DATASETS = False
def _download_file_from_remote_location(fpath: str, url: str) -> None:
pass
def _is_remote_location_available() -> bool:
return False
try:
from torch.hub import load_state_dict_from_url # noqa: 401
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url # noqa: 401
def _get_extension_path(lib_name):
lib_dir = os.path.dirname(__file__)
if os.name == "nt":
# Register the main torchvision library location on the default DLL path
import ctypes
kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True)
with_load_library_flags = hasattr(kernel32, "AddDllDirectory")
prev_error_mode = kernel32.SetErrorMode(0x0001)
if with_load_library_flags:
kernel32.AddDllDirectory.restype = ctypes.c_void_p
os.add_dll_directory(lib_dir)
kernel32.SetErrorMode(prev_error_mode)
loader_details = (importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES)
extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)
ext_specs = extfinder.find_spec(lib_name)
if ext_specs is None:
raise ImportError
return ext_specs.origin
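# Example (hypothetical extension name): resolve the compiled extension that ships next to
# this package, raising ImportError when it is absent.
#
#   so_path = _get_extension_path("_C")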
|
import importlib.machinery
import os
from torch.hub import _get_torch_home
_HOME = os.path.join(_get_torch_home(), "datasets", "vision")
_USE_SHARDED_DATASETS = False
def _download_file_from_remote_location(fpath: str, url: str) -> None:
pass
def _is_remote_location_available() -> bool:
return False
try:
from torch.hub import load_state_dict_from_url # noqa: 401
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url # noqa: 401
def _get_extension_path(lib_name):
lib_dir = os.path.dirname(__file__)
if os.name == "nt":
# Register the main torchvision library location on the default DLL path
import ctypes
import sys
kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True)
with_load_library_flags = hasattr(kernel32, "AddDllDirectory")
prev_error_mode = kernel32.SetErrorMode(0x0001)
if with_load_library_flags:
kernel32.AddDllDirectory.restype = ctypes.c_void_p
if sys.version_info >= (3, 8):
os.add_dll_directory(lib_dir)
elif with_load_library_flags:
res = kernel32.AddDllDirectory(lib_dir)
if res is None:
err = ctypes.WinError(ctypes.get_last_error())
err.strerror += f' Error adding "{lib_dir}" to the DLL directories.'
raise err
kernel32.SetErrorMode(prev_error_mode)
loader_details = (importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES)
extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)
ext_specs = extfinder.find_spec(lib_name)
if ext_specs is None:
raise ImportError
return ext_specs.origin
|
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding,
SinePositionalEncoding3D)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import ChannelAttention, DyReLU, SELayer
# yapf: disable
from .transformer import (MLP, AdaptivePadding, CdnQueryGenerator,
ConditionalAttention,
ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer,
DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder,
DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer,
DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer,
DinoTransformerDecoder, DynamicConv,
Mask2FormerTransformerDecoder,
Mask2FormerTransformerDecoderLayer,
Mask2FormerTransformerEncoder, PatchEmbed,
PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
# yapf: enable
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'PatchMerging',
'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention', 'SiLU', 'MLP',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'AdaptivePadding',
'coordinate_to_encoding', 'ConditionalAttention',
'DABDetrTransformerDecoderLayer', 'DABDetrTransformerDecoder',
'DABDetrTransformerEncoder', 'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',
'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder',
'SinePositionalEncoding3D'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import ChannelAttention, DyReLU, SELayer
# yapf: disable
from .transformer import (MLP, AdaptivePadding, CdnQueryGenerator,
ConditionalAttention,
ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer,
DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder,
DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer,
DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer,
DinoTransformerDecoder, DynamicConv,
Mask2FormerTransformerDecoder,
Mask2FormerTransformerDecoderLayer,
Mask2FormerTransformerEncoder, PatchEmbed,
PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
# yapf: enable
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'PatchMerging',
'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention', 'SiLU', 'MLP',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'AdaptivePadding',
'coordinate_to_encoding', 'ConditionalAttention',
'DABDetrTransformerDecoderLayer', 'DABDetrTransformerDecoder',
'DABDetrTransformerEncoder', 'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',
'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder'
]
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from .generation import Llama, Dialog
from .model import ModelArgs, Transformer
from .tokenizer import Tokenizer
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from .generation import Llama
from .model import ModelArgs, Transformer
from .tokenizer import Tokenizer
|
from typing import Union, Dict, Any
import google.ai.generativelanguage as glm
import google.generativeai as genai
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
CompletionResponse,
ImageBlock,
TextBlock,
)
from llama_index.core.multi_modal_llms.base import ChatMessage
from llama_index.core.utilities.gemini_utils import ROLES_FROM_GEMINI, ROLES_TO_GEMINI
def _error_if_finished_early(candidate: "glm.Candidate") -> None: # type: ignore[name-defined] # only until release
if (finish_reason := candidate.finish_reason) > 1: # 1=STOP (normally)
reason = finish_reason.name
# Safety reasons have more detail, so include that if we can.
if finish_reason == 3: # 3=Safety
relevant_safety = list(
filter(
lambda sr: sr.probability > 1, # 1=Negligible
candidate.safety_ratings,
)
)
reason += f" {relevant_safety}"
raise RuntimeError(f"Response was terminated early: {reason}")
def completion_from_gemini_response(
response: Union[
"genai.types.GenerateContentResponse",
"genai.types.AsyncGenerateContentResponse",
],
) -> CompletionResponse:
top_candidate = response.candidates[0]
_error_if_finished_early(top_candidate)
raw = {
**(type(top_candidate).to_dict(top_candidate)), # type: ignore
**(type(response.prompt_feedback).to_dict(response.prompt_feedback)), # type: ignore
}
if response.usage_metadata:
raw["usage_metadata"] = type(response.usage_metadata).to_dict(
response.usage_metadata
)
return CompletionResponse(text=response.text, raw=raw)
def chat_from_gemini_response(
response: Union[
"genai.types.GenerateContentResponse",
"genai.types.AsyncGenerateContentResponse",
],
) -> ChatResponse:
top_candidate = response.candidates[0]
_error_if_finished_early(top_candidate)
raw = {
**(type(top_candidate).to_dict(top_candidate)), # type: ignore
**(type(response.prompt_feedback).to_dict(response.prompt_feedback)), # type: ignore
}
if response.usage_metadata:
raw["usage_metadata"] = type(response.usage_metadata).to_dict(
response.usage_metadata
)
role = ROLES_FROM_GEMINI[top_candidate.content.role]
try:
# When the response contains only a function call, the library
# raises an exception.
        # The easiest way to detect this is to try accessing the text attribute and
# catch the exception.
# https://github.com/google-gemini/generative-ai-python/issues/670
text = response.text
except (ValueError, AttributeError):
text = None
additional_kwargs: Dict[str, Any] = {}
for part in response.parts:
if fn := part.function_call:
if "tool_calls" not in additional_kwargs:
additional_kwargs["tool_calls"] = []
additional_kwargs["tool_calls"].append(fn)
return ChatResponse(
message=ChatMessage(
role=role, content=text, additional_kwargs=additional_kwargs
),
raw=raw,
additional_kwargs=additional_kwargs,
)
def chat_message_to_gemini(message: ChatMessage) -> "genai.types.ContentDict":
"""Convert ChatMessages to Gemini-specific history, including ImageDocuments."""
parts = []
for block in message.blocks:
if isinstance(block, TextBlock):
if block.text:
parts.append({"text": block.text})
elif isinstance(block, ImageBlock):
base64_bytes = block.resolve_image(as_base64=False).read()
parts.append(
{
"mime_type": block.image_mimetype,
"data": base64_bytes,
}
)
else:
msg = f"Unsupported content block type: {type(block).__name__}"
raise ValueError(msg)
for tool_call in message.additional_kwargs.get("tool_calls", []):
parts.append(tool_call)
return {
"role": ROLES_TO_GEMINI[message.role],
"parts": parts,
}
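# Illustrative conversion (hypothetical content): a plain-text ChatMessage becomes a
# single-part Gemini content dict.
#
#   msg = ChatMessage(role="user", content="Describe this image")
#   chat_message_to_gemini(msg)
#   # -> {"role": "user", "parts": [{"text": "Describe this image"}]}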
|
from typing import Union, Dict, Any
import google.ai.generativelanguage as glm
import google.generativeai as genai
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
CompletionResponse,
ImageBlock,
TextBlock,
)
from llama_index.core.multi_modal_llms.base import ChatMessage
from llama_index.core.utilities.gemini_utils import ROLES_FROM_GEMINI, ROLES_TO_GEMINI
def _error_if_finished_early(candidate: "glm.Candidate") -> None: # type: ignore[name-defined] # only until release
if (finish_reason := candidate.finish_reason) > 1: # 1=STOP (normally)
reason = finish_reason.name
# Safety reasons have more detail, so include that if we can.
if finish_reason == 3: # 3=Safety
relevant_safety = list(
filter(
lambda sr: sr.probability > 1, # 1=Negligible
candidate.safety_ratings,
)
)
reason += f" {relevant_safety}"
raise RuntimeError(f"Response was terminated early: {reason}")
def completion_from_gemini_response(
response: Union[
"genai.types.GenerateContentResponse",
"genai.types.AsyncGenerateContentResponse",
],
) -> CompletionResponse:
top_candidate = response.candidates[0]
_error_if_finished_early(top_candidate)
raw = {
**(type(top_candidate).to_dict(top_candidate)), # type: ignore
**(type(response.prompt_feedback).to_dict(response.prompt_feedback)), # type: ignore
}
if response.usage_metadata:
raw["usage_metadata"] = type(response.usage_metadata).to_dict(
response.usage_metadata
)
return CompletionResponse(text=response.text, raw=raw)
def chat_from_gemini_response(
response: Union[
"genai.types.GenerateContentResponse",
"genai.types.AsyncGenerateContentResponse",
],
) -> ChatResponse:
top_candidate = response.candidates[0]
_error_if_finished_early(top_candidate)
raw = {
**(type(top_candidate).to_dict(top_candidate)), # type: ignore
**(type(response.prompt_feedback).to_dict(response.prompt_feedback)), # type: ignore
}
if response.usage_metadata:
raw["usage_metadata"] = type(response.usage_metadata).to_dict(
response.usage_metadata
)
role = ROLES_FROM_GEMINI[top_candidate.content.role]
try:
# When the response contains only a function call, the library
# raises an exception.
        # The easiest way to detect this is to try accessing the text attribute and
# catch the exception.
# https://github.com/google-gemini/generative-ai-python/issues/670
text = response.text
except (ValueError, AttributeError):
text = None
additional_kwargs: Dict[str, Any] = {}
for part in response.parts:
if fn := part.function_call:
if "tool_calls" not in additional_kwargs:
additional_kwargs["tool_calls"] = []
additional_kwargs["tool_calls"].append(fn)
return ChatResponse(
message=ChatMessage(
role=role, content=text, additional_kwargs=additional_kwargs
),
raw=raw,
additional_kwargs=additional_kwargs,
)
def chat_message_to_gemini(message: ChatMessage) -> "genai.types.ContentDict":
"""Convert ChatMessages to Gemini-specific history, including ImageDocuments."""
parts = []
for block in message.blocks:
if isinstance(block, TextBlock):
parts.append(block.text)
elif isinstance(block, ImageBlock):
base64_bytes = block.resolve_image(as_base64=False).read()
parts.append(
{
"mime_type": block.image_mimetype,
"data": base64_bytes,
}
)
else:
msg = f"Unsupported content block type: {type(block).__name__}"
raise ValueError(msg)
return {
"role": ROLES_TO_GEMINI[message.role],
"parts": parts,
}
|
from __future__ import annotations
from collections.abc import Iterable
from typing import Any
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
        ``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
Args:
model: SentenceTransformerModel
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CoSENTLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
def get_config_dict(self) -> dict[str, Any]:
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
@property
def citation(self) -> str:
return """
@online{kexuefm-8847,
title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
author={Su Jianlin},
year={2022},
month={Jan},
url={https://kexue.fm/archives/8847},
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
from typing import Any
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
Args:
model: SentenceTransformerModel
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CoSENTLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
def get_config_dict(self) -> dict[str, Any]:
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
@property
def citation(self) -> str:
return """
@online{kexuefm-8847,
title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
author={Su Jianlin},
year={2022},
month={Jan},
url={https://kexue.fm/archives/8847},
}
"""
|
import pytest
from llama_index.core.sparse_embeddings.mock_sparse_embedding import MockSparseEmbedding
text_embedding_map = {
"hello": {0: 0.25},
"world": {1: 0.5},
"foo": {2: 0.75},
}
@pytest.fixture()
def mock_sparse_embedding():
return MockSparseEmbedding(text_to_embedding=text_embedding_map)
def test_embedding_query(mock_sparse_embedding: MockSparseEmbedding):
query = "hello"
embedding = mock_sparse_embedding.get_text_embedding(query)
assert embedding == text_embedding_map[query]
def test_embedding_text(mock_sparse_embedding: MockSparseEmbedding):
text = "hello"
embedding = mock_sparse_embedding.get_text_embedding(text)
assert embedding == text_embedding_map[text]
def test_embedding_texts(mock_sparse_embedding: MockSparseEmbedding):
texts = ["hello", "world", "foo"]
embeddings = mock_sparse_embedding.get_text_embedding_batch(texts)
assert embeddings == [text_embedding_map[text] for text in texts]
def test_embedding_query_not_found(mock_sparse_embedding: MockSparseEmbedding):
query = "not_found"
embedding = mock_sparse_embedding.get_text_embedding(query)
assert embedding == mock_sparse_embedding.default_embedding
@pytest.mark.asyncio
async def test_embedding_query_async(mock_sparse_embedding: MockSparseEmbedding):
query = "hello"
embedding = await mock_sparse_embedding.aget_text_embedding(query)
assert embedding == text_embedding_map[query]
@pytest.mark.asyncio
async def test_embedding_text_async(mock_sparse_embedding: MockSparseEmbedding):
text = "hello"
embedding = await mock_sparse_embedding.aget_text_embedding(text)
assert embedding == text_embedding_map[text]
@pytest.mark.asyncio
async def test_embedding_texts_async(mock_sparse_embedding: MockSparseEmbedding):
texts = ["hello", "world", "foo"]
embeddings = await mock_sparse_embedding.aget_text_embedding_batch(texts)
assert embeddings == [text_embedding_map[text] for text in texts]
def test_similarity_search(mock_sparse_embedding: MockSparseEmbedding):
embedding1 = mock_sparse_embedding.get_text_embedding("hello")
embedding2 = mock_sparse_embedding.get_text_embedding("world")
similarity = mock_sparse_embedding.similarity(embedding1, embedding2)
assert similarity == 0.0
def test_aggregate_embeddings(mock_sparse_embedding: MockSparseEmbedding):
queries = ["hello", "world"]
embedding = mock_sparse_embedding.get_agg_embedding_from_queries(queries)
assert embedding == {0: 0.125, 1: 0.25}
@pytest.mark.asyncio
async def test_aggregate_embeddings_async(mock_sparse_embedding: MockSparseEmbedding):
queries = ["hello", "world"]
embedding = await mock_sparse_embedding.aget_agg_embedding_from_queries(queries)
assert embedding == {0: 0.125, 1: 0.25}
|
import pytest
from llama_index.core.sparse_embeddings.mock_sparse_embedding import MockSparseEmbedding
text_embedding_map = {
"hello": {0: 0.25},
"world": {1: 0.5},
"foo": {2: 0.75},
}
@pytest.fixture()
def mock_sparse_embedding():
return MockSparseEmbedding(text_to_embedding=text_embedding_map)
def test_embedding_query(mock_sparse_embedding: MockSparseEmbedding):
query = "hello"
embedding = mock_sparse_embedding.get_text_embedding(query)
assert embedding == text_embedding_map[query]
def test_embedding_text(mock_sparse_embedding: MockSparseEmbedding):
text = "hello"
embedding = mock_sparse_embedding.get_text_embedding(text)
assert embedding == text_embedding_map[text]
def test_embedding_texts(mock_sparse_embedding: MockSparseEmbedding):
texts = ["hello", "world", "foo"]
embeddings = mock_sparse_embedding.get_text_embedding_batch(texts)
assert embeddings == [text_embedding_map[text] for text in texts]
def test_embedding_query_not_found(mock_sparse_embedding: MockSparseEmbedding):
query = "not_found"
embedding = mock_sparse_embedding.get_text_embedding(query)
assert embedding == mock_sparse_embedding.default_embedding
@pytest.mark.asyncio()
async def test_embedding_query_async(mock_sparse_embedding: MockSparseEmbedding):
query = "hello"
embedding = await mock_sparse_embedding.aget_text_embedding(query)
assert embedding == text_embedding_map[query]
@pytest.mark.asyncio()
async def test_embedding_text_async(mock_sparse_embedding: MockSparseEmbedding):
text = "hello"
embedding = await mock_sparse_embedding.aget_text_embedding(text)
assert embedding == text_embedding_map[text]
@pytest.mark.asyncio()
async def test_embedding_texts_async(mock_sparse_embedding: MockSparseEmbedding):
texts = ["hello", "world", "foo"]
embeddings = await mock_sparse_embedding.aget_text_embedding_batch(texts)
assert embeddings == [text_embedding_map[text] for text in texts]
def test_similarity_search(mock_sparse_embedding: MockSparseEmbedding):
embedding1 = mock_sparse_embedding.get_text_embedding("hello")
embedding2 = mock_sparse_embedding.get_text_embedding("world")
similarity = mock_sparse_embedding.similarity(embedding1, embedding2)
assert similarity == 0.0
def test_aggregate_embeddings(mock_sparse_embedding: MockSparseEmbedding):
queries = ["hello", "world"]
embedding = mock_sparse_embedding.get_agg_embedding_from_queries(queries)
assert embedding == {0: 0.125, 1: 0.25}
@pytest.mark.asyncio()
async def test_aggregate_embeddings_async(mock_sparse_embedding: MockSparseEmbedding):
queries = ["hello", "world"]
embedding = await mock_sparse_embedding.aget_agg_embedding_from_queries(queries)
assert embedding == {0: 0.125, 1: 0.25}
|
import json
from collections.abc import Sequence
from langchain_core.agents import AgentAction, AgentActionMessageLog
from langchain_core.messages import AIMessage, BaseMessage, FunctionMessage
def _convert_agent_action_to_messages(
agent_action: AgentAction, observation: str
) -> list[BaseMessage]:
"""Convert an agent action to a message.
This code is used to reconstruct the original AI message from the agent action.
Args:
        agent_action: Agent action to convert.
        observation: The result of the corresponding tool invocation.
Returns:
AIMessage or the previous messages plus a FunctionMessage that corresponds to
the original tool invocation
"""
if isinstance(agent_action, AgentActionMessageLog):
return [
*list(agent_action.message_log),
_create_function_message(agent_action, observation),
]
return [AIMessage(content=agent_action.log)]
def _create_function_message(
agent_action: AgentAction, observation: str
) -> FunctionMessage:
"""Convert agent action and observation into a function message.
Args:
agent_action: the tool invocation request from the agent.
observation: the result of the tool invocation.
Returns:
FunctionMessage that corresponds to the original tool invocation.
Raises:
ValueError: if the observation cannot be converted to a string.
"""
if not isinstance(observation, str):
try:
content = json.dumps(observation, ensure_ascii=False)
except Exception:
content = str(observation)
else:
content = observation
return FunctionMessage(
name=agent_action.tool,
content=content,
)
def format_to_openai_function_messages(
intermediate_steps: Sequence[tuple[AgentAction, str]],
) -> list[BaseMessage]:
"""Convert (AgentAction, tool output) tuples into FunctionMessages.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
Returns:
list of messages to send to the LLM for the next prediction
Raises:
ValueError: if the observation cannot be converted to a string.
"""
messages = []
for agent_action, observation in intermediate_steps:
messages.extend(_convert_agent_action_to_messages(agent_action, observation))
return messages
# Backwards compatibility
format_to_openai_functions = format_to_openai_function_messages
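# Minimal usage sketch (hypothetical tool name and observation):
#
#   steps = [(AgentAction(tool="search", tool_input="weather", log="Looking it up"), "sunny")]
#   format_to_openai_function_messages(steps)
#   # -> [AIMessage(content="Looking it up")]; a FunctionMessage is appended only when the
#   #    action is an AgentActionMessageLog carrying the original message log.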
|
import json
from collections.abc import Sequence
from langchain_core.agents import AgentAction, AgentActionMessageLog
from langchain_core.messages import AIMessage, BaseMessage, FunctionMessage
def _convert_agent_action_to_messages(
agent_action: AgentAction, observation: str
) -> list[BaseMessage]:
"""Convert an agent action to a message.
This code is used to reconstruct the original AI message from the agent action.
Args:
        agent_action: Agent action to convert.
        observation: The result of the corresponding tool invocation.
Returns:
AIMessage or the previous messages plus a FunctionMessage that corresponds to
the original tool invocation
"""
if isinstance(agent_action, AgentActionMessageLog):
return list(agent_action.message_log) + [
_create_function_message(agent_action, observation)
]
return [AIMessage(content=agent_action.log)]
def _create_function_message(
agent_action: AgentAction, observation: str
) -> FunctionMessage:
"""Convert agent action and observation into a function message.
Args:
agent_action: the tool invocation request from the agent.
observation: the result of the tool invocation.
Returns:
FunctionMessage that corresponds to the original tool invocation.
Raises:
ValueError: if the observation cannot be converted to a string.
"""
if not isinstance(observation, str):
try:
content = json.dumps(observation, ensure_ascii=False)
except Exception:
content = str(observation)
else:
content = observation
return FunctionMessage(
name=agent_action.tool,
content=content,
)
def format_to_openai_function_messages(
intermediate_steps: Sequence[tuple[AgentAction, str]],
) -> list[BaseMessage]:
"""Convert (AgentAction, tool output) tuples into FunctionMessages.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
Returns:
list of messages to send to the LLM for the next prediction
Raises:
ValueError: if the observation cannot be converted to a string.
"""
messages = []
for agent_action, observation in intermediate_steps:
messages.extend(_convert_agent_action_to_messages(agent_action, observation))
return messages
# Backwards compatibility
format_to_openai_functions = format_to_openai_function_messages
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch.
It uses AdaptiveLayerLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64].
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python adaptive_layer_sts.py
OR
python adaptive_layer_sts.py pretrained_transformer_model_name
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import SentenceTransformer, LoggingHandler, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import sys
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
# Read the dataset
train_batch_size = 16
num_epochs = 4
model_save_path = (
"output/adaptive_layer_sts_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.AdaptiveLayerLoss(model, train_loss)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training. We skip evaluation in this example
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
test_evaluator(model, output_path=model_save_path)
# Optionally, save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.save_to_hub(f"{model_name}-sts-adaptive-layer")
except Exception:
logging.error(
"Error uploading model to the Hugging Face Hub. To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({model_save_path!r})` "
f"and saving it using `model.save_to_hub('{model_name}-sts-adaptive-layer')`."
)
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch.
It uses AdaptiveLayerLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64].
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python adaptive_layer_sts.py
OR
python adaptive_layer_sts.py pretrained_transformer_model_name
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import SentenceTransformer, LoggingHandler, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import sys
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
# Read the dataset
train_batch_size = 16
num_epochs = 4
model_save_path = (
"output/adaptive_layer_sts_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.AdaptiveLayerLoss(model, train_loss)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training. We skip evaluation in this example
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
test_evaluator(model, output_path=model_save_path)
# Optionally, save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.save_to_hub(f"{model_name}-sts-adaptive-layer")
except Exception:
logging.error(
"Error uploading model to the Hugging Face Hub. To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({model_save_path!r})` "
f"and saving it using `model.save_to_hub('{model_name}-sts-adaptive-layer')`."
)
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video processor class for Video-LLaVA."""
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
)
from ...processing_utils import Unpack, VideosKwargs
from ...utils import is_vision_available
from ...utils.import_utils import requires
from ...video_processing_utils import (
BaseVideoProcessor,
)
if is_vision_available():
from ...image_utils import PILImageResampling
class VideoLlavaFastVideoProcessorInitKwargs(VideosKwargs): ...
@requires(backends=("torchvision",))
class VideoLlavaVideoProcessor(BaseVideoProcessor):
resample = PILImageResampling.BICUBIC
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
size = {"shortest_edge": 224}
default_to_square = False
crop_size = {"height": 224, "width": 224}
do_resize = True
do_center_crop = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
do_sample_frames = False # Set to False for BC, recommended to set `True` in new models
valid_kwargs = VideoLlavaFastVideoProcessorInitKwargs
model_input_names = ["pixel_values_videos"]
def __init__(self, **kwargs: Unpack[VideoLlavaFastVideoProcessorInitKwargs]):
super().__init__(**kwargs)
__all__ = ["VideoLlavaVideoProcessor"]
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video processor class for Video-LLaVA."""
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
)
from ...processing_utils import Unpack, VideosKwargs
from ...utils import is_vision_available
from ...utils.import_utils import requires
from ...video_processing_utils import (
BaseVideoProcessor,
)
if is_vision_available():
from ...image_utils import PILImageResampling
class VideoLlavaFastVideoProcessorInitKwargs(VideosKwargs): ...
@requires(backends=("torchvision",))
class VideoLlavaVideoProcessor(BaseVideoProcessor):
resample = PILImageResampling.BICUBIC
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
size = {"shortest_edge": 224}
default_to_square = False
crop_size = {"height": 224, "width": 224}
do_resize = True
do_center_crop = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
valid_kwargs = VideoLlavaFastVideoProcessorInitKwargs
model_input_names = ["pixel_values_videos"]
def __init__(self, **kwargs: Unpack[VideoLlavaFastVideoProcessorInitKwargs]):
super().__init__(**kwargs)
__all__ = ["VideoLlavaVideoProcessor"]
|
"""Init file of LlamaIndex."""
__version__ = "0.12.10"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Init file of LlamaIndex."""
__version__ = "0.12.9"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""
This example trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_quora_duplicate_questions.py
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder, CrossEncoderTrainingArguments
from sentence_transformers.cross_encoder.evaluation import CrossEncoderClassificationEvaluator
from sentence_transformers.cross_encoder.losses import BinaryCrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 1
output_dir = "output/training_ce_quora-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 1 label
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, or rerankers like Alibaba-NLP/gte-reranker-modernbert-base
model_name = "distilroberta-base"
model = CrossEncoder(model_name, num_labels=1)
# 2. Load the Quora duplicates dataset: https://huggingface.co/datasets/sentence-transformers/quora-duplicates
logging.info("Read quora-duplicates train dataset")
dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train")
eval_dataset = dataset.select(range(10_000))
test_dataset = dataset.select(range(10_000, 20_000))
train_dataset = dataset.select(range(20_000, len(dataset)))
logging.info(train_dataset)
logging.info(eval_dataset)
logging.info(test_dataset)
# 3. Define our training loss; we use one that accepts pairs with a binary label
loss = BinaryCrossEntropyLoss(model)
# 4. Before and during training, we use CrossEncoderClassificationEvaluator to measure the performance on the dev set
dev_cls_evaluator = CrossEncoderClassificationEvaluator(
sentence_pairs=list(zip(eval_dataset["sentence1"], eval_dataset["sentence2"])),
labels=eval_dataset["label"],
name="quora-duplicates-dev",
)
dev_cls_evaluator(model)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"reranker-{short_model_name}-quora-duplicates"
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=500,
save_strategy="steps",
save_steps=500,
save_total_limit=2,
logging_steps=100,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_cls_evaluator,
)
trainer.train()
# 7. Evaluate the final model on test dataset
test_cls_evaluator = CrossEncoderClassificationEvaluator(
    sentence_pairs=list(zip(test_dataset["sentence1"], test_dataset["sentence2"])),
    labels=test_dataset["label"],
name="quora-duplicates-test",
)
test_cls_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
|
"""
This example trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_quora_duplicate_questions.py
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder, CrossEncoderTrainingArguments
from sentence_transformers.cross_encoder.evaluation import CEClassificationEvaluator
from sentence_transformers.cross_encoder.losses import BinaryCrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 1
output_dir = "output/training_ce_quora-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 1 label
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, or rerankers like Alibaba-NLP/gte-reranker-modernbert-base
model_name = "distilroberta-base"
model = CrossEncoder(model_name, num_labels=1)
# 2. Load the Quora duplicates dataset: https://huggingface.co/datasets/sentence-transformers/quora-duplicates
logging.info("Read quora-duplicates train dataset")
dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train")
eval_dataset = dataset.select(range(10_000))
test_dataset = dataset.select(range(10_000, 20_000))
train_dataset = dataset.select(range(20_000, len(dataset)))
logging.info(train_dataset)
logging.info(eval_dataset)
logging.info(test_dataset)
# 3. Define our training loss; we use one that accepts pairs with a binary label
loss = BinaryCrossEntropyLoss(model)
# 4. Before and during training, we use CEClassificationEvaluator to measure the performance on the dev set
dev_cls_evaluator = CEClassificationEvaluator(
sentence_pairs=list(zip(eval_dataset["sentence1"], eval_dataset["sentence2"])),
labels=eval_dataset["label"],
name="quora-duplicates-dev",
)
dev_cls_evaluator(model)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"reranker-{short_model_name}-quora-duplicates"
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=500,
save_strategy="steps",
save_steps=500,
save_total_limit=2,
logging_steps=100,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_cls_evaluator,
)
trainer.train()
# 7. Evaluate the final model on test dataset
test_cls_evaluator = CEClassificationEvaluator(
    sentence_pairs=list(zip(test_dataset["sentence1"], test_dataset["sentence2"])),
    labels=test_dataset["label"],
name="quora-duplicates-test",
)
test_cls_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
|
from abc import ABC
from typing import Any, Optional, Tuple, Type, TypeVar, Union
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.torch_tensor import TorchTensor
T = TypeVar('T', bound='Embedding')
class EmbeddingMixin(AbstractTensor, ABC):
alternative_type: Optional[Type] = None
@classmethod
def __docarray_validate_getitem__(cls, item: Any) -> Tuple[int]:
shape = super().__docarray_validate_getitem__(item)
if len(shape) > 1:
error_msg = f'`{cls}` can only have a single dimension/axis.'
if cls.alternative_type:
error_msg += f' Consider using {cls.alternative_type} instead.'
raise ValueError(error_msg)
return shape
class NdArrayEmbedding(NdArray, EmbeddingMixin):
alternative_type = NdArray
torch_base = type(TorchTensor) # type: Any
embedding_base = type(EmbeddingMixin) # type: Any
class metaTorchAndEmbedding(torch_base, embedding_base):
pass
class TorchEmbedding(TorchTensor, EmbeddingMixin, metaclass=metaTorchAndEmbedding):
alternative_type = TorchTensor
Embedding = Union[NdArrayEmbedding, TorchEmbedding]
|
from abc import ABC
from typing import Any, Optional, Tuple, Type, TypeVar, Union
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.torch_tensor import TorchTensor
T = TypeVar('T', bound='Embedding')
class EmbeddingMixin(AbstractTensor, ABC):
alternative_type: Optional[Type] = None
@classmethod
def __validate_getitem__(cls, item: Any) -> Tuple[int]:
shape = super().__validate_getitem__(item)
if len(shape) > 1:
error_msg = f'`{cls}` can only have a single dimension/axis.'
if cls.alternative_type:
error_msg += f' Consider using {cls.alternative_type} instead.'
raise ValueError(error_msg)
return shape
class NdArrayEmbedding(NdArray, EmbeddingMixin):
alternative_type = NdArray
torch_base = type(TorchTensor) # type: Any
embedding_base = type(EmbeddingMixin) # type: Any
class metaTorchAndEmbedding(torch_base, embedding_base):
pass
class TorchEmbedding(TorchTensor, EmbeddingMixin, metaclass=metaTorchAndEmbedding):
alternative_type = TorchTensor
Embedding = Union[NdArrayEmbedding, TorchEmbedding]
|
import zlib
from typing import Iterator, TextIO
def exact_div(x, y):
assert x % y == 0
return x // y
def str2bool(string):
str2val = {"True": True, "False": False}
if string in str2val:
return str2val[string]
else:
raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}")
def optional_int(string):
return None if string == "None" else int(string)
def optional_float(string):
return None if string == "None" else float(string)
def compression_ratio(text) -> float:
return len(text) / len(zlib.compress(text.encode("utf-8")))
def format_timestamp(seconds: float, always_include_hours: bool = False, decimal_marker: str = '.'):
assert seconds >= 0, "non-negative timestamp expected"
milliseconds = round(seconds * 1000.0)
hours = milliseconds // 3_600_000
milliseconds -= hours * 3_600_000
minutes = milliseconds // 60_000
milliseconds -= minutes * 60_000
seconds = milliseconds // 1_000
milliseconds -= seconds * 1_000
hours_marker = f"{hours}:" if always_include_hours or hours > 0 else ""
return f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}"
def write_txt(transcript: Iterator[dict], file: TextIO):
for segment in transcript:
print(segment['text'].strip(), file=file, flush=True)
def write_vtt(transcript: Iterator[dict], file: TextIO):
print("WEBVTT\n", file=file)
for segment in transcript:
print(
f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n"
f"{segment['text'].replace('-->', '->')}\n",
file=file,
flush=True,
)
def write_srt(transcript: Iterator[dict], file: TextIO):
"""
Write a transcript to a file in SRT format.
Example usage:
from pathlib import Path
from whisper.utils import write_srt
result = transcribe(model, audio_path, temperature=temperature, **args)
# save SRT
audio_basename = Path(audio_path).stem
with open(Path(output_dir) / (audio_basename + ".srt"), "w", encoding="utf-8") as srt:
write_srt(result["segments"], file=srt)
"""
for i, segment in enumerate(transcript, start=1):
# write srt lines
print(
f"{i}\n"
f"{format_timestamp(segment['start'], always_include_hours=True, decimal_marker=',')} --> "
f"{format_timestamp(segment['end'], always_include_hours=True, decimal_marker=',')}\n"
f"{segment['text'].strip().replace('-->', '->')}\n",
file=file,
flush=True,
)
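# Illustrative self-check (not part of the original module; added only as a usage
# sketch for format_timestamp above, with arbitrary example values): 3661.5 seconds
# is 1 hour, 1 minute and 1.5 seconds, and short clips omit the hour field by default.
if __name__ == "__main__":
    assert format_timestamp(3661.5) == "1:01:01.500"
    assert format_timestamp(5.25) == "00:05.250"
    assert format_timestamp(5.25, always_include_hours=True, decimal_marker=",") == "0:00:05,250"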
|
import zlib
from typing import Iterator, TextIO
def exact_div(x, y):
assert x % y == 0
return x // y
def str2bool(string):
str2val = {"True": True, "False": False}
if string in str2val:
return str2val[string]
else:
raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}")
def optional_int(string):
return None if string == "None" else int(string)
def optional_float(string):
return None if string == "None" else float(string)
def compression_ratio(text) -> float:
return len(text) / len(zlib.compress(text.encode("utf-8")))
def format_timestamp(seconds: float, always_include_hours: bool = False):
assert seconds >= 0, "non-negative timestamp expected"
milliseconds = round(seconds * 1000.0)
hours = milliseconds // 3_600_000
milliseconds -= hours * 3_600_000
minutes = milliseconds // 60_000
milliseconds -= minutes * 60_000
seconds = milliseconds // 1_000
milliseconds -= seconds * 1_000
hours_marker = f"{hours}:" if always_include_hours or hours > 0 else ""
return f"{hours_marker}{minutes:02d}:{seconds:02d}.{milliseconds:03d}"
def write_txt(transcript: Iterator[dict], file: TextIO):
for segment in transcript:
print(segment['text'].strip(), file=file, flush=True)
def write_vtt(transcript: Iterator[dict], file: TextIO):
print("WEBVTT\n", file=file)
for segment in transcript:
print(
f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n"
f"{segment['text'].replace('-->', '->')}\n",
file=file,
flush=True,
)
def write_srt(transcript: Iterator[dict], file: TextIO):
"""
Write a transcript to a file in SRT format.
Example usage:
from pathlib import Path
from whisper.utils import write_srt
result = transcribe(model, audio_path, temperature=temperature, **args)
# save SRT
audio_basename = Path(audio_path).stem
with open(Path(output_dir) / (audio_basename + ".srt"), "w", encoding="utf-8") as srt:
write_srt(result["segments"], file=srt)
"""
for i, segment in enumerate(transcript, start=1):
# write srt lines
print(
f"{i}\n"
f"{format_timestamp(segment['start'], always_include_hours=True)} --> "
f"{format_timestamp(segment['end'], always_include_hours=True)}\n"
f"{segment['text'].strip().replace('-->', '->')}\n",
file=file,
flush=True,
)
|
"""LLM Compiler agent pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.agent import AgentRunner
from llama_index.core.callbacks import CallbackManager
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.llms.llm import LLM
from llama_index.core.tools.types import BaseTool
from llama_index.llms.openai import OpenAI
from .step import LLMCompilerAgentWorker
class LLMCompilerAgentPack(BaseLlamaPack):
"""
LLMCompilerAgent pack.
Args:
tools (List[BaseTool]): List of tools to use.
llm (Optional[LLM]): LLM to use.
"""
def __init__(
self,
tools: List[BaseTool],
llm: Optional[LLM] = None,
callback_manager: Optional[CallbackManager] = None,
agent_worker_kwargs: Optional[Dict[str, Any]] = None,
agent_runner_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
"""Init params."""
self.llm = llm or OpenAI(model="gpt-4")
self.callback_manager = callback_manager or self.llm.callback_manager
self.agent_worker = LLMCompilerAgentWorker.from_tools(
tools,
llm=llm,
verbose=True,
callback_manager=self.callback_manager,
**(agent_worker_kwargs or {}),
)
self.agent = AgentRunner(
self.agent_worker,
callback_manager=self.callback_manager,
**(agent_runner_kwargs or {}),
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"llm": self.llm,
"callback_manager": self.callback_manager,
"agent_worker": self.agent_worker,
"agent": self.agent,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.agent.chat(*args, **kwargs)
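# Hedged usage sketch (not part of the pack): assumes an OpenAI API key is configured
# in the environment; the toy tool and question below are illustrative only.
#
#   from llama_index.core.tools import FunctionTool
#
#   def multiply(a: int, b: int) -> int:
#       """Multiply two integers."""
#       return a * b
#
#   pack = LLMCompilerAgentPack(tools=[FunctionTool.from_defaults(fn=multiply)])
#   print(pack.run("What is 3 times 7?"))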
|
"""LLM Compiler agent pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.agent import AgentRunner
from llama_index.core.callbacks import CallbackManager
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.llms.llm import LLM
from llama_index.core.tools.types import BaseTool
from llama_index.llms.openai import OpenAI
from .step import LLMCompilerAgentWorker
class LLMCompilerAgentPack(BaseLlamaPack):
"""
LLMCompilerAgent pack.
Args:
tools (List[BaseTool]): List of tools to use.
llm (Optional[LLM]): LLM to use.
"""
def __init__(
self,
tools: List[BaseTool],
llm: Optional[LLM] = None,
callback_manager: Optional[CallbackManager] = None,
agent_worker_kwargs: Optional[Dict[str, Any]] = None,
agent_runner_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
"""Init params."""
self.llm = llm or OpenAI(model="gpt-4")
self.callback_manager = callback_manager or self.llm.callback_manager
self.agent_worker = LLMCompilerAgentWorker.from_tools(
tools,
llm=llm,
verbose=True,
callback_manager=self.callback_manager,
**(agent_worker_kwargs or {})
)
self.agent = AgentRunner(
self.agent_worker,
callback_manager=self.callback_manager,
**(agent_runner_kwargs or {})
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"llm": self.llm,
"callback_manager": self.callback_manager,
"agent_worker": self.agent_worker,
"agent": self.agent,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.agent.chat(*args, **kwargs)
|
from .gateway import HTTPGateway
__all__ = ['HTTPGateway']
|
from .gateway import HTTPGateway
|
import importlib
import pytest
from fastapi import FastAPI
from fastapi.testclient import TestClient
from fastapi.websockets import WebSocketDisconnect
from ...utils import needs_py39, needs_py310
@pytest.fixture(
name="app",
params=[
"tutorial002",
pytest.param("tutorial002_py310", marks=needs_py310),
"tutorial002_an",
pytest.param("tutorial002_an_py39", marks=needs_py39),
pytest.param("tutorial002_an_py310", marks=needs_py310),
],
)
def get_app(request: pytest.FixtureRequest):
mod = importlib.import_module(f"docs_src.websockets.{request.param}")
return mod.app
def test_main(app: FastAPI):
client = TestClient(app)
response = client.get("/")
assert response.status_code == 200, response.text
assert b"<!DOCTYPE html>" in response.content
def test_websocket_with_cookie(app: FastAPI):
client = TestClient(app, cookies={"session": "fakesession"})
with pytest.raises(WebSocketDisconnect):
with client.websocket_connect("/items/foo/ws") as websocket:
message = "Message one"
websocket.send_text(message)
data = websocket.receive_text()
assert data == "Session cookie or query token value is: fakesession"
data = websocket.receive_text()
assert data == f"Message text was: {message}, for item ID: foo"
message = "Message two"
websocket.send_text(message)
data = websocket.receive_text()
assert data == "Session cookie or query token value is: fakesession"
data = websocket.receive_text()
assert data == f"Message text was: {message}, for item ID: foo"
def test_websocket_with_header(app: FastAPI):
client = TestClient(app)
with pytest.raises(WebSocketDisconnect):
with client.websocket_connect("/items/bar/ws?token=some-token") as websocket:
message = "Message one"
websocket.send_text(message)
data = websocket.receive_text()
assert data == "Session cookie or query token value is: some-token"
data = websocket.receive_text()
assert data == f"Message text was: {message}, for item ID: bar"
message = "Message two"
websocket.send_text(message)
data = websocket.receive_text()
assert data == "Session cookie or query token value is: some-token"
data = websocket.receive_text()
assert data == f"Message text was: {message}, for item ID: bar"
def test_websocket_with_header_and_query(app: FastAPI):
client = TestClient(app)
with pytest.raises(WebSocketDisconnect):
with client.websocket_connect("/items/2/ws?q=3&token=some-token") as websocket:
message = "Message one"
websocket.send_text(message)
data = websocket.receive_text()
assert data == "Session cookie or query token value is: some-token"
data = websocket.receive_text()
assert data == "Query parameter q is: 3"
data = websocket.receive_text()
assert data == f"Message text was: {message}, for item ID: 2"
message = "Message two"
websocket.send_text(message)
data = websocket.receive_text()
assert data == "Session cookie or query token value is: some-token"
data = websocket.receive_text()
assert data == "Query parameter q is: 3"
data = websocket.receive_text()
assert data == f"Message text was: {message}, for item ID: 2"
def test_websocket_no_credentials(app: FastAPI):
client = TestClient(app)
with pytest.raises(WebSocketDisconnect):
with client.websocket_connect("/items/foo/ws"):
pytest.fail(
"did not raise WebSocketDisconnect on __enter__"
) # pragma: no cover
def test_websocket_invalid_data(app: FastAPI):
client = TestClient(app)
with pytest.raises(WebSocketDisconnect):
with client.websocket_connect("/items/foo/ws?q=bar&token=some-token"):
pytest.fail(
"did not raise WebSocketDisconnect on __enter__"
) # pragma: no cover
|
import pytest
from fastapi.testclient import TestClient
from fastapi.websockets import WebSocketDisconnect
from docs_src.websockets.tutorial002 import app
def test_main():
client = TestClient(app)
response = client.get("/")
assert response.status_code == 200, response.text
assert b"<!DOCTYPE html>" in response.content
def test_websocket_with_cookie():
client = TestClient(app, cookies={"session": "fakesession"})
with pytest.raises(WebSocketDisconnect):
with client.websocket_connect("/items/foo/ws") as websocket:
message = "Message one"
websocket.send_text(message)
data = websocket.receive_text()
assert data == "Session cookie or query token value is: fakesession"
data = websocket.receive_text()
assert data == f"Message text was: {message}, for item ID: foo"
message = "Message two"
websocket.send_text(message)
data = websocket.receive_text()
assert data == "Session cookie or query token value is: fakesession"
data = websocket.receive_text()
assert data == f"Message text was: {message}, for item ID: foo"
def test_websocket_with_header():
client = TestClient(app)
with pytest.raises(WebSocketDisconnect):
with client.websocket_connect("/items/bar/ws?token=some-token") as websocket:
message = "Message one"
websocket.send_text(message)
data = websocket.receive_text()
assert data == "Session cookie or query token value is: some-token"
data = websocket.receive_text()
assert data == f"Message text was: {message}, for item ID: bar"
message = "Message two"
websocket.send_text(message)
data = websocket.receive_text()
assert data == "Session cookie or query token value is: some-token"
data = websocket.receive_text()
assert data == f"Message text was: {message}, for item ID: bar"
def test_websocket_with_header_and_query():
client = TestClient(app)
with pytest.raises(WebSocketDisconnect):
with client.websocket_connect("/items/2/ws?q=3&token=some-token") as websocket:
message = "Message one"
websocket.send_text(message)
data = websocket.receive_text()
assert data == "Session cookie or query token value is: some-token"
data = websocket.receive_text()
assert data == "Query parameter q is: 3"
data = websocket.receive_text()
assert data == f"Message text was: {message}, for item ID: 2"
message = "Message two"
websocket.send_text(message)
data = websocket.receive_text()
assert data == "Session cookie or query token value is: some-token"
data = websocket.receive_text()
assert data == "Query parameter q is: 3"
data = websocket.receive_text()
assert data == f"Message text was: {message}, for item ID: 2"
def test_websocket_no_credentials():
client = TestClient(app)
with pytest.raises(WebSocketDisconnect):
with client.websocket_connect("/items/foo/ws"):
pytest.fail(
"did not raise WebSocketDisconnect on __enter__"
) # pragma: no cover
def test_websocket_invalid_data():
client = TestClient(app)
with pytest.raises(WebSocketDisconnect):
with client.websocket_connect("/items/foo/ws?q=bar&token=some-token"):
pytest.fail(
"did not raise WebSocketDisconnect on __enter__"
) # pragma: no cover
|
import torch
from torchvision.transforms import autoaugment, transforms
from torchvision.transforms.functional import InterpolationMode
class ClassificationPresetTrain:
def __init__(
self,
*,
crop_size,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BILINEAR,
hflip_prob=0.5,
auto_augment_policy=None,
ra_magnitude=9,
augmix_severity=3,
random_erase_prob=0.0,
backend="pil",
):
trans = []
backend = backend.lower()
if backend == "tensor":
trans.append(transforms.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")
trans.append(transforms.RandomResizedCrop(crop_size, interpolation=interpolation, antialias=True))
if hflip_prob > 0:
trans.append(transforms.RandomHorizontalFlip(hflip_prob))
if auto_augment_policy is not None:
if auto_augment_policy == "ra":
trans.append(autoaugment.RandAugment(interpolation=interpolation, magnitude=ra_magnitude))
elif auto_augment_policy == "ta_wide":
trans.append(autoaugment.TrivialAugmentWide(interpolation=interpolation))
elif auto_augment_policy == "augmix":
trans.append(autoaugment.AugMix(interpolation=interpolation, severity=augmix_severity))
else:
aa_policy = autoaugment.AutoAugmentPolicy(auto_augment_policy)
trans.append(autoaugment.AutoAugment(policy=aa_policy, interpolation=interpolation))
if backend == "pil":
trans.append(transforms.PILToTensor())
trans.extend(
[
transforms.ConvertImageDtype(torch.float),
transforms.Normalize(mean=mean, std=std),
]
)
if random_erase_prob > 0:
trans.append(transforms.RandomErasing(p=random_erase_prob))
self.transforms = transforms.Compose(trans)
def __call__(self, img):
return self.transforms(img)
class ClassificationPresetEval:
def __init__(
self,
*,
crop_size,
resize_size=256,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BILINEAR,
backend="pil",
):
trans = []
backend = backend.lower()
if backend == "tensor":
trans.append(transforms.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")
trans += [
transforms.Resize(resize_size, interpolation=interpolation, antialias=True),
transforms.CenterCrop(crop_size),
]
if backend == "pil":
trans.append(transforms.PILToTensor())
trans += [
transforms.ConvertImageDtype(torch.float),
transforms.Normalize(mean=mean, std=std),
]
self.transforms = transforms.Compose(trans)
def __call__(self, img):
return self.transforms(img)
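# Minimal usage sketch (not part of the original module; the image size below is
# arbitrary): the eval preset resizes, center-crops, converts to a float tensor and
# normalizes, so a PIL image comes out as a (3, crop_size, crop_size) float tensor.
if __name__ == "__main__":
    from PIL import Image

    preset = ClassificationPresetEval(crop_size=224, resize_size=256)
    out = preset(Image.new("RGB", (500, 375)))
    assert out.shape == (3, 224, 224)
    assert out.dtype == torch.float32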
|
import torch
from torchvision.transforms import autoaugment, transforms
from torchvision.transforms.functional import InterpolationMode
class ClassificationPresetTrain:
def __init__(
self,
*,
crop_size,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BILINEAR,
hflip_prob=0.5,
auto_augment_policy=None,
ra_magnitude=9,
augmix_severity=3,
random_erase_prob=0.0,
backend="pil",
):
trans = []
backend = backend.lower()
if backend == "tensor":
trans.append(transforms.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")
trans.append(transforms.RandomResizedCrop(crop_size, interpolation=interpolation, antialias=True))
if hflip_prob > 0:
trans.append(transforms.RandomHorizontalFlip(hflip_prob))
if auto_augment_policy is not None:
if auto_augment_policy == "ra":
trans.append(autoaugment.RandAugment(interpolation=interpolation, magnitude=ra_magnitude))
elif auto_augment_policy == "ta_wide":
trans.append(autoaugment.TrivialAugmentWide(interpolation=interpolation))
elif auto_augment_policy == "augmix":
trans.append(autoaugment.AugMix(interpolation=interpolation, severity=augmix_severity))
else:
aa_policy = autoaugment.AutoAugmentPolicy(auto_augment_policy)
trans.append(autoaugment.AutoAugment(policy=aa_policy, interpolation=interpolation))
if backend == "pil":
trans.append(transforms.PILToTensor())
trans.extend(
[
transforms.ConvertImageDtype(torch.float),
transforms.Normalize(mean=mean, std=std),
]
)
if random_erase_prob > 0:
trans.append(transforms.RandomErasing(p=random_erase_prob))
self.transforms = transforms.Compose(trans)
def __call__(self, img):
return self.transforms(img)
class ClassificationPresetEval:
def __init__(
self,
*,
crop_size,
resize_size=256,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BILINEAR,
backend="pil",
):
trans = []
backend = backend.lower()
if backend == "tensor":
trans.append(transforms.PILToTensor())
else:
raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")
trans += [
transforms.Resize(resize_size, interpolation=interpolation, antialias=True),
transforms.CenterCrop(crop_size),
]
if backend == "pil":
trans.append(transforms.PILToTensor())
trans += [
transforms.ConvertImageDtype(torch.float),
transforms.Normalize(mean=mean, std=std),
]
self.transforms = transforms.Compose(trans)
def __call__(self, img):
return self.transforms(img)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import SABLRetinaHead
class TestSABLRetinaHead(TestCase):
def test_sabl_retina_head(self):
"""Tests sabl retina head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s),
'pad_shape': (s, s),
'scale_factor': [1, 1],
}]
train_cfg = ConfigDict(
dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
sabl_retina_head = SABLRetinaHead(
num_classes=4,
in_channels=1,
feat_channels=1,
stacked_convs=1,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5),
train_cfg=train_cfg)
        # The SABL retina head expects multiple levels of features per image
feats = (
torch.rand(1, 1, s // stride[1], s // stride[0])
for stride in sabl_retina_head.square_anchor_generator.strides)
outs = sabl_retina_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = sabl_retina_head.loss_by_feat(
*outs, [gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but
# box loss and centerness loss should be zero
empty_cls_loss = sum(empty_gt_losses['loss_cls']).item()
empty_box_cls_loss = sum(empty_gt_losses['loss_bbox_cls']).item()
empty_box_reg_loss = sum(empty_gt_losses['loss_bbox_reg']).item()
self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
self.assertEqual(
empty_box_cls_loss, 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_box_reg_loss, 0,
'there should be no centerness loss when there are no true boxes')
# When truth is non-empty then all cls, box loss and centerness loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = sabl_retina_head.loss_by_feat(*outs, [gt_instances],
img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls']).item()
onegt_box_cls_loss = sum(one_gt_losses['loss_bbox_cls']).item()
onegt_box_reg_loss = sum(one_gt_losses['loss_bbox_reg']).item()
self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')
self.assertGreater(onegt_box_cls_loss, 0,
'box loss should be non-zero')
self.assertGreater(onegt_box_reg_loss, 0,
'centerness loss should be non-zero')
test_cfg = ConfigDict(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
# test predict_by_feat
sabl_retina_head.predict_by_feat(
*outs, batch_img_metas=img_metas, cfg=test_cfg, rescale=True)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from mmdet.models.dense_heads import SABLRetinaHead
class TestSABLRetinaHead(TestCase):
def test_sabl_retina_head(self):
"""Tests sabl retina head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s),
'pad_shape': (s, s),
'scale_factor': [1, 1],
}]
train_cfg = ConfigDict(
dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
sabl_retina_head = SABLRetinaHead(
num_classes=4,
in_channels=1,
feat_channels=1,
stacked_convs=1,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5),
train_cfg=train_cfg)
        # The SABL retina head expects multiple levels of features per image
feats = (
torch.rand(1, 1, s // stride[1], s // stride[0])
for stride in sabl_retina_head.square_anchor_generator.strides)
outs = sabl_retina_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = sabl_retina_head.loss_by_feat(
*outs, [gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but
# box loss and centerness loss should be zero
empty_cls_loss = sum(empty_gt_losses['loss_cls']).item()
empty_box_cls_loss = sum(empty_gt_losses['loss_bbox_cls']).item()
empty_box_reg_loss = sum(empty_gt_losses['loss_bbox_reg']).item()
self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
self.assertEqual(
empty_box_cls_loss, 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_box_reg_loss, 0,
'there should be no centerness loss when there are no true boxes')
# When truth is non-empty then all cls, box loss and centerness loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = sabl_retina_head.loss_by_feat(*outs, [gt_instances],
img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls']).item()
onegt_box_cls_loss = sum(one_gt_losses['loss_bbox_cls']).item()
onegt_box_reg_loss = sum(one_gt_losses['loss_bbox_reg']).item()
self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')
self.assertGreater(onegt_box_cls_loss, 0,
'box loss should be non-zero')
self.assertGreater(onegt_box_reg_loss, 0,
'centerness loss should be non-zero')
test_cfg = ConfigDict(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
# test predict_by_feat
sabl_retina_head.predict_by_feat(
*outs, batch_img_metas=img_metas, cfg=test_cfg, rescale=True)
|
"""Simple reader that reads weather data from OpenWeatherMap API"""
from __future__ import annotations
from datetime import datetime
from typing import Iterator, Optional, Sequence
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utilities.openweathermap import OpenWeatherMapAPIWrapper
class WeatherDataLoader(BaseLoader):
"""Load weather data with `Open Weather Map` API.
    Reads the forecast & current weather of any location using OpenWeatherMap's free
    API. Check out 'https://openweathermap.org/appid' for more on how to generate a free
    OpenWeatherMap API key.
"""
def __init__(
self,
client: OpenWeatherMapAPIWrapper,
places: Sequence[str],
) -> None:
"""Initialize with parameters."""
super().__init__()
self.client = client
self.places = places
@classmethod
def from_params(
cls, places: Sequence[str], *, openweathermap_api_key: Optional[str] = None
) -> WeatherDataLoader:
client = OpenWeatherMapAPIWrapper(openweathermap_api_key=openweathermap_api_key)
return cls(client, places)
def lazy_load(
self,
) -> Iterator[Document]:
"""Lazily load weather data for the given locations."""
for place in self.places:
metadata = {"queried_at": datetime.now()}
content = self.client.run(place)
yield Document(page_content=content, metadata=metadata)
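# Illustrative usage (a sketch, not part of this module; the place names are
# placeholders and a real OpenWeatherMap API key is required):
#
#   loader = WeatherDataLoader.from_params(
#       places=["London", "Tokyo"], openweathermap_api_key="YOUR_API_KEY"
#   )
#   for doc in loader.lazy_load():
#       print(doc.metadata["queried_at"], doc.page_content[:80])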
|
"""Simple reader that reads weather data from OpenWeatherMap API"""
from __future__ import annotations
from datetime import datetime
from typing import Iterator, Optional, Sequence
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utilities.openweathermap import OpenWeatherMapAPIWrapper
class WeatherDataLoader(BaseLoader):
"""Load weather data with `Open Weather Map` API.
    Reads the forecast & current weather of any location using OpenWeatherMap's free
    API. Check out 'https://openweathermap.org/appid' for more on how to generate a free
    OpenWeatherMap API key.
"""
def __init__(
self,
client: OpenWeatherMapAPIWrapper,
places: Sequence[str],
) -> None:
"""Initialize with parameters."""
super().__init__()
self.client = client
self.places = places
@classmethod
def from_params(
cls, places: Sequence[str], *, openweathermap_api_key: Optional[str] = None
) -> WeatherDataLoader:
client = OpenWeatherMapAPIWrapper(openweathermap_api_key=openweathermap_api_key) # type: ignore[call-arg]
return cls(client, places)
def lazy_load(
self,
) -> Iterator[Document]:
"""Lazily load weather data for the given locations."""
for place in self.places:
metadata = {"queried_at": datetime.now()}
content = self.client.run(place)
yield Document(page_content=content, metadata=metadata)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"VPTQ (Vector Post-Training Quantization) integration file"
import torch.nn as nn
from accelerate import init_empty_weights
from vptq import VQuantLinear
def replace_with_vptq_linear(
model,
quantization_config=None,
modules_to_not_convert=None,
current_key_name=None,
has_been_replaced=False,
):
"""
Public method that recursively replaces the Linear layers of the given model with VPTQ quantized layers.
`accelerate` is needed to use this method. Returns the converted model and a boolean that indicates if the
conversion has been successful or not.
Args:
model (`torch.nn.Module`):
The model to convert, can be any `torch.nn.Module` instance.
quantization_config (`VptqConfig`):
The quantization config object that contains the quantization parameters.
modules_to_not_convert (`list[`str`]`, *optional*, defaults to `["lm_head"]`):
Names of the modules to not convert in `VQuantLinear`. In practice we keep the `lm_head` in full precision
for numerical stability reasons.
current_key_name (`list`, *optional*):
A list that contains the current key name. This is used for recursion and should not be passed by the user.
has_been_replaced (`bool`, *optional*):
A boolean that indicates if the conversion has been successful or not. This is used for recursion and
should not be passed by the user.
"""
modules_to_not_convert = ["lm_head"] if not modules_to_not_convert else modules_to_not_convert
for name, module in model.named_children():
if current_key_name is None:
current_key_name = []
current_key_name.append(name)
layer_name = ".".join(current_key_name)
shared_layer_config = quantization_config.shared_layer_config
config_for_layers = quantization_config.config_for_layers
if (
isinstance(module, nn.Linear)
and layer_name not in modules_to_not_convert
and ((layer_name in config_for_layers) or (current_key_name[-1] in shared_layer_config))
):
layer_params = config_for_layers.get(layer_name, None) or shared_layer_config.get(
current_key_name[-1], None
)
with init_empty_weights():
in_features = module.in_features
out_features = module.out_features
model._modules[name] = VQuantLinear(
in_features,
out_features,
vector_lens=layer_params["vector_lens"],
num_centroids=layer_params["num_centroids"],
num_res_centroids=layer_params["num_res_centroids"],
group_num=layer_params["group_num"],
group_size=layer_params["group_size"],
outlier_size=layer_params["outlier_size"],
indices_as_float=layer_params["indices_as_float"],
enable_norm=layer_params["enable_norm"],
enable_perm=layer_params["enable_perm"],
is_indice_packed=True,
enable_proxy_error=False,
bias=module.bias is not None,
)
has_been_replaced = True
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(False)
if len(list(module.children())) > 0:
_, has_been_replaced = replace_with_vptq_linear(
module,
quantization_config=quantization_config,
modules_to_not_convert=modules_to_not_convert,
current_key_name=current_key_name,
has_been_replaced=has_been_replaced,
)
# Remove the last key for recursion
current_key_name.pop(-1)
return model, has_been_replaced
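# Hedged usage sketch (not part of this module): `vptq_config` is assumed to be the
# VptqConfig that accompanies a VPTQ-quantized checkpoint; the helper above rewrites
# eligible nn.Linear layers in place and reports whether any replacement happened.
#
#   model, replaced = replace_with_vptq_linear(model, quantization_config=vptq_config)
#   # if `replaced` is False, no layer matched the config and the model is unchanged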
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"VPTQ (Vector Post-Training Quantization) integration file"
import torch.nn as nn
from accelerate import init_empty_weights
from vptq import VQuantLinear
def replace_with_vptq_linear(
model,
quantization_config=None,
modules_to_not_convert=None,
current_key_name=None,
has_been_replaced=False,
):
"""
Public method that recursively replaces the Linear layers of the given model with VPTQ quantized layers.
`accelerate` is needed to use this method. Returns the converted model and a boolean that indicates if the
conversion has been successful or not.
Args:
model (`torch.nn.Module`):
The model to convert, can be any `torch.nn.Module` instance.
quantization_config (`VptqConfig`):
The quantization config object that contains the quantization parameters.
modules_to_not_convert (`List[`str`]`, *optional*, defaults to `["lm_head"]`):
Names of the modules to not convert in `VQuantLinear`. In practice we keep the `lm_head` in full precision
for numerical stability reasons.
current_key_name (`list`, *optional*):
A list that contains the current key name. This is used for recursion and should not be passed by the user.
has_been_replaced (`bool`, *optional*):
A boolean that indicates if the conversion has been successful or not. This is used for recursion and
should not be passed by the user.
"""
modules_to_not_convert = ["lm_head"] if not modules_to_not_convert else modules_to_not_convert
for name, module in model.named_children():
if current_key_name is None:
current_key_name = []
current_key_name.append(name)
layer_name = ".".join(current_key_name)
shared_layer_config = quantization_config.shared_layer_config
config_for_layers = quantization_config.config_for_layers
if (
isinstance(module, nn.Linear)
and layer_name not in modules_to_not_convert
and ((layer_name in config_for_layers) or (current_key_name[-1] in shared_layer_config))
):
layer_params = config_for_layers.get(layer_name, None) or shared_layer_config.get(
current_key_name[-1], None
)
with init_empty_weights():
in_features = module.in_features
out_features = module.out_features
model._modules[name] = VQuantLinear(
in_features,
out_features,
vector_lens=layer_params["vector_lens"],
num_centroids=layer_params["num_centroids"],
num_res_centroids=layer_params["num_res_centroids"],
group_num=layer_params["group_num"],
group_size=layer_params["group_size"],
outlier_size=layer_params["outlier_size"],
indices_as_float=layer_params["indices_as_float"],
enable_norm=layer_params["enable_norm"],
enable_perm=layer_params["enable_perm"],
is_indice_packed=True,
enable_proxy_error=False,
bias=module.bias is not None,
)
has_been_replaced = True
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(False)
if len(list(module.children())) > 0:
_, has_been_replaced = replace_with_vptq_linear(
module,
quantization_config=quantization_config,
modules_to_not_convert=modules_to_not_convert,
current_key_name=current_key_name,
has_been_replaced=has_been_replaced,
)
# Remove the last key for recursion
current_key_name.pop(-1)
return model, has_been_replaced
|
"""
Feature agglomeration. Base classes and functions for performing feature
agglomeration.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from scipy.sparse import issparse
from ..base import TransformerMixin
from ..utils.validation import check_is_fitted, validate_data
###############################################################################
# Mixin class for feature agglomeration.
class AgglomerationTransform(TransformerMixin):
"""
A class for feature agglomeration via the transform interface.
"""
def transform(self, X):
"""
Transform a new matrix using the built clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features) or \
(n_samples, n_samples)
            An M by N array of M observations in N dimensions or a length
M array of M one-dimensional observations.
Returns
-------
Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,)
The pooled values for each feature cluster.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
if self.pooling_func == np.mean and not issparse(X):
size = np.bincount(self.labels_)
n_samples = X.shape[0]
# a fast way to compute the mean of grouped features
nX = np.array(
[np.bincount(self.labels_, X[i, :]) / size for i in range(n_samples)]
)
else:
nX = [
self.pooling_func(X[:, self.labels_ == l], axis=1)
for l in np.unique(self.labels_)
]
nX = np.array(nX).T
return nX
def inverse_transform(self, X):
"""
Inverse the transformation and return a vector of size `n_features`.
Parameters
----------
X : array-like of shape (n_samples, n_clusters) or (n_clusters,)
The values to be assigned to each cluster of samples.
Returns
-------
X : ndarray of shape (n_samples, n_features) or (n_features,)
A vector of size `n_samples` with the values of `Xred` assigned to
each of the cluster of samples.
"""
check_is_fitted(self)
unil, inverse = np.unique(self.labels_, return_inverse=True)
return X[..., inverse]
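# Illustrative use of this mixin through FeatureAgglomeration, which inherits
# transform/inverse_transform from AgglomerationTransform (a minimal sketch, not
# part of this module; the array shapes are arbitrary):
#
#   import numpy as np
#   from sklearn.cluster import FeatureAgglomeration
#
#   X = np.random.RandomState(0).rand(10, 6)
#   agglo = FeatureAgglomeration(n_clusters=2).fit(X)
#   Xt = agglo.transform(X)           # (10, 2): mean of each feature cluster
#   Xr = agglo.inverse_transform(Xt)  # (10, 6): cluster means broadcast back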
|
"""
Feature agglomeration. Base classes and functions for performing feature
agglomeration.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from scipy.sparse import issparse
from ..base import TransformerMixin
from ..utils.validation import check_is_fitted, validate_data
###############################################################################
# Mixin class for feature agglomeration.
class AgglomerationTransform(TransformerMixin):
"""
A class for feature agglomeration via the transform interface.
"""
def transform(self, X):
"""
Transform a new matrix using the built clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features) or \
(n_samples, n_samples)
            An M by N array of M observations in N dimensions or a length
M array of M one-dimensional observations.
Returns
-------
Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,)
The pooled values for each feature cluster.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
if self.pooling_func == np.mean and not issparse(X):
size = np.bincount(self.labels_)
n_samples = X.shape[0]
# a fast way to compute the mean of grouped features
nX = np.array(
[np.bincount(self.labels_, X[i, :]) / size for i in range(n_samples)]
)
else:
nX = [
self.pooling_func(X[:, self.labels_ == l], axis=1)
for l in np.unique(self.labels_)
]
nX = np.array(nX).T
return nX
def inverse_transform(self, X):
"""
Inverse the transformation and return a vector of size `n_features`.
Parameters
----------
X : array-like of shape (n_samples, n_clusters) or (n_clusters,)
The values to be assigned to each cluster of samples.
Returns
-------
X : ndarray of shape (n_samples, n_features) or (n_features,)
A vector of size `n_samples` with the values of `Xred` assigned to
each of the cluster of samples.
"""
check_is_fitted(self)
unil, inverse = np.unique(self.labels_, return_inverse=True)
return X[..., inverse]
|
from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.retrieval_precision_metric import (
RetrievalPrecisionMetric,
)
from tonic_validate.services.openai_service import OpenAIService
class RetrievalPrecisionEvaluator(BaseEvaluator):
"""
Tonic Validate's retrieval precision metric.
The output score is a float between 0.0 and 1.0.
See https://docs.tonic.ai/validate/ for more details.
Args:
openai_service(OpenAIService): The OpenAI service to use. Specifies the chat
completion model to use as the LLM evaluator. Defaults to "gpt-4".
"""
def __init__(self, openai_service: Optional[Any] = None):
if openai_service is None:
openai_service = OpenAIService("gpt-4")
self.openai_service = openai_service
self.metric = RetrievalPrecisionMetric()
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> EvaluationResult:
from tonic_validate.classes.benchmark import BenchmarkItem
from tonic_validate.classes.llm_response import LLMResponse
benchmark_item = BenchmarkItem(question=query, answer=response)
llm_response = LLMResponse(
llm_answer=response,
llm_context_list=contexts,
benchmark_item=benchmark_item,
)
score = self.metric.score(llm_response, self.openai_service)
return EvaluationResult(
query=query, contexts=contexts, response=response, score=score
)
def _get_prompts(self) -> PromptDictType:
return {}
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
return
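# Hedged usage sketch (not part of this module): assumes a configured OpenAI API key;
# the query, response and context strings below are illustrative only.
#
#   evaluator = RetrievalPrecisionEvaluator()
#   result = await evaluator.aevaluate(
#       query="Why does the sky look blue?",
#       response="Because of Rayleigh scattering.",
#       contexts=["Rayleigh scattering preferentially scatters shorter wavelengths."],
#   )
#   print(result.score)  # float between 0.0 and 1.0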
|
from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.retrieval_precision_metric import (
RetrievalPrecisionMetric,
)
from tonic_validate.services.openai_service import OpenAIService
class RetrievalPrecisionEvaluator(BaseEvaluator):
"""
Tonic Validate's retrieval precision metric.
The output score is a float between 0.0 and 1.0.
See https://docs.tonic.ai/validate/ for more details.
Args:
openai_service(OpenAIService): The OpenAI service to use. Specifies the chat
completion model to use as the LLM evaluator. Defaults to "gpt-4".
"""
def __init__(self, openai_service: Optional[Any] = None):
if openai_service is None:
openai_service = OpenAIService("gpt-4")
self.openai_service = openai_service
self.metric = RetrievalPrecisionMetric()
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any
) -> EvaluationResult:
from tonic_validate.classes.benchmark import BenchmarkItem
from tonic_validate.classes.llm_response import LLMResponse
benchmark_item = BenchmarkItem(question=query, answer=response)
llm_response = LLMResponse(
llm_answer=response,
llm_context_list=contexts,
benchmark_item=benchmark_item,
)
score = self.metric.score(llm_response, self.openai_service)
return EvaluationResult(
query=query, contexts=contexts, response=response, score=score
)
def _get_prompts(self) -> PromptDictType:
return {}
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
return
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...audioclip_text import AudioCLIPTextEncoder
_EMBEDDING_DIM = 1024
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=AudioCLIPTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
'--volumes=.cache:/workspace/.cache',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Callable
import pytest
from jina import Flow
from ...audioclip_text import AudioCLIPTextEncoder
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(data_generator: Callable, request_size: int):
with Flow(return_results=True).add(uses=AudioCLIPTextEncoder) as flow:
resp = flow.post(
on="/index",
inputs=data_generator(),
request_size=request_size,
return_results=True,
)
assert min(len(resp) * request_size, 50) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
assert doc.embedding.shape == (1024,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
'--volumes=.cache:/workspace/.cache',
],
timeout=30,
check=True,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean)
from .misc import (center_of_mass, flip_tensor, generate_coordinate,
mask2ndarray, multi_apply, select_single_mlvl, unmap)
__all__ = [
'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict',
'center_of_mass', 'generate_coordinate', 'select_single_mlvl'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean)
from .misc import (center_of_mass, flip_tensor, generate_coordinate,
mask2ndarray, multi_apply, unmap)
__all__ = [
'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict',
'center_of_mass', 'generate_coordinate'
]
|
"""
Opendal file and directory reader.
A loader that fetches a file or iterates through a directory on AWS S3 or another compatible service.
"""
import asyncio
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional, Union, cast
from llama_index.core.readers import SimpleDirectoryReader
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class OpendalReader(BaseReader):
"""General reader for any opendal operator."""
def __init__(
self,
scheme: str,
path: str = "/",
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
**kwargs,
) -> None:
"""
Initialize opendal operator, along with credentials if needed.
Args:
scheme (str): the scheme of the service
path (str): the path of the data. If none is provided,
this loader will iterate through the entire bucket. If the path ends with `/`, this loader will iterate through the entire directory. Otherwise, this loader will load the file.
file_extractor (Optional[Dict[str, BaseReader]]): A mapping of file
extension to a BaseReader class that specifies how to convert that file
to text. See `SimpleDirectoryReader` for more details.
"""
import opendal
super().__init__()
self.path = path
self.file_extractor = file_extractor
self.op = opendal.AsyncOperator(scheme, **kwargs)
def load_data(self) -> List[Document]:
"""Load file(s) from OpenDAL."""
with tempfile.TemporaryDirectory() as temp_dir:
if not self.path.endswith("/"):
asyncio.run(download_file_from_opendal(self.op, temp_dir, self.path))
else:
asyncio.run(download_dir_from_opendal(self.op, temp_dir, self.path))
loader = SimpleDirectoryReader(temp_dir, file_extractor=self.file_extractor)
return loader.load_data()
async def download_file_from_opendal(op: Any, temp_dir: str, path: str) -> str:
"""Download file from OpenDAL."""
import opendal
op = cast(opendal.AsyncOperator, op)
suffix = Path(path).suffix
filepath = f"{temp_dir}/{next(tempfile._get_candidate_names())}{suffix}"
async with op.open_reader(path) as r:
with open(filepath, "wb") as w:
w.write(await r.read())
return filepath
async def download_dir_from_opendal(op: Any, temp_dir: str, dir: str) -> str:
"""Download directory from opendal."""
import opendal
op = cast(opendal.AsyncOperator, op)
async for obj in await op.scan(dir):
await download_file_from_opendal(op, temp_dir, obj.path)
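A hedged usage sketch for the OpendalReader above; the scheme and the bucket/region keyword arguments are illustrative placeholders that are forwarded to opendal.AsyncOperator and depend on the storage service being used.
# Hypothetical example: load a directory from S3 through opendal (placeholder values).
reader = OpendalReader(
    scheme="s3",
    path="data/",                  # trailing slash -> download the whole directory
    bucket="my-example-bucket",    # forwarded to opendal.AsyncOperator(**kwargs)
    region="us-east-1",
)
documents = reader.load_data()
print(len(documents))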
|
"""
Opendal file and directory reader.
A loader that fetches a file or iterates through a directory on AWS S3 or another compatible service.
"""
import asyncio
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional, Union, cast
from llama_index.core.readers import SimpleDirectoryReader
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class OpendalReader(BaseReader):
"""General reader for any opendal operator."""
def __init__(
self,
scheme: str,
path: str = "/",
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
**kwargs,
) -> None:
"""
Initialize opendal operator, along with credentials if needed.
Args:
scheme (str): the scheme of the service
path (str): the path of the data. If none is provided,
this loader will iterate through the entire bucket. If the path ends with `/`, this loader will iterate through the entire directory. Otherwise, this loader will load the file.
file_extractor (Optional[Dict[str, BaseReader]]): A mapping of file
extension to a BaseReader class that specifies how to convert that file
to text. See `SimpleDirectoryReader` for more details.
"""
import opendal
super().__init__()
self.path = path
self.file_extractor = file_extractor
self.op = opendal.AsyncOperator(scheme, **kwargs)
def load_data(self) -> List[Document]:
"""Load file(s) from OpenDAL."""
with tempfile.TemporaryDirectory() as temp_dir:
if not self.path.endswith("/"):
asyncio.run(download_file_from_opendal(self.op, temp_dir, self.path))
else:
asyncio.run(download_dir_from_opendal(self.op, temp_dir, self.path))
loader = SimpleDirectoryReader(temp_dir, file_extractor=self.file_extractor)
return loader.load_data()
async def download_file_from_opendal(op: Any, temp_dir: str, path: str) -> str:
"""Download file from OpenDAL."""
import opendal
op = cast(opendal.AsyncOperator, op)
suffix = Path(path).suffix
filepath = f"{temp_dir}/{next(tempfile._get_candidate_names())}{suffix}"
async with op.open_reader(path) as r:
with open(filepath, "wb") as w:
w.write(await r.read())
return filepath
async def download_dir_from_opendal(op: Any, temp_dir: str, dir: str) -> str:
"""Download directory from opendal."""
import opendal
op = cast(opendal.AsyncOperator, op)
async for obj in await op.scan(dir):
await download_file_from_opendal(op, temp_dir, obj.path)
|
import functools
import time
from threading import Thread
import numpy as np
import pytest
from jina import Client, Document, Flow
from jina.helper import random_port
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'http'])
def test_gateway_concurrency(protocol, reraise):
port = random_port()
CONCURRENCY = 2
def _validate(req, start, status_codes, durations, index):
end = time.time()
durations[index] = end - start
status_codes[index] = req.status.code
def _request(status_codes, durations, index):
with reraise:
start = time.time()
on_done = functools.partial(
_validate,
start=start,
status_codes=status_codes,
durations=durations,
index=index,
)
results = Client(port=port, protocol=protocol).index(
inputs=(Document() for _ in range(256)), _size=16, return_responses=True
)
assert len(results) > 0
for result in results:
on_done(result)
f = Flow(protocol=protocol, port=port).add(replicas=2)
with f:
threads = []
status_codes = [None] * CONCURRENCY
durations = [None] * CONCURRENCY
for i in range(CONCURRENCY):
t = Thread(target=_request, args=(status_codes, durations, i))
threads.append(t)
t.start()
for t in threads:
t.join()
success = status_codes.count(0)
failed = len(status_codes) - success
print(
f'clients: {len(durations)}\n'
f'min roundtrip time: {np.min(durations)}\n'
f'max roundtrip time: {np.max(durations)}\n'
f'mean roundtrip time: {np.mean(durations)}\n'
)
assert success >= 1
# In some slow environments, a certain degree of failed
# requests will occur. Here we limit the degree of failed
# requests.
rate = failed / success
assert rate < 0.1
def test_grpc_custom_options():
f = Flow(grpc_server_options={'grpc.max_send_message_length': -1})
with f:
pass
|
import functools
import time
from threading import Thread
import numpy as np
import pytest
from jina import Client, Document, Flow
from jina.helper import random_port
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'http'])
def test_gateway_concurrency(protocol, reraise):
port = random_port()
CONCURRENCY = 2
def _validate(req, start, status_codes, durations, index):
end = time.time()
durations[index] = end - start
status_codes[index] = req.status.code
def _request(status_codes, durations, index):
with reraise:
start = time.time()
on_done = functools.partial(
_validate,
start=start,
status_codes=status_codes,
durations=durations,
index=index,
)
results = Client(port=port, protocol=protocol).index(
inputs=(Document() for _ in range(256)), _size=16, return_responses=True
)
assert len(results) > 0
for result in results:
on_done(result)
f = Flow(protocol=protocol, port=port).add(replicas=2)
with f:
threads = []
status_codes = [None] * CONCURRENCY
durations = [None] * CONCURRENCY
for i in range(CONCURRENCY):
t = Thread(target=_request, args=(status_codes, durations, i))
threads.append(t)
t.start()
for t in threads:
t.join()
success = status_codes.count(0)
failed = len(status_codes) - success
print(
f'clients: {len(durations)}\n'
f'min roundtrip time: {np.min(durations)}\n'
f'max roundtrip time: {np.max(durations)}\n'
f'mean roundtrip time: {np.mean(durations)}\n'
)
assert success >= 1
# In some slow environments, a certain degree of failed
# requests will occur. Here we limit the degree of failed
# requests.
rate = failed / success
assert rate < 0.1
def test_grpc_custom_options():
f = Flow(grpc_server_options={'grpc.max_send_message_length': -1})
with f:
pass
|
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
import botocore
def get_aws_service_client(
service_name: Optional[str] = None,
region_name: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
profile_name: Optional[str] = None,
max_retries: Optional[int] = 3,
timeout: Optional[float] = 60.0,
) -> "botocore.client.BaseClient":
try:
import boto3
import botocore
except ImportError:
raise ImportError(
"Please run `pip install boto3 botocore` to use AWS services."
)
config = botocore.config.Config(
retries={"max_attempts": max_retries or 0, "mode": "standard"},
connect_timeout=timeout,
)
try:
if not profile_name and aws_access_key_id:
session = boto3.Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
region_name=region_name,
)
client = session.client(service_name, config=config) # type: ignore
else:
session = boto3.Session(profile_name=profile_name)
if region_name:
client = session.client(
service_name,
region_name=region_name,
config=config, # type: ignore
)
else:
client = session.client(service_name, config=config) # type: ignore
except Exception as e:
raise ValueError("Please verify the provided credentials.") from (e)
return client
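A brief usage sketch for get_aws_service_client; the service name and region are example values, and credentials are assumed to come from the default AWS profile or environment.
# Hypothetical usage: build an S3 client from the default credential chain (example values).
s3_client = get_aws_service_client(
    service_name="s3",
    region_name="us-east-1",
    max_retries=5,
    timeout=30.0,
)
buckets = s3_client.list_buckets()  # standard boto3 S3 call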
|
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
import botocore
def get_aws_service_client(
service_name: Optional[str] = None,
region_name: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
profile_name: Optional[str] = None,
max_retries: Optional[int] = 3,
timeout: Optional[float] = 60.0,
) -> "botocore.client.BaseClient":
try:
import boto3
import botocore
except ImportError:
raise ImportError(
"Please run `pip install boto3 botocore` to use AWS services."
)
config = botocore.config.Config(
retries={"max_attempts": max_retries or 0, "mode": "standard"},
connect_timeout=timeout,
)
try:
if not profile_name and aws_access_key_id:
session = boto3.Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
region_name=region_name,
)
client = session.client(service_name, config=config) # type: ignore
else:
session = boto3.Session(profile_name=profile_name)
if region_name:
client = session.client(
service_name, region_name=region_name, config=config # type: ignore
)
else:
client = session.client(service_name, config=config) # type: ignore
except Exception as e:
raise ValueError("Please verify the provided credentials.") from (e)
return client
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Tuple
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.core.utils import (InstanceList, OptConfigType, OptMultiConfig,
SampleList)
from mmdet.registry import MODELS
class BaseRoIHead(BaseModule, metaclass=ABCMeta):
"""Base class for RoIHeads."""
def __init__(self,
bbox_roi_extractor: OptMultiConfig = None,
bbox_head: OptMultiConfig = None,
mask_roi_extractor: OptMultiConfig = None,
mask_head: OptMultiConfig = None,
shared_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if shared_head is not None:
self.shared_head = MODELS.build(shared_head)
if bbox_head is not None:
self.init_bbox_head(bbox_roi_extractor, bbox_head)
if mask_head is not None:
self.init_mask_head(mask_roi_extractor, mask_head)
self.init_assigner_sampler()
@property
def with_bbox(self) -> bool:
"""bool: whether the RoI head contains a `bbox_head`"""
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self) -> bool:
"""bool: whether the RoI head contains a `mask_head`"""
return hasattr(self, 'mask_head') and self.mask_head is not None
@property
def with_shared_head(self) -> bool:
"""bool: whether the RoI head contains a `shared_head`"""
return hasattr(self, 'shared_head') and self.shared_head is not None
@abstractmethod
def init_bbox_head(self, *args, **kwargs):
"""Initialize ``bbox_head``"""
pass
@abstractmethod
def init_mask_head(self, *args, **kwargs):
"""Initialize ``mask_head``"""
pass
@abstractmethod
def init_assigner_sampler(self, *args, **kwargs):
"""Initialize assigner and sampler."""
pass
@abstractmethod
def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList, **kwargs):
"""Perform forward propagation and loss calculation of the roi head on
the features of the upstream network."""
def predict(self,
x: Tuple[Tensor],
rpn_results_list: InstanceList,
batch_data_samples: SampleList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the roi head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Features from upstream network. Each
has shape (N, C, H, W).
rpn_results_list (list[:obj:`InstanceData`]): list of region
proposals.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): Whether to rescale the results to
the original image. Defaults to False.
Returns:
list[obj:`InstanceData`]: Detection results of each image.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
assert self.with_bbox, 'Bbox head must be implemented.'
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
# TODO: nms_op in mmcv need be enhanced, the bbox result may get
# difference when not rescale in bbox_head
# If it has the mask branch, the bbox branch does not need
# to be scaled to the original image scale, because the mask
# branch will scale both bbox and mask at the same time.
bbox_rescale = rescale if not self.with_mask else False
results_list = self.predict_bbox(
x,
batch_img_metas,
rpn_results_list,
rcnn_test_cfg=self.test_cfg,
rescale=bbox_rescale)
if self.with_mask:
results_list = self.predict_mask(
x, batch_img_metas, results_list, rescale=rescale)
return results_list
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Tuple
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.core.utils import (InstanceList, OptConfigType, OptMultiConfig,
SampleList)
from mmdet.registry import MODELS
class BaseRoIHead(BaseModule, metaclass=ABCMeta):
"""Base class for RoIHeads."""
def __init__(self,
bbox_roi_extractor: OptMultiConfig = None,
bbox_head: OptMultiConfig = None,
mask_roi_extractor: OptMultiConfig = None,
mask_head: OptMultiConfig = None,
shared_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if shared_head is not None:
self.shared_head = MODELS.build(shared_head)
if bbox_head is not None:
self.init_bbox_head(bbox_roi_extractor, bbox_head)
if mask_head is not None:
self.init_mask_head(mask_roi_extractor, mask_head)
self.init_assigner_sampler()
@property
def with_bbox(self) -> bool:
"""bool: whether the RoI head contains a `bbox_head`"""
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self) -> bool:
"""bool: whether the RoI head contains a `mask_head`"""
return hasattr(self, 'mask_head') and self.mask_head is not None
@property
def with_shared_head(self) -> bool:
"""bool: whether the RoI head contains a `shared_head`"""
return hasattr(self, 'shared_head') and self.shared_head is not None
@abstractmethod
def init_bbox_head(self, *args, **kwargs):
"""Initialize ``bbox_head``"""
pass
@abstractmethod
def init_mask_head(self, *args, **kwargs):
"""Initialize ``mask_head``"""
pass
@abstractmethod
def init_assigner_sampler(self, *args, **kwargs):
"""Initialize assigner and sampler."""
pass
@abstractmethod
def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList, **kwargs):
"""Perform forward propagation and loss calculation of the roi head on
the features of the upstream network."""
def predict(self,
x: Tuple[Tensor],
rpn_results_list: InstanceList,
batch_data_samples: SampleList,
rescale: bool = False,
**kwargs):
"""Perform forward propagation of the roi head and predict detection
results on the features of the upstream network."""
# TODO: Currently not supported
def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):
"""Test function with test time augmentation.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
|
from typing import Union
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding
try:
import torch # noqa: F401
except ImportError:
AnyEmbedding = Union[NdArrayEmbedding] # type: ignore
else:
from docarray.typing.tensor.embedding.torch import TorchEmbedding # noqa: F401
AnyEmbedding = Union[NdArrayEmbedding, TorchEmbedding] # type: ignore
__all__ = ['AnyEmbedding']
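A small sketch of how the AnyEmbedding alias can be used as a field annotation; the BaseDoc import is an assumption that depends on the docarray version (older releases expose BaseDocument instead).
# Hypothetical usage sketch; BaseDoc may be named BaseDocument in older docarray versions.
import numpy as np
from docarray import BaseDoc


class EmbeddedDoc(BaseDoc):
    embedding: AnyEmbedding


doc = EmbeddedDoc(embedding=np.zeros(128))  # numpy arrays validate; torch tensors also work if torch is installed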
|
from typing import Union
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding
try:
import torch # noqa: F401
except ImportError:
Embedding = Union[NdArrayEmbedding] # type: ignore
else:
from docarray.typing.tensor.embedding.torch import TorchEmbedding # noqa: F401
Embedding = Union[NdArrayEmbedding, TorchEmbedding] # type: ignore
__all__ = ['Embedding']
|
import json
import time
import pytest
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import InMemoryMetricReader
from prometheus_client import Summary
from jina.serve.instrumentation import MetricsTimer
@pytest.fixture
def metrics_setup():
metric_reader = InMemoryMetricReader()
meter_provider = MeterProvider(metric_readers=[metric_reader])
meter = meter_provider.get_meter('test')
yield metric_reader, meter
if hasattr(meter_provider, 'force_flush'):
metric_reader.force_flush()
if hasattr(meter_provider, 'shutdown'):
meter_provider.shutdown()
def test_timer_context(metrics_setup):
def _do_something():
time.sleep(0.1)
metric_reader, meter = metrics_setup
summary = Summary('time_taken', 'measure something')
histogram = meter.create_histogram(
name='time_taken', description='measure something'
)
with MetricsTimer(summary_metric=summary, histogram=histogram):
_do_something()
# Prometheus samples
summary_count_sample = [
sample.value for sample in list(summary._samples()) if '_count' == sample.name
]
assert 1.0 == summary_count_sample[0]
# OpenTelemetry samples
histogram_metric = json.loads(
metric_reader.get_metrics_data()
.resource_metrics[0]
.scope_metrics[0]
.metrics[0]
.to_json()
)
assert 'time_taken' == histogram_metric['name']
assert 1 == histogram_metric['data']['data_points'][0]['count']
def test_timer_decorator(metrics_setup):
metric_reader, meter = metrics_setup
summary = Summary('time_taken_decorator', 'measure something')
histogram = meter.create_histogram(
name='time_taken_decorator', description='measure something'
)
@MetricsTimer(summary, histogram)
def _sleep():
time.sleep(0.1)
_sleep()
# Prometheus samples
summary_count_sample = [
sample.value for sample in list(summary._samples()) if '_count' == sample.name
]
assert 1.0 == summary_count_sample[0]
# OpenTelemetry samples
histogram_metric = json.loads(
metric_reader.get_metrics_data()
.resource_metrics[0]
.scope_metrics[0]
.metrics[0]
.to_json()
)
assert 'time_taken_decorator' == histogram_metric['name']
assert 1 == histogram_metric['data']['data_points'][0]['count']
assert {} == histogram_metric['data']['data_points'][0]['attributes']
labels = {
'cat': 'meow',
'dog': 'woof',
}
@MetricsTimer(summary, histogram, labels)
def _sleep():
time.sleep(0.1)
_sleep()
# Prometheus samples
summary_count_sample = [
sample.value for sample in list(summary._samples()) if '_count' == sample.name
]
assert 2.0 == summary_count_sample[0]
# OpenTelemetry samples
histogram_metric = json.loads(
metric_reader.get_metrics_data()
.resource_metrics[0]
.scope_metrics[0]
.metrics[0]
.to_json()
)
assert 'time_taken_decorator' == histogram_metric['name']
assert 1 == histogram_metric['data']['data_points'][0]['count']
assert labels == histogram_metric['data']['data_points'][0]['attributes']
|
import json
import time
import pytest
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import InMemoryMetricReader
from prometheus_client import Summary
from jina.serve.instrumentation import MetricsTimer
@pytest.fixture
def metrics_setup():
metric_reader = InMemoryMetricReader()
meter_provider = MeterProvider(metric_readers=[metric_reader])
meter = meter_provider.get_meter('test')
yield metric_reader, meter
if hasattr(meter_provider, 'force_flush'):
metric_reader.force_flush()
if hasattr(meter_provider, 'shutdown'):
meter_provider.shutdown()
def test_timer_context(metrics_setup):
def _do_something():
time.sleep(0.1)
metric_reader, meter = metrics_setup
summary = Summary('time_taken', 'measure something')
histogram = meter.create_histogram(
name='time_taken', description='measure something'
)
with MetricsTimer(summary_metric=summary, histogram=histogram):
_do_something()
# Prometheus samples
summary_count_sample = [
sample.value for sample in list(summary._samples()) if '_count' == sample.name
]
assert 1.0 == summary_count_sample[0]
# OpenTelemetry samples
histogram_metric = json.loads(
metric_reader.get_metrics_data()
.resource_metrics[0]
.scope_metrics[0]
.metrics[0]
.to_json()
)
assert 'time_taken' == histogram_metric['name']
assert 1 == histogram_metric['data']['data_points'][0]['count']
def test_timer_decorator(metrics_setup):
metric_reader, meter = metrics_setup
summary = Summary('time_taken_decorator', 'measure something')
histogram = meter.create_histogram(
name='time_taken_decorator', description='measure something'
)
@MetricsTimer(summary, histogram)
def _sleep():
time.sleep(0.1)
_sleep()
# Prometheus samples
summary_count_sample = [
sample.value for sample in list(summary._samples()) if '_count' == sample.name
]
assert 1.0 == summary_count_sample[0]
# OpenTelemetry samples
histogram_metric = json.loads(
metric_reader.get_metrics_data()
.resource_metrics[0]
.scope_metrics[0]
.metrics[0]
.to_json()
)
assert 'time_taken_decorator' == histogram_metric['name']
assert 1 == histogram_metric['data']['data_points'][0]['count']
assert {} == histogram_metric['data']['data_points'][0]['attributes']
labels = {
'cat': 'meow',
'dog': 'woof',
}
@MetricsTimer(summary, histogram, labels)
def _sleep():
time.sleep(0.1)
_sleep()
# Prometheus samples
summary_count_sample = [
sample.value for sample in list(summary._samples()) if '_count' == sample.name
]
assert 2.0 == summary_count_sample[0]
# OpenTelemetry samples
histogram_metric = json.loads(
metric_reader.get_metrics_data()
.resource_metrics[0]
.scope_metrics[0]
.metrics[0]
.to_json()
)
assert 'time_taken_decorator' == histogram_metric['name']
assert 1 == histogram_metric['data']['data_points'][0]['count']
assert labels == histogram_metric['data']['data_points'][0]['attributes']
|
from typing import Union
from docarray.typing.tensor.ndarray import NdArray
try:
import torch # noqa: F401
except ImportError:
AnyTensor = Union[NdArray] # type: ignore
else:
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
AnyTensor = Union[NdArray, TorchTensor] # type: ignore
|
from typing import Union
from docarray.typing.tensor.ndarray import NdArray
try:
import torch # noqa: F401
except ImportError:
Tensor = Union[NdArray] # type: ignore
else:
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
Tensor = Union[NdArray, TorchTensor] # type: ignore
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import mmcv
from ..builder import PIPELINES
from .compose import Compose
@PIPELINES.register_module()
class MultiScaleFlipAug:
"""Test-time augmentation with multiple scales and flipping.
An example configuration is as follows:
.. code-block::
img_scale=[(1333, 400), (1333, 800)],
flip=True,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
]
After MultiScaleFlipAug with the above configuration, the results are wrapped
into lists of the same length as follows:
.. code-block::
dict(
img=[...],
img_shape=[...],
scale=[(1333, 400), (1333, 400), (1333, 800), (1333, 800)]
flip=[False, True, False, True]
...
)
Args:
transforms (list[dict]): Transforms to apply in each augmentation.
img_scale (tuple | list[tuple] | None): Images scales for resizing.
scale_factor (float | list[float] | None): Scale factors for resizing.
flip (bool): Whether apply flip augmentation. Default: False.
flip_direction (str | list[str]): Flip augmentation directions,
options are "horizontal", "vertical" and "diagonal". If
flip_direction is a list, multiple flip augmentations will be
applied. It has no effect when flip == False. Default:
"horizontal".
"""
def __init__(self,
transforms,
img_scale=None,
scale_factor=None,
flip=False,
flip_direction='horizontal'):
self.transforms = Compose(transforms)
assert (img_scale is None) ^ (scale_factor is None), (
'Exactly one of img_scale and scale_factor must be set')
if img_scale is not None:
self.img_scale = img_scale if isinstance(img_scale,
list) else [img_scale]
self.scale_key = 'scale'
assert mmcv.is_list_of(self.img_scale, tuple)
else:
self.img_scale = scale_factor if isinstance(
scale_factor, list) else [scale_factor]
self.scale_key = 'scale_factor'
self.flip = flip
self.flip_direction = flip_direction if isinstance(
flip_direction, list) else [flip_direction]
assert mmcv.is_list_of(self.flip_direction, str)
if not self.flip and self.flip_direction != ['horizontal']:
warnings.warn(
'flip_direction has no effect when flip is set to False')
if (self.flip
and not any([t['type'] == 'RandomFlip' for t in transforms])):
warnings.warn(
'flip has no effect when RandomFlip is not in transforms')
def __call__(self, results):
"""Call function to apply test time augment transforms on results.
Args:
results (dict): Result dict contains the data to transform.
Returns:
dict[str: list]: The augmented data, where each value is wrapped
into a list.
"""
aug_data = []
flip_args = [(False, None)]
if self.flip:
flip_args += [(True, direction)
for direction in self.flip_direction]
for scale in self.img_scale:
for flip, direction in flip_args:
_results = results.copy()
_results[self.scale_key] = scale
_results['flip'] = flip
_results['flip_direction'] = direction
data = self.transforms(_results)
aug_data.append(data)
# list of dict to dict of list
aug_data_dict = {key: [] for key in aug_data[0]}
for data in aug_data:
for key, val in data.items():
aug_data_dict[key].append(val)
return aug_data_dict
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(transforms={self.transforms}, '
repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
repr_str += f'flip_direction={self.flip_direction})'
return repr_str
|
import warnings
import mmcv
from ..builder import PIPELINES
from .compose import Compose
@PIPELINES.register_module()
class MultiScaleFlipAug:
"""Test-time augmentation with multiple scales and flipping.
An example configuration is as follows:
.. code-block::
img_scale=[(1333, 400), (1333, 800)],
flip=True,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
]
After MultiScaleFlipAug with the above configuration, the results are wrapped
into lists of the same length as follows:
.. code-block::
dict(
img=[...],
img_shape=[...],
scale=[(1333, 400), (1333, 400), (1333, 800), (1333, 800)]
flip=[False, True, False, True]
...
)
Args:
transforms (list[dict]): Transforms to apply in each augmentation.
img_scale (tuple | list[tuple] | None): Images scales for resizing.
scale_factor (float | list[float] | None): Scale factors for resizing.
flip (bool): Whether apply flip augmentation. Default: False.
flip_direction (str | list[str]): Flip augmentation directions,
options are "horizontal", "vertical" and "diagonal". If
flip_direction is a list, multiple flip augmentations will be
applied. It has no effect when flip == False. Default:
"horizontal".
"""
def __init__(self,
transforms,
img_scale=None,
scale_factor=None,
flip=False,
flip_direction='horizontal'):
self.transforms = Compose(transforms)
assert (img_scale is None) ^ (scale_factor is None), (
'Exactly one of img_scale and scale_factor must be set')
if img_scale is not None:
self.img_scale = img_scale if isinstance(img_scale,
list) else [img_scale]
self.scale_key = 'scale'
assert mmcv.is_list_of(self.img_scale, tuple)
else:
self.img_scale = scale_factor if isinstance(
scale_factor, list) else [scale_factor]
self.scale_key = 'scale_factor'
self.flip = flip
self.flip_direction = flip_direction if isinstance(
flip_direction, list) else [flip_direction]
assert mmcv.is_list_of(self.flip_direction, str)
if not self.flip and self.flip_direction != ['horizontal']:
warnings.warn(
'flip_direction has no effect when flip is set to False')
if (self.flip
and not any([t['type'] == 'RandomFlip' for t in transforms])):
warnings.warn(
'flip has no effect when RandomFlip is not in transforms')
def __call__(self, results):
"""Call function to apply test time augment transforms on results.
Args:
results (dict): Result dict contains the data to transform.
Returns:
dict[str: list]: The augmented data, where each value is wrapped
into a list.
"""
aug_data = []
flip_args = [(False, None)]
if self.flip:
flip_args += [(True, direction)
for direction in self.flip_direction]
for scale in self.img_scale:
for flip, direction in flip_args:
_results = results.copy()
_results[self.scale_key] = scale
_results['flip'] = flip
_results['flip_direction'] = direction
data = self.transforms(_results)
aug_data.append(data)
# list of dict to dict of list
aug_data_dict = {key: [] for key in aug_data[0]}
for data in aug_data:
for key, val in data.items():
aug_data_dict[key].append(val)
return aug_data_dict
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(transforms={self.transforms}, '
repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
repr_str += f'flip_direction={self.flip_direction})'
return repr_str
|
from typing import Generator, Optional
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
from docarray.typing import ImageUrl, NdArray
from docarray.utils.map import map_docs, map_docs_batch
from tests.units.typing.test_bytes import IMAGE_PATHS
N_DOCS = 2
def load_from_doc(d: ImageDoc) -> ImageDoc:
if d.url is not None:
d.tensor = d.url.load()
return d
@pytest.fixture()
def da():
da = DocArray[ImageDoc]([ImageDoc(url=IMAGE_PATHS['png']) for _ in range(N_DOCS)])
return da
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map(da, backend):
for tensor in da.tensor:
assert tensor is None
docs = list(map_docs(da=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for doc in docs:
assert doc.tensor is not None
def test_map_multiprocessing_lambda_func_raise_exception(da):
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(da=da, func=lambda x: x, backend='process'))
def test_map_multiprocessing_local_func_raise_exception(da):
def local_func(x):
return x
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(da=da, func=local_func, backend='process'))
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_check_order(backend):
da = DocArray[ImageDoc]([ImageDoc(id=i) for i in range(N_DOCS)])
docs = list(map_docs(da=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for i, doc in enumerate(docs):
assert doc.id == str(i)
def load_from_da(da: DocArray) -> DocArray:
for doc in da:
doc.tensor = doc.url.load()
return da
class MyImage(BaseDoc):
tensor: Optional[NdArray]
url: ImageUrl
@pytest.mark.slow
@pytest.mark.parametrize('n_docs,batch_size', [(10, 5), (10, 8)])
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map_docs_batch(n_docs, batch_size, backend):
da = DocArray[MyImage]([MyImage(url=IMAGE_PATHS['png']) for _ in range(n_docs)])
it = map_docs_batch(
da=da, func=load_from_da, batch_size=batch_size, backend=backend
)
assert isinstance(it, Generator)
for batch in it:
assert isinstance(batch, DocArray[MyImage])
|
from typing import Generator, Optional
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.typing import ImageUrl, NdArray
from docarray.utils.map import map_docs, map_docs_batch
from tests.units.typing.test_bytes import IMAGE_PATHS
N_DOCS = 2
def load_from_doc(d: ImageDoc) -> ImageDoc:
if d.url is not None:
d.tensor = d.url.load()
return d
@pytest.fixture()
def da():
da = DocumentArray[ImageDoc](
[ImageDoc(url=IMAGE_PATHS['png']) for _ in range(N_DOCS)]
)
return da
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map(da, backend):
for tensor in da.tensor:
assert tensor is None
docs = list(map_docs(da=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for doc in docs:
assert doc.tensor is not None
def test_map_multiprocessing_lambda_func_raise_exception(da):
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(da=da, func=lambda x: x, backend='process'))
def test_map_multiprocessing_local_func_raise_exception(da):
def local_func(x):
return x
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(da=da, func=local_func, backend='process'))
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_check_order(backend):
da = DocumentArray[ImageDoc]([ImageDoc(id=i) for i in range(N_DOCS)])
docs = list(map_docs(da=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for i, doc in enumerate(docs):
assert doc.id == str(i)
def load_from_da(da: DocumentArray) -> DocumentArray:
for doc in da:
doc.tensor = doc.url.load()
return da
class MyImage(BaseDocument):
tensor: Optional[NdArray]
url: ImageUrl
@pytest.mark.slow
@pytest.mark.parametrize('n_docs,batch_size', [(10, 5), (10, 8)])
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map_docs_batch(n_docs, batch_size, backend):
da = DocumentArray[MyImage](
[MyImage(url=IMAGE_PATHS['png']) for _ in range(n_docs)]
)
it = map_docs_batch(
da=da, func=load_from_da, batch_size=batch_size, backend=backend
)
assert isinstance(it, Generator)
for batch in it:
assert isinstance(batch, DocumentArray[MyImage])
|
import os
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
import xgboost as xgb
from xgboost import callback
class SupportedTasks(object):
TRAIN = "train"
class XGBoostTrainer(Executor):
def __init__(self, server_address: str, world_size: int, server_cert_path: str,
client_key_path: str, client_cert_path: str, use_gpus: bool):
"""Trainer for federated XGBoost.
Args:
server_address: address for the gRPC server to connect to.
world_size: the number of sites.
server_cert_path: the path to the server certificate file.
client_key_path: the path to the client key file.
client_cert_path: the path to the client certificate file.
"""
super().__init__()
self._server_address = server_address
self._world_size = world_size
self._server_cert_path = server_cert_path
self._client_key_path = client_key_path
self._client_cert_path = client_cert_path
self._use_gpus = use_gpus
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext,
abort_signal: Signal) -> Shareable:
self.log_info(fl_ctx, f"Executing {task_name}")
try:
if task_name == SupportedTasks.TRAIN:
self._do_training(fl_ctx)
return make_reply(ReturnCode.OK)
else:
self.log_error(fl_ctx, f"{task_name} is not a supported task.")
return make_reply(ReturnCode.TASK_UNKNOWN)
except BaseException as e:
self.log_exception(fl_ctx,
f"Task {task_name} failed. Exception: {e.__str__()}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def _do_training(self, fl_ctx: FLContext):
client_name = fl_ctx.get_prop(FLContextKey.CLIENT_NAME)
rank = int(client_name.split('-')[1]) - 1
communicator_env = {
'xgboost_communicator': 'federated',
'federated_server_address': self._server_address,
'federated_world_size': self._world_size,
'federated_rank': rank,
'federated_server_cert': self._server_cert_path,
'federated_client_key': self._client_key_path,
'federated_client_cert': self._client_cert_path
}
with xgb.collective.CommunicatorContext(**communicator_env):
# Load the file; it will not be sharded in federated mode.
if rank == 0:
label = '&label_column=0'
else:
label = ''
dtrain = xgb.DMatrix(f'higgs.train.csv?format=csv{label}', data_split_mode=1)
dtest = xgb.DMatrix(f'higgs.test.csv?format=csv{label}', data_split_mode=1)
# specify parameters via map
param = {
'validate_parameters': True,
'eta': 0.1,
'gamma': 1.0,
'max_depth': 8,
'min_child_weight': 100,
'tree_method': 'approx',
'grow_policy': 'depthwise',
'objective': 'binary:logistic',
'eval_metric': 'auc',
}
if self._use_gpus:
self.log_info(fl_ctx, 'GPUs are not currently supported by vertical federated XGBoost')
# specify validations set to watch performance
watchlist = [(dtest, "eval"), (dtrain, "train")]
# number of boosting rounds
num_round = 10
bst = xgb.train(param, dtrain, num_round, evals=watchlist, early_stopping_rounds=2)
# Save the model.
workspace = fl_ctx.get_prop(FLContextKey.WORKSPACE_OBJECT)
run_number = fl_ctx.get_prop(FLContextKey.CURRENT_RUN)
run_dir = workspace.get_run_dir(run_number)
bst.save_model(os.path.join(run_dir, "higgs.model.federated.vertical.json"))
xgb.collective.communicator_print("Finished training\n")
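A short illustration of the rank derivation used in _do_training above; the client names are examples that follow the 'site-N' convention the code assumes.
# Hypothetical illustration of how the federated rank is derived from the client name.
for client_name in ('site-1', 'site-2', 'site-3'):
    rank = int(client_name.split('-')[1]) - 1
    print(client_name, '-> rank', rank)  # site-1 -> rank 0, site-2 -> rank 1, site-3 -> rank 2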
|
import os
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
import xgboost as xgb
from xgboost import callback
class SupportedTasks(object):
TRAIN = "train"
class XGBoostTrainer(Executor):
def __init__(self, server_address: str, world_size: int, server_cert_path: str,
client_key_path: str, client_cert_path: str):
"""Trainer for federated XGBoost.
Args:
server_address: address for the gRPC server to connect to.
world_size: the number of sites.
server_cert_path: the path to the server certificate file.
client_key_path: the path to the client key file.
client_cert_path: the path to the client certificate file.
"""
super().__init__()
self._server_address = server_address
self._world_size = world_size
self._server_cert_path = server_cert_path
self._client_key_path = client_key_path
self._client_cert_path = client_cert_path
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext,
abort_signal: Signal) -> Shareable:
self.log_info(fl_ctx, f"Executing {task_name}")
try:
if task_name == SupportedTasks.TRAIN:
self._do_training(fl_ctx)
return make_reply(ReturnCode.OK)
else:
self.log_error(fl_ctx, f"{task_name} is not a supported task.")
return make_reply(ReturnCode.TASK_UNKNOWN)
except BaseException as e:
self.log_exception(fl_ctx,
f"Task {task_name} failed. Exception: {e.__str__()}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def _do_training(self, fl_ctx: FLContext):
client_name = fl_ctx.get_prop(FLContextKey.CLIENT_NAME)
rank = int(client_name.split('-')[1]) - 1
communicator_env = {
'xgboost_communicator': 'federated',
'federated_server_address': self._server_address,
'federated_world_size': self._world_size,
'federated_rank': rank,
'federated_server_cert': self._server_cert_path,
'federated_client_key': self._client_key_path,
'federated_client_cert': self._client_cert_path
}
with xgb.collective.CommunicatorContext(**communicator_env):
# Load the file; it will not be sharded in federated mode.
if rank == 0:
label = '&label_column=0'
else:
label = ''
dtrain = xgb.DMatrix(f'higgs.train.csv?format=csv{label}', data_split_mode=1)
dtest = xgb.DMatrix(f'higgs.test.csv?format=csv{label}', data_split_mode=1)
# specify parameters via map
param = {
'validate_parameters': True,
'eta': 0.1,
'gamma': 1.0,
'max_depth': 8,
'min_child_weight': 100,
'tree_method': 'approx',
'grow_policy': 'depthwise',
'objective': 'binary:logistic',
'eval_metric': 'auc',
}
# specify validations set to watch performance
watchlist = [(dtest, "eval"), (dtrain, "train")]
# number of boosting rounds
num_round = 10
bst = xgb.train(param, dtrain, num_round, evals=watchlist, early_stopping_rounds=2)
# Save the model.
workspace = fl_ctx.get_prop(FLContextKey.WORKSPACE_OBJECT)
run_number = fl_ctx.get_prop(FLContextKey.CURRENT_RUN)
run_dir = workspace.get_run_dir(run_number)
bst.save_model(os.path.join(run_dir, "higgs.model.federated.vertical.json"))
xgb.collective.communicator_print("Finished training\n")
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
import torch.nn as nn
from mmdet.registry import MODELS
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
"""Smooth L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
Returns:
torch.Tensor: Calculated loss
"""
assert beta > 0
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
diff - 0.5 * beta)
return loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def l1_loss(pred, target):
"""L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
Returns:
torch.Tensor: Calculated loss
"""
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
loss = torch.abs(pred - target)
return loss
@MODELS.register_module()
class SmoothL1Loss(nn.Module):
"""Smooth L1 loss.
Args:
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum". Defaults to "mean".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
super(SmoothL1Loss, self).__init__()
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * smooth_l1_loss(
pred,
target,
weight,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
@MODELS.register_module()
class L1Loss(nn.Module):
"""L1 loss.
Args:
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super(L1Loss, self).__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * l1_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss_bbox
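A brief numeric check of the smooth L1 definition above; the tensors are toy values chosen only to show the piecewise behaviour around beta.
# Hypothetical worked example for smooth_l1_loss (toy tensors, beta=1.0, no reduction).
# For |pred - target| < beta the loss is 0.5 * diff**2 / beta, otherwise diff - 0.5 * beta.
import torch

pred = torch.tensor([0.5, 3.0])
target = torch.tensor([0.0, 0.0])
loss = smooth_l1_loss(pred, target, beta=1.0, reduction='none')
# diff = [0.5, 3.0] -> loss = [0.5 * 0.5 * 0.5, 3.0 - 0.5] = [0.1250, 2.5000]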
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
"""Smooth L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
Returns:
torch.Tensor: Calculated loss
"""
assert beta > 0
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
diff - 0.5 * beta)
return loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def l1_loss(pred, target):
"""L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
Returns:
torch.Tensor: Calculated loss
"""
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
loss = torch.abs(pred - target)
return loss
@LOSSES.register_module()
class SmoothL1Loss(nn.Module):
"""Smooth L1 loss.
Args:
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum". Defaults to "mean".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
super(SmoothL1Loss, self).__init__()
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * smooth_l1_loss(
pred,
target,
weight,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
@LOSSES.register_module()
class L1Loss(nn.Module):
"""L1 loss.
Args:
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super(L1Loss, self).__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * l1_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss_bbox
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@MODELS.register_module()
class SOLOv2(SingleStageInstanceSegmentor):
"""`SOLOv2: Dynamic and Fast Instance Segmentation
<https://arxiv.org/abs/2003.10152>`_
"""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
bbox_head: OptConfigType = None,
mask_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from ..builder import DETECTORS
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@DETECTORS.register_module()
class SOLOv2(SingleStageInstanceSegmentor):
"""`SOLOv2: Dynamic and Fast Instance Segmentation
<https://arxiv.org/abs/2003.10152>`_
"""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
bbox_head: OptConfigType = None,
mask_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
import os
from pathlib import Path
from torchaudio.datasets import yesno
from torchaudio_unittest.common_utils import (
get_whitenoise,
normalize_wav,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
def get_mock_data(root_dir, labels):
"""
root_dir: path
labels: list of labels
"""
mocked_data = []
base_dir = os.path.join(root_dir, "waves_yesno")
os.makedirs(base_dir, exist_ok=True)
for i, label in enumerate(labels):
filename = f'{"_".join(str(l) for l in label)}.wav'
path = os.path.join(base_dir, filename)
data = get_whitenoise(sample_rate=8000, duration=6, n_channels=1, dtype="int16", seed=i)
save_wav(path, data, 8000)
mocked_data.append(normalize_wav(data))
return mocked_data
class TestYesNo(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
data = []
labels = [
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1],
]
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.data = get_mock_data(cls.root_dir, cls.labels)
def _test_yesno(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
expected_label = self.labels[i]
expected_data = self.data[i]
self.assertEqual(expected_data, waveform, atol=5e-5, rtol=1e-8)
assert sample_rate == 8000
assert label == expected_label
n_ite += 1
assert n_ite == len(self.data)
def test_yesno_str(self):
dataset = yesno.YESNO(self.root_dir)
self._test_yesno(dataset)
def test_yesno_path(self):
dataset = yesno.YESNO(Path(self.root_dir))
self._test_yesno(dataset)
|
import os
from pathlib import Path
from torchaudio.datasets import yesno
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
get_whitenoise,
save_wav,
normalize_wav,
)
def get_mock_data(root_dir, labels):
"""
root_dir: path
labels: list of labels
"""
mocked_data = []
base_dir = os.path.join(root_dir, "waves_yesno")
os.makedirs(base_dir, exist_ok=True)
for i, label in enumerate(labels):
filename = f'{"_".join(str(l) for l in label)}.wav'
path = os.path.join(base_dir, filename)
data = get_whitenoise(sample_rate=8000, duration=6, n_channels=1, dtype="int16", seed=i)
save_wav(path, data, 8000)
mocked_data.append(normalize_wav(data))
return mocked_data
class TestYesNo(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
data = []
labels = [
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1],
]
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.data = get_mock_data(cls.root_dir, cls.labels)
def _test_yesno(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
expected_label = self.labels[i]
expected_data = self.data[i]
self.assertEqual(expected_data, waveform, atol=5e-5, rtol=1e-8)
assert sample_rate == 8000
assert label == expected_label
n_ite += 1
assert n_ite == len(self.data)
def test_yesno_str(self):
dataset = yesno.YESNO(self.root_dir)
self._test_yesno(dataset)
def test_yesno_path(self):
dataset = yesno.YESNO(Path(self.root_dir))
self._test_yesno(dataset)
|
"""Test node mapping."""
from llama_index.core import SQLDatabase
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.objects.base_node_mapping import SimpleObjectNodeMapping
from llama_index.core.objects.table_node_mapping import (
SQLTableNodeMapping,
SQLTableSchema,
)
from llama_index.core.objects.tool_node_mapping import SimpleToolNodeMapping
from llama_index.core.tools.function_tool import FunctionTool
from pytest_mock import MockerFixture
class _TestObject(BaseModel):
"""Test object for node mapping."""
__test__ = False
name: str
def __hash__(self) -> int:
return hash(self.name)
def __str__(self) -> str:
return f"_TestObject(name='{self.name}')"
class _TestSQLDatabase(SQLDatabase):
"""Test object for SQL Table Schema Node Mapping."""
def __init__(self) -> None:
pass
def test_simple_object_node_mapping() -> None:
"""Test simple object node mapping."""
strs = ["a", "b", "c"]
node_mapping = SimpleObjectNodeMapping.from_objects(strs)
assert node_mapping.to_node("a").text == "a"
assert node_mapping.from_node(node_mapping.to_node("a")) == "a"
objects = [_TestObject(name="a"), _TestObject(name="b"), _TestObject(name="c")]
node_mapping = SimpleObjectNodeMapping.from_objects(objects)
assert node_mapping.to_node(objects[0]).text == "_TestObject(name='a')"
assert node_mapping.from_node(node_mapping.to_node(objects[0])) == objects[0]
def test_simple_object_node_mapping_persist() -> None:
"""Test persist/load."""
strs = ["a", "b", "c"]
node_mapping = SimpleObjectNodeMapping.from_objects(strs)
node_mapping.persist()
loaded_node_mapping = SimpleObjectNodeMapping.from_persist_dir()
assert node_mapping.obj_node_mapping == loaded_node_mapping.obj_node_mapping
def test_tool_object_node_mapping() -> None:
"""Test tool object node mapping."""
tool1 = FunctionTool.from_defaults(
fn=lambda x: x,
name="test_tool",
description="test",
)
tool2 = FunctionTool.from_defaults(
fn=lambda x, y: x + y, name="test_tool2", description="test"
)
node_mapping = SimpleToolNodeMapping.from_objects([tool1, tool2])
# don't need to check for tool fn schema
assert ("Tool name: test_tool\nTool description: test\n") in node_mapping.to_node(
tool1
).get_text()
assert node_mapping.from_node(node_mapping.to_node(tool1)) == tool1
assert ("Tool name: test_tool2\nTool description: test\n") in node_mapping.to_node(
tool2
).get_text()
recon_tool2 = node_mapping.from_node(node_mapping.to_node(tool2))
assert recon_tool2(1, 2).raw_output == 3
tool3 = FunctionTool.from_defaults(
fn=lambda x, y: x * y, name="test_tool3", description="test3"
)
node_mapping.add_object(tool3)
assert ("Tool name: test_tool3\nTool description: test3\n") in node_mapping.to_node(
tool3
).get_text()
assert node_mapping.from_node(node_mapping.to_node(tool3)) == tool3
def test_sql_table_node_mapping_to_node(mocker: MockerFixture) -> None:
"""Test to add node for sql table node mapping object to ensure no 'None' values in metadata output to avoid issues with nulls when upserting to indexes."""
mocker.patch(
"llama_index.core.utilities.sql_wrapper.SQLDatabase.get_single_table_info",
return_value="",
)
# Define two table schemas with one that does not have context str defined
table1 = SQLTableSchema(table_name="table1")
table2 = SQLTableSchema(table_name="table2", context_str="stuff here")
tables = [table1, table2]
# Create the mapping
sql_database = _TestSQLDatabase()
mapping = SQLTableNodeMapping(sql_database)
# Create the nodes
nodes = []
for table in tables:
node = mapping.to_node(table)
nodes.append(node)
# Make sure no None values are passed in otherwise PineconeVectorStore will fail the upsert
for node in nodes:
assert None not in node.metadata.values()
|
"""Test node mapping."""
from llama_index.core import SQLDatabase
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.objects.base_node_mapping import SimpleObjectNodeMapping
from llama_index.core.objects.table_node_mapping import (
SQLTableNodeMapping,
SQLTableSchema,
)
from llama_index.core.objects.tool_node_mapping import SimpleToolNodeMapping
from llama_index.core.tools.function_tool import FunctionTool
from pytest_mock import MockerFixture
class _TestObject(BaseModel):
"""Test object for node mapping."""
__test__ = False
name: str
def __hash__(self) -> int:
return hash(self.name)
def __str__(self) -> str:
return f"_TestObject(name='{self.name}')"
class _TestSQLDatabase(SQLDatabase):
"""Test object for SQL Table Schema Node Mapping."""
def __init__(self) -> None:
pass
def test_simple_object_node_mapping() -> None:
"""Test simple object node mapping."""
strs = ["a", "b", "c"]
node_mapping = SimpleObjectNodeMapping.from_objects(strs)
assert node_mapping.to_node("a").text == "a"
assert node_mapping.from_node(node_mapping.to_node("a")) == "a"
objects = [_TestObject(name="a"), _TestObject(name="b"), _TestObject(name="c")]
node_mapping = SimpleObjectNodeMapping.from_objects(objects)
assert node_mapping.to_node(objects[0]).text == "_TestObject(name='a')"
assert node_mapping.from_node(node_mapping.to_node(objects[0])) == objects[0]
def test_simple_object_node_mapping_persist() -> None:
"""Test persist/load."""
strs = ["a", "b", "c"]
node_mapping = SimpleObjectNodeMapping.from_objects(strs)
node_mapping.persist()
loaded_node_mapping = SimpleObjectNodeMapping.from_persist_dir()
assert node_mapping.obj_node_mapping == loaded_node_mapping.obj_node_mapping
def test_tool_object_node_mapping() -> None:
"""Test tool object node mapping."""
tool1 = FunctionTool.from_defaults(
fn=lambda x: x,
name="test_tool",
description="test",
)
tool2 = FunctionTool.from_defaults(
fn=lambda x, y: x + y, name="test_tool2", description="test"
)
node_mapping = SimpleToolNodeMapping.from_objects([tool1, tool2])
# don't need to check for tool fn schema
assert (
"Tool name: test_tool\n" "Tool description: test\n"
) in node_mapping.to_node(tool1).get_text()
assert node_mapping.from_node(node_mapping.to_node(tool1)) == tool1
assert (
"Tool name: test_tool2\n" "Tool description: test\n"
) in node_mapping.to_node(tool2).get_text()
recon_tool2 = node_mapping.from_node(node_mapping.to_node(tool2))
assert recon_tool2(1, 2).raw_output == 3
tool3 = FunctionTool.from_defaults(
fn=lambda x, y: x * y, name="test_tool3", description="test3"
)
node_mapping.add_object(tool3)
assert (
"Tool name: test_tool3\n" "Tool description: test3\n"
) in node_mapping.to_node(tool3).get_text()
assert node_mapping.from_node(node_mapping.to_node(tool3)) == tool3
def test_sql_table_node_mapping_to_node(mocker: MockerFixture) -> None:
"""Test to add node for sql table node mapping object to ensure no 'None' values in metadata output to avoid issues with nulls when upserting to indexes."""
mocker.patch(
"llama_index.core.utilities.sql_wrapper.SQLDatabase.get_single_table_info",
return_value="",
)
# Define two table schemas with one that does not have context str defined
table1 = SQLTableSchema(table_name="table1")
table2 = SQLTableSchema(table_name="table2", context_str="stuff here")
tables = [table1, table2]
# Create the mapping
sql_database = _TestSQLDatabase()
mapping = SQLTableNodeMapping(sql_database)
# Create the nodes
nodes = []
for table in tables:
node = mapping.to_node(table)
nodes.append(node)
# Make sure no None values are passed in otherwise PineconeVectorStore will fail the upsert
for node in nodes:
assert None not in node.metadata.values()
|
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `SentenceTransformerTrainer` class to train models.
See https://www.sbert.net/docs/sentence_transformer/training_overview.html for more information.
See this script for more details on how to use the new training API:
https://github.com/UKPLab/sentence-transformers/blob/master/examples/sentence_transformer/unsupervised_learning/TSDAE/train_stsb_tsdae.py
"""
from __future__ import annotations
import numpy as np
from torch.utils.data import Dataset
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from sentence_transformers.readers.InputExample import InputExample
class DenoisingAutoEncoderDataset(Dataset):
"""
The DenoisingAutoEncoderDataset returns InputExamples in the format: texts=[noise_fn(sentence), sentence]
It is used in combination with the DenoisingAutoEncoderLoss: Here, a decoder tries to re-construct the
sentence without noise.
Args:
sentences: A list of sentences
noise_fn: A noise function: Given a string, it returns a string
with noise, e.g. deleted words
"""
def __init__(self, sentences: list[str], noise_fn=lambda s: DenoisingAutoEncoderDataset.delete(s)):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.sentences = sentences
self.noise_fn = noise_fn
def __getitem__(self, item):
sent = self.sentences[item]
return InputExample(texts=[self.noise_fn(sent), sent])
def __len__(self):
return len(self.sentences)
# Deletion noise.
@staticmethod
def delete(text, del_ratio=0.6):
from nltk import word_tokenize
from nltk.tokenize.treebank import TreebankWordDetokenizer
words = word_tokenize(text)
n = len(words)
if n == 0:
return text
keep_or_not = np.random.rand(n) > del_ratio
if sum(keep_or_not) == 0:
keep_or_not[np.random.choice(n)] = True # guarantee that at least one word remains
words_processed = TreebankWordDetokenizer().detokenize(np.array(words)[keep_or_not])
return words_processed
|
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `SentenceTransformerTrainer` class to train models.
See https://www.sbert.net/docs/sentence_transformer/training_overview.html for more information.
See this script for more details on how to use the new training API:
https://github.com/UKPLab/sentence-transformers/blob/master/examples/unsupervised_learning/TSDAE/train_stsb_tsdae.py
"""
from __future__ import annotations
import numpy as np
from torch.utils.data import Dataset
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from sentence_transformers.readers.InputExample import InputExample
class DenoisingAutoEncoderDataset(Dataset):
"""
The DenoisingAutoEncoderDataset returns InputExamples in the format: texts=[noise_fn(sentence), sentence]
It is used in combination with the DenoisingAutoEncoderLoss: Here, a decoder tries to re-construct the
sentence without noise.
Args:
sentences: A list of sentences
noise_fn: A noise function: Given a string, it returns a string
with noise, e.g. deleted words
"""
def __init__(self, sentences: list[str], noise_fn=lambda s: DenoisingAutoEncoderDataset.delete(s)):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.sentences = sentences
self.noise_fn = noise_fn
def __getitem__(self, item):
sent = self.sentences[item]
return InputExample(texts=[self.noise_fn(sent), sent])
def __len__(self):
return len(self.sentences)
# Deletion noise.
@staticmethod
def delete(text, del_ratio=0.6):
from nltk import word_tokenize
from nltk.tokenize.treebank import TreebankWordDetokenizer
words = word_tokenize(text)
n = len(words)
if n == 0:
return text
keep_or_not = np.random.rand(n) > del_ratio
if sum(keep_or_not) == 0:
keep_or_not[np.random.choice(n)] = True # guarantee that at least one word remains
words_processed = TreebankWordDetokenizer().detokenize(np.array(words)[keep_or_not])
return words_processed
|
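A minimal usage sketch of the dataset above, assuming it is importable as `sentence_transformers.datasets.DenoisingAutoEncoderDataset` and that NLTK (with its 'punkt' tokenizer data) is installed; each item pairs a noised copy of a sentence with the original, which is what DenoisingAutoEncoderLoss expects.
from sentence_transformers.datasets import DenoisingAutoEncoderDataset
sentences = [
    "Sentence transformers map text to dense vectors.",
    "TSDAE reconstructs the original sentence from a noised input.",
]
dataset = DenoisingAutoEncoderDataset(sentences)
example = dataset[0]
# texts[0] is the corrupted sentence (roughly 60% of the words deleted by default),
# texts[1] is the untouched original that the decoder must reconstruct.
print(example.texts)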
import collections
from keras.src import backend
from keras.src import testing
from keras.src.utils import tracking
class TrackingTest(testing.TestCase):
def test_untracking_in_tracked_list(self):
tracked_variables = []
tracker = tracking.Tracker(
{
"variables": (
lambda x: isinstance(x, backend.Variable),
tracked_variables,
),
}
)
v1 = backend.Variable(1.0)
v2 = backend.Variable(2.0)
lst = tracking.TrackedList([], tracker)
lst.append(v1)
lst.append(None)
lst.append(v2)
lst.append(0)
self.assertLen(tracked_variables, 2)
self.assertEqual(tracked_variables[0], v1)
self.assertEqual(tracked_variables[1], v2)
lst.remove(v1)
self.assertLen(lst, 3)
self.assertLen(tracked_variables, 1)
lst.remove(v2)
self.assertLen(lst, 2)
self.assertLen(tracked_variables, 0)
lst2 = tracking.TrackedList([], tracker)
lst2.append(v1)
lst2.append(None)
lst2.append(v2)
lst2.append(0)
popped_value = lst2.pop()
self.assertEqual(popped_value, 0)
self.assertLen(lst2, 3)
self.assertLen(tracked_variables, 2)
lst2.clear()
self.assertLen(lst2, 0)
self.assertLen(tracked_variables, 0)
lst2.append(v1)
lst2.append(v2)
del lst2[0]
self.assertLen(lst2, 1)
self.assertLen(tracked_variables, 1)
def test_tuple_tracking(self):
tracked_variables = []
tracker = tracking.Tracker(
{
"variables": (
lambda x: isinstance(x, backend.Variable),
tracked_variables,
),
}
)
v1 = backend.Variable(1.0)
v2 = backend.Variable(2.0)
tup = (v1, v2)
tup = tracker.track(tup)
self.assertIsInstance(tup, tuple)
self.assertLen(tracked_variables, 2)
self.assertEqual(tracked_variables[0], v1)
self.assertEqual(tracked_variables[1], v2)
def test_namedtuple_tracking(self):
tracked_variables = []
tracker = tracking.Tracker(
{
"variables": (
lambda x: isinstance(x, backend.Variable),
tracked_variables,
),
}
)
v1 = backend.Variable(1.0)
v2 = backend.Variable(2.0)
nt = collections.namedtuple("NT", ["x", "y"])
tup = nt(x=v1, y=v2)
tup = tracker.track(tup)
self.assertIsInstance(tup, tuple)
self.assertEqual(tup.x, v1)
self.assertEqual(tup.y, v2)
self.assertLen(tracked_variables, 2)
self.assertEqual(tracked_variables[0], v1)
self.assertEqual(tracked_variables[1], v2)
|
import collections
from keras.src import backend
from keras.src import testing
from keras.src.utils import tracking
class TrackingTest(testing.TestCase):
def test_untracking_in_tracked_list(self):
tracked_variables = []
tracker = tracking.Tracker(
{
"variables": (
lambda x: isinstance(x, backend.Variable),
tracked_variables,
),
}
)
v1 = backend.Variable(1)
v2 = backend.Variable(2)
lst = tracking.TrackedList([], tracker)
lst.append(v1)
lst.append(None)
lst.append(v2)
lst.append(0)
self.assertLen(tracked_variables, 2)
self.assertEqual(tracked_variables[0], v1)
self.assertEqual(tracked_variables[1], v2)
lst.remove(v1)
self.assertLen(lst, 3)
self.assertLen(tracked_variables, 1)
lst.remove(v2)
self.assertLen(lst, 2)
self.assertLen(tracked_variables, 0)
lst2 = tracking.TrackedList([], tracker)
lst2.append(v1)
lst2.append(None)
lst2.append(v2)
lst2.append(0)
popped_value = lst2.pop()
self.assertEqual(popped_value, 0)
self.assertLen(lst2, 3)
self.assertLen(tracked_variables, 2)
lst2.clear()
self.assertLen(lst2, 0)
self.assertLen(tracked_variables, 0)
lst2.append(v1)
lst2.append(v2)
del lst2[0]
self.assertLen(lst2, 1)
self.assertLen(tracked_variables, 1)
def test_tuple_tracking(self):
tracked_variables = []
tracker = tracking.Tracker(
{
"variables": (
lambda x: isinstance(x, backend.Variable),
tracked_variables,
),
}
)
v1 = backend.Variable(1)
v2 = backend.Variable(2)
tup = (v1, v2)
tup = tracker.track(tup)
self.assertIsInstance(tup, tuple)
self.assertLen(tracked_variables, 2)
self.assertEqual(tracked_variables[0], v1)
self.assertEqual(tracked_variables[1], v2)
def test_namedtuple_tracking(self):
tracked_variables = []
tracker = tracking.Tracker(
{
"variables": (
lambda x: isinstance(x, backend.Variable),
tracked_variables,
),
}
)
v1 = backend.Variable(1)
v2 = backend.Variable(2)
nt = collections.namedtuple("NT", ["x", "y"])
tup = nt(x=v1, y=v2)
tup = tracker.track(tup)
self.assertIsInstance(tup, tuple)
self.assertEqual(tup.x, v1)
self.assertEqual(tup.y, v2)
self.assertLen(tracked_variables, 2)
self.assertEqual(tracked_variables[0], v1)
self.assertEqual(tracked_variables[1], v2)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .empty_cache_hook import EmptyCacheHook
from .checkpoint_hook import CheckpointHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .optimizer_hook import OptimizerHook
from .param_scheduler_hook import ParamSchedulerHook
from .sampler_seed_hook import DistSamplerSeedHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'OptimizerHook', 'EmptyCacheHook', 'CheckpointHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .optimizer_hook import OptimizerHook
from .param_scheduler_hook import ParamSchedulerHook
from .sampler_seed_hook import DistSamplerSeedHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'OptimizerHook', 'EmptyCacheHook'
]
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
# model settings
model = dict(
type='CornerNet',
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CornerHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=1,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_embedding=dict(
type='AssociativeEmbeddingLoss',
pull_weight=0.10,
push_weight=0.10),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
**img_norm_cfg),
dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=True,
transforms=[
dict(type='Resize'),
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
**img_norm_cfg),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(
type='Collect',
keys=['img'],
meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'img_norm_cfg', 'border')),
])
]
data = dict(
samples_per_gpu=3,
workers_per_gpu=3,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='Adam', lr=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[180])
runner = dict(type='EpochBasedRunner', max_epochs=210)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (3 samples per GPU)
auto_scale_lr = dict(base_batch_size=96)
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
# model settings
model = dict(
type='CornerNet',
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CornerHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=1,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_embedding=dict(
type='AssociativeEmbeddingLoss',
pull_weight=0.10,
push_weight=0.10),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
**img_norm_cfg),
dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=True,
transforms=[
dict(type='Resize'),
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
**img_norm_cfg),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(
type='Collect',
keys=['img'],
meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'img_norm_cfg', 'border')),
])
]
data = dict(
samples_per_gpu=3,
workers_per_gpu=3,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='Adam', lr=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[180])
runner = dict(type='EpochBasedRunner', max_epochs=210)
|
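The `auto_scale_lr` note in the first config above records that the learning rate was tuned for 32 GPUs x 3 samples per GPU (base_batch_size=96). Below is a hedged sketch of the linear scaling rule this implies; the 8-GPU setup is hypothetical.
base_lr = 0.0005           # optimizer lr from the config above
base_batch_size = 96       # 32 GPUs x 3 samples per GPU
actual_batch_size = 8 * 3  # hypothetical run: 8 GPUs x 3 samples per GPU
# Linear scaling: keep the lr proportional to the effective batch size.
scaled_lr = base_lr * actual_batch_size / base_batch_size
print(scaled_lr)  # 0.000125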
"""[DEPRECATED] Pipeline prompt template."""
from typing import Any
from typing import Optional as Optional
from pydantic import model_validator
from langchain_core._api.deprecation import deprecated
from langchain_core.prompt_values import PromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import BaseChatPromptTemplate
def _get_inputs(inputs: dict, input_variables: list[str]) -> dict:
return {k: inputs[k] for k in input_variables}
@deprecated(
since="0.3.22",
removal="1.0",
message=(
"This class is deprecated. Please see the docstring below or at the link"
" for a replacement option: "
"https://python.langchain.com/api_reference/core/prompts/langchain_core.prompts.pipeline.PipelinePromptTemplate.html"
),
)
class PipelinePromptTemplate(BasePromptTemplate):
"""[DEPRECATED] Pipeline prompt template.
This has been deprecated in favor of chaining individual prompts together in your
code. E.g. using a for loop, you could do:
.. code-block:: python
my_input = {"key": "value"}
for name, prompt in pipeline_prompts:
my_input[name] = prompt.invoke(my_input).to_string()
my_output = final_prompt.invoke(my_input)
Prompt template for composing multiple prompt templates together.
This can be useful when you want to reuse parts of prompts.
A PipelinePrompt consists of two main parts:
- final_prompt: This is the final prompt that is returned
- pipeline_prompts: This is a list of tuples, consisting
of a string (`name`) and a Prompt Template.
Each PromptTemplate will be formatted and then passed
to future prompt templates as a variable with
the same name as `name`
"""
final_prompt: BasePromptTemplate
"""The final prompt that is returned."""
pipeline_prompts: list[tuple[str, BasePromptTemplate]]
"""A list of tuples, consisting of a string (`name`) and a Prompt Template."""
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "pipeline"]
@model_validator(mode="before")
@classmethod
def get_input_variables(cls, values: dict) -> Any:
"""Get input variables."""
created_variables = set()
all_variables = set()
for k, prompt in values["pipeline_prompts"]:
created_variables.add(k)
all_variables.update(prompt.input_variables)
values["input_variables"] = list(all_variables.difference(created_variables))
return values
def format_prompt(self, **kwargs: Any) -> PromptValue:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
for k, prompt in self.pipeline_prompts:
_inputs = _get_inputs(kwargs, prompt.input_variables)
if isinstance(prompt, BaseChatPromptTemplate):
kwargs[k] = prompt.format_messages(**_inputs)
else:
kwargs[k] = prompt.format(**_inputs)
_inputs = _get_inputs(kwargs, self.final_prompt.input_variables)
return self.final_prompt.format_prompt(**_inputs)
async def aformat_prompt(self, **kwargs: Any) -> PromptValue:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
for k, prompt in self.pipeline_prompts:
_inputs = _get_inputs(kwargs, prompt.input_variables)
if isinstance(prompt, BaseChatPromptTemplate):
kwargs[k] = await prompt.aformat_messages(**_inputs)
else:
kwargs[k] = await prompt.aformat(**_inputs)
_inputs = _get_inputs(kwargs, self.final_prompt.input_variables)
return await self.final_prompt.aformat_prompt(**_inputs)
def format(self, **kwargs: Any) -> str:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return self.format_prompt(**kwargs).to_string()
async def aformat(self, **kwargs: Any) -> str:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return (await self.aformat_prompt(**kwargs)).to_string()
@property
def _prompt_type(self) -> str:
raise ValueError
PipelinePromptTemplate.model_rebuild()
|
from typing import Any
from typing import Optional as Optional
from pydantic import model_validator
from langchain_core._api.deprecation import deprecated
from langchain_core.prompt_values import PromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import BaseChatPromptTemplate
def _get_inputs(inputs: dict, input_variables: list[str]) -> dict:
return {k: inputs[k] for k in input_variables}
@deprecated(
since="0.3.22",
removal="1.0",
message=(
"This class is deprecated. Please see the docstring below or at the link"
" for a replacement option: "
"https://python.langchain.com/api_reference/core/prompts/langchain_core.prompts.pipeline.PipelinePromptTemplate.html"
),
)
class PipelinePromptTemplate(BasePromptTemplate):
"""This has been deprecated in favor of chaining individual prompts together in your
code. E.g. using a for loop, you could do:
.. code-block:: python
my_input = {"key": "value"}
for name, prompt in pipeline_prompts:
my_input[name] = prompt.invoke(my_input).to_string()
my_output = final_prompt.invoke(my_input)
Prompt template for composing multiple prompt templates together.
This can be useful when you want to reuse parts of prompts.
A PipelinePrompt consists of two main parts:
- final_prompt: This is the final prompt that is returned
- pipeline_prompts: This is a list of tuples, consisting
of a string (`name`) and a Prompt Template.
Each PromptTemplate will be formatted and then passed
to future prompt templates as a variable with
the same name as `name`
"""
final_prompt: BasePromptTemplate
"""The final prompt that is returned."""
pipeline_prompts: list[tuple[str, BasePromptTemplate]]
"""A list of tuples, consisting of a string (`name`) and a Prompt Template."""
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "pipeline"]
@model_validator(mode="before")
@classmethod
def get_input_variables(cls, values: dict) -> Any:
"""Get input variables."""
created_variables = set()
all_variables = set()
for k, prompt in values["pipeline_prompts"]:
created_variables.add(k)
all_variables.update(prompt.input_variables)
values["input_variables"] = list(all_variables.difference(created_variables))
return values
def format_prompt(self, **kwargs: Any) -> PromptValue:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
for k, prompt in self.pipeline_prompts:
_inputs = _get_inputs(kwargs, prompt.input_variables)
if isinstance(prompt, BaseChatPromptTemplate):
kwargs[k] = prompt.format_messages(**_inputs)
else:
kwargs[k] = prompt.format(**_inputs)
_inputs = _get_inputs(kwargs, self.final_prompt.input_variables)
return self.final_prompt.format_prompt(**_inputs)
async def aformat_prompt(self, **kwargs: Any) -> PromptValue:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
for k, prompt in self.pipeline_prompts:
_inputs = _get_inputs(kwargs, prompt.input_variables)
if isinstance(prompt, BaseChatPromptTemplate):
kwargs[k] = await prompt.aformat_messages(**_inputs)
else:
kwargs[k] = await prompt.aformat(**_inputs)
_inputs = _get_inputs(kwargs, self.final_prompt.input_variables)
return await self.final_prompt.aformat_prompt(**_inputs)
def format(self, **kwargs: Any) -> str:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return self.format_prompt(**kwargs).to_string()
async def aformat(self, **kwargs: Any) -> str:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return (await self.aformat_prompt(**kwargs)).to_string()
@property
def _prompt_type(self) -> str:
raise ValueError
PipelinePromptTemplate.model_rebuild()
|
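A runnable sketch of the replacement pattern recommended in the docstring above: format each named sub-prompt in order and feed its string output to the later prompts. The concrete templates and variable names are illustrative, not taken from the source.
from langchain_core.prompts import PromptTemplate
intro = PromptTemplate.from_template("You are impersonating {person}.")
example = PromptTemplate.from_template("Q: {example_q}\nA: {example_a}")
final_prompt = PromptTemplate.from_template("{intro}\n\n{example}\n\nQ: {question}\nA:")
pipeline_prompts = [("intro", intro), ("example", example)]
my_input = {
    "person": "Ada Lovelace",
    "example_q": "What is your favorite machine?",
    "example_a": "The Analytical Engine.",
    "question": "What do you work on these days?",
}
# Each formatted sub-prompt becomes an input variable for the prompts that follow it.
for name, prompt in pipeline_prompts:
    my_input[name] = prompt.invoke(my_input).to_string()
my_output = final_prompt.invoke(my_input)
print(my_output.to_string())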
from fastapi.testclient import TestClient
from docs_src.configure_swagger_ui.tutorial003 import app
client = TestClient(app)
def test_swagger_ui():
response = client.get("/docs")
assert response.status_code == 200, response.text
assert '"deepLinking": false,' in response.text, (
"overridden configs should be preserved"
)
assert '"deepLinking": true' not in response.text, (
"overridden configs should not include the old value"
)
assert '"syntaxHighlight": false' not in response.text, (
"not used parameters should not be included"
)
assert '"dom_id": "#swagger-ui"' in response.text, (
"default configs should be preserved"
)
assert "presets: [" in response.text, "default configs should be preserved"
assert "SwaggerUIBundle.presets.apis," in response.text, (
"default configs should be preserved"
)
assert "SwaggerUIBundle.SwaggerUIStandalonePreset" in response.text, (
"default configs should be preserved"
)
assert '"layout": "BaseLayout",' in response.text, (
"default configs should be preserved"
)
assert '"showExtensions": true,' in response.text, (
"default configs should be preserved"
)
assert '"showCommonExtensions": true,' in response.text, (
"default configs should be preserved"
)
def test_get_users():
response = client.get("/users/foo")
assert response.status_code == 200, response.text
assert response.json() == {"message": "Hello foo"}
|
from fastapi.testclient import TestClient
from docs_src.configure_swagger_ui.tutorial003 import app
client = TestClient(app)
def test_swagger_ui():
response = client.get("/docs")
assert response.status_code == 200, response.text
assert (
'"deepLinking": false,' in response.text
), "overridden configs should be preserved"
assert (
'"deepLinking": true' not in response.text
), "overridden configs should not include the old value"
assert (
'"syntaxHighlight": false' not in response.text
), "not used parameters should not be included"
assert (
'"dom_id": "#swagger-ui"' in response.text
), "default configs should be preserved"
assert "presets: [" in response.text, "default configs should be preserved"
assert (
"SwaggerUIBundle.presets.apis," in response.text
), "default configs should be preserved"
assert (
"SwaggerUIBundle.SwaggerUIStandalonePreset" in response.text
), "default configs should be preserved"
assert (
'"layout": "BaseLayout",' in response.text
), "default configs should be preserved"
assert (
'"showExtensions": true,' in response.text
), "default configs should be preserved"
assert (
'"showCommonExtensions": true,' in response.text
), "default configs should be preserved"
def test_get_users():
response = client.get("/users/foo")
assert response.status_code == 200, response.text
assert response.json() == {"message": "Hello foo"}
|
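Judging only from the assertions above, the app under test presumably overrides a single Swagger UI option via FastAPI's `swagger_ui_parameters`; a hedged reconstruction follows, with the route inferred from test_get_users.
from fastapi import FastAPI
app = FastAPI(swagger_ui_parameters={"deepLinking": False})
@app.get("/users/{username}")
async def read_user(username: str):
    return {"message": f"Hello {username}"}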
import warnings
from typing import Optional, TypeVar
from docarray.typing.bytes.video_bytes import VideoBytes, VideoLoadResult
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
T = TypeVar('T', bound='VideoUrl')
@_register_proto(proto_type_name='video_url')
class VideoUrl(AnyUrl):
"""
URL to a video file.
Can be remote (web) URL, or a local file path.
"""
def load(self: T, **kwargs) -> VideoLoadResult:
"""
Load the data from the url into a `NamedTuple` of
[`VideoNdArray`][docarray.typing.VideoNdArray],
[`AudioNdArray`][docarray.typing.AudioNdArray]
and [`NdArray`][docarray.typing.NdArray].
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import VideoUrl, VideoNdArray, AudioNdArray, NdArray
class MyDoc(BaseDoc):
video_url: VideoUrl
video: Optional[VideoNdArray]
audio: Optional[AudioNdArray]
key_frame_indices: Optional[NdArray]
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
doc.video, doc.audio, doc.key_frame_indices = doc.video_url.load()
assert isinstance(doc.video, VideoNdArray)
assert isinstance(doc.audio, AudioNdArray)
assert isinstance(doc.key_frame_indices, NdArray)
```
---
You can load only the key frames (or video, audio respectively):
---
```python
from pydantic import parse_obj_as
from docarray.typing import NdArray, VideoUrl
url = parse_obj_as(
VideoUrl,
'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true',
)
key_frame_indices = url.load().key_frame_indices
assert isinstance(key_frame_indices, NdArray)
```
---
        :param kwargs: supports all keyword arguments that are supported by
av.open() as described [here](https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open)
:return: [`AudioNdArray`][docarray.typing.AudioNdArray] representing the audio content,
[`VideoNdArray`][docarray.typing.VideoNdArray] representing the images of the video,
[`NdArray`][docarray.typing.NdArray] of the key frame indices.
"""
buffer = self.load_bytes(**kwargs)
return buffer.load()
def load_bytes(self, timeout: Optional[float] = None) -> VideoBytes:
"""
Convert url to [`VideoBytes`][docarray.typing.VideoBytes]. This will either load or download
        the file and save it into a [`VideoBytes`][docarray.typing.VideoBytes] object.
:param timeout: timeout for urlopen. Only relevant if url is not local
:return: [`VideoBytes`][docarray.typing.VideoBytes] object
"""
bytes_ = super().load_bytes(timeout=timeout)
return VideoBytes(bytes_)
def display(self):
"""
Play video from url in notebook.
"""
if is_notebook():
from IPython.display import display
remote_url = True if self.startswith('http') else False
if remote_url:
from IPython.display import Video
b = self.load_bytes()
display(Video(data=b, embed=True, mimetype='video/mp4'))
else:
import os
from IPython.display import HTML
path = os.path.relpath(self)
src = f'''
<body>
<video width="320" height="240" autoplay muted controls>
<source src="{path}">
Your browser does not support the video tag.
</video>
</body>
'''
display(HTML(src))
else:
warnings.warn('Display of video is only possible in a notebook.')
|
import warnings
from typing import Optional, TypeVar
from docarray.typing.bytes.video_bytes import VideoBytes, VideoLoadResult
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
T = TypeVar('T', bound='VideoUrl')
@_register_proto(proto_type_name='video_url')
class VideoUrl(AnyUrl):
"""
URL to a video file.
Can be remote (web) URL, or a local file path.
"""
def load(self: T, **kwargs) -> VideoLoadResult:
"""
Load the data from the url into a `NamedTuple` of
[`VideoNdArray`][docarray.typing.VideoNdArray],
[`AudioNdArray`][docarray.typing.AudioNdArray]
and [`NdArray`][docarray.typing.NdArray].
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import VideoUrl, VideoNdArray, AudioNdArray, NdArray
class MyDoc(BaseDoc):
video_url: VideoUrl
video: Optional[VideoNdArray]
audio: Optional[AudioNdArray]
key_frame_indices: Optional[NdArray]
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
doc.video, doc.audio, doc.key_frame_indices = doc.video_url.load()
assert isinstance(doc.video, VideoNdArray)
assert isinstance(doc.audio, AudioNdArray)
assert isinstance(doc.key_frame_indices, NdArray)
```
---
You can load only the key frames (or video, audio respectively):
---
```python
from pydantic import parse_obj_as
from docarray.typing import NdArray, VideoUrl
url = parse_obj_as(
VideoUrl,
'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true',
)
key_frame_indices = url.load().key_frame_indices
assert isinstance(key_frame_indices, NdArray)
```
---
        :param kwargs: supports all keyword arguments that are supported by
av.open() as described in:
https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open
:return: [`AudioNdArray`][docarray.typing.AudioNdArray] representing the audio content,
[`VideoNdArray`][docarray.typing.VideoNdArray] representing the images of the video,
[`NdArray`][docarray.typing.NdArray] of the key frame indices.
"""
buffer = self.load_bytes(**kwargs)
return buffer.load()
def load_bytes(self, timeout: Optional[float] = None) -> VideoBytes:
"""
Convert url to [`VideoBytes`][docarray.typing.VideoBytes]. This will either load or download
        the file and save it into a [`VideoBytes`][docarray.typing.VideoBytes] object.
:param timeout: timeout for urlopen. Only relevant if url is not local
:return: [`VideoBytes`][docarray.typing.VideoBytes] object
"""
bytes_ = super().load_bytes(timeout=timeout)
return VideoBytes(bytes_)
def display(self):
"""
Play video from url in notebook.
"""
if is_notebook():
from IPython.display import display
remote_url = True if self.startswith('http') else False
if remote_url:
from IPython.display import Video
b = self.load_bytes()
display(Video(data=b, embed=True, mimetype='video/mp4'))
else:
import os
from IPython.display import HTML
path = os.path.relpath(self)
src = f'''
<body>
<video width="320" height="240" autoplay muted controls>
<source src="{path}">
Your browser does not support the video tag.
</video>
</body>
'''
display(HTML(src))
else:
warnings.warn('Display of video is only possible in a notebook.')
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Callable
import numpy as np
from sentence_transformers.evaluation.NanoBEIREvaluator import NanoBEIREvaluator
from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import (
SparseInformationRetrievalEvaluator,
)
if TYPE_CHECKING:
from torch import Tensor
from sentence_transformers.evaluation import SimilarityFunction
from sentence_transformers.evaluation.NanoBEIREvaluator import (
DatasetNameType,
)
from sentence_transformers.sparse_encoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseNanoBEIREvaluator(NanoBEIREvaluator):
information_retrieval_class = SparseInformationRetrievalEvaluator
def __init__(
self,
dataset_names: list[DatasetNameType] | None = None,
mrr_at_k: list[int] = [10],
ndcg_at_k: list[int] = [10],
accuracy_at_k: list[int] = [1, 3, 5, 10],
precision_recall_at_k: list[int] = [1, 3, 5, 10],
map_at_k: list[int] = [100],
show_progress_bar: bool = False,
batch_size: int = 32,
write_csv: bool = True,
truncate_dim: int | None = None,
score_functions: dict[str, Callable[[Tensor, Tensor], Tensor]] = None,
main_score_function: str | SimilarityFunction | None = None,
aggregate_fn: Callable[[list[float]], float] = np.mean,
aggregate_key: str = "mean",
query_prompts: str | dict[str, str] | None = None,
corpus_prompts: str | dict[str, str] | None = None,
):
super().__init__(
dataset_names=dataset_names,
mrr_at_k=mrr_at_k,
ndcg_at_k=ndcg_at_k,
accuracy_at_k=accuracy_at_k,
precision_recall_at_k=precision_recall_at_k,
map_at_k=map_at_k,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
write_csv=write_csv,
truncate_dim=truncate_dim,
score_functions=score_functions,
main_score_function=main_score_function,
aggregate_fn=aggregate_fn,
aggregate_key=aggregate_key,
query_prompts=query_prompts,
corpus_prompts=corpus_prompts,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1, *args, **kwargs
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps, *args, **kwargs)
def _load_dataset(
self, dataset_name: DatasetNameType, **ir_evaluator_kwargs
) -> SparseInformationRetrievalEvaluator:
return super()._load_dataset(dataset_name, **ir_evaluator_kwargs)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Callable
import numpy as np
from sentence_transformers.evaluation.NanoBEIREvaluator import NanoBEIREvaluator
from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import (
SparseInformationRetrievalEvaluator,
)
if TYPE_CHECKING:
from torch import Tensor
from sentence_transformers.evaluation import SimilarityFunction
from sentence_transformers.evaluation.NanoBEIREvaluator import (
DatasetNameType,
)
from sentence_transformers.sparse_encoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseNanoBEIREvaluator(NanoBEIREvaluator):
def __init__(
self,
dataset_names: list[DatasetNameType] | None = None,
mrr_at_k: list[int] = [10],
ndcg_at_k: list[int] = [10],
accuracy_at_k: list[int] = [1, 3, 5, 10],
precision_recall_at_k: list[int] = [1, 3, 5, 10],
map_at_k: list[int] = [100],
show_progress_bar: bool = False,
batch_size: int = 32,
write_csv: bool = True,
truncate_dim: int | None = None,
score_functions: dict[str, Callable[[Tensor, Tensor], Tensor]] = None,
main_score_function: str | SimilarityFunction | None = None,
aggregate_fn: Callable[[list[float]], float] = np.mean,
aggregate_key: str = "mean",
query_prompts: str | dict[str, str] | None = None,
corpus_prompts: str | dict[str, str] | None = None,
):
self.information_retrieval_class = SparseInformationRetrievalEvaluator
super().__init__(
dataset_names=dataset_names,
mrr_at_k=mrr_at_k,
ndcg_at_k=ndcg_at_k,
accuracy_at_k=accuracy_at_k,
precision_recall_at_k=precision_recall_at_k,
map_at_k=map_at_k,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
write_csv=write_csv,
truncate_dim=truncate_dim,
score_functions=score_functions,
main_score_function=main_score_function,
aggregate_fn=aggregate_fn,
aggregate_key=aggregate_key,
query_prompts=query_prompts,
corpus_prompts=corpus_prompts,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1, *args, **kwargs
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps, *args, **kwargs)
def _load_dataset(
self, dataset_name: DatasetNameType, **ir_evaluator_kwargs
) -> SparseInformationRetrievalEvaluator:
return super()._load_dataset(dataset_name, **ir_evaluator_kwargs)
|
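A hedged usage sketch of the evaluator defined above. The import path and checkpoint name are assumptions; leaving dataset_names as None evaluates on all NanoBEIR datasets, which are downloaded from the Hugging Face Hub.
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")  # hypothetical checkpoint choice
evaluator = SparseNanoBEIREvaluator(show_progress_bar=True, batch_size=16)
results = evaluator(model)  # aggregated IR metrics, e.g. NDCG@10 per dataset plus the mean
print(results)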
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `SentenceTransformerTrainer` class to train models.
See https://www.sbert.net/docs/sentence_transformer/training_overview.html for more information.
Instead, you should create a `datasets` `Dataset` for training: https://huggingface.co/docs/datasets/create_dataset
"""
from __future__ import annotations
import gzip
import os
from . import InputExample
class NLIDataReader:
"""Reads in the Stanford NLI dataset and the MultiGenre NLI dataset"""
def __init__(self, dataset_folder):
self.dataset_folder = dataset_folder
def get_examples(self, filename, max_examples=0):
"""
        data_splits specifies which data split to use (train, dev, test).
Expects that self.dataset_folder contains the files s1.$data_split.gz, s2.$data_split.gz,
labels.$data_split.gz, e.g., for the train split, s1.train.gz, s2.train.gz, labels.train.gz
"""
s1 = gzip.open(os.path.join(self.dataset_folder, "s1." + filename), mode="rt", encoding="utf-8").readlines()
s2 = gzip.open(os.path.join(self.dataset_folder, "s2." + filename), mode="rt", encoding="utf-8").readlines()
labels = gzip.open(
os.path.join(self.dataset_folder, "labels." + filename), mode="rt", encoding="utf-8"
).readlines()
examples = []
id = 0
for sentence_a, sentence_b, label in zip(s1, s2, labels):
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid, texts=[sentence_a, sentence_b], label=self.map_label(label)))
if 0 < max_examples <= len(examples):
break
return examples
@staticmethod
def get_labels():
return {"contradiction": 0, "entailment": 1, "neutral": 2}
def get_num_labels(self):
return len(self.get_labels())
def map_label(self, label):
return self.get_labels()[label.strip().lower()]
|
from __future__ import annotations
import gzip
import os
from . import InputExample
class NLIDataReader:
"""Reads in the Stanford NLI dataset and the MultiGenre NLI dataset"""
def __init__(self, dataset_folder):
self.dataset_folder = dataset_folder
def get_examples(self, filename, max_examples=0):
"""
        data_splits specifies which data split to use (train, dev, test).
Expects that self.dataset_folder contains the files s1.$data_split.gz, s2.$data_split.gz,
labels.$data_split.gz, e.g., for the train split, s1.train.gz, s2.train.gz, labels.train.gz
"""
s1 = gzip.open(os.path.join(self.dataset_folder, "s1." + filename), mode="rt", encoding="utf-8").readlines()
s2 = gzip.open(os.path.join(self.dataset_folder, "s2." + filename), mode="rt", encoding="utf-8").readlines()
labels = gzip.open(
os.path.join(self.dataset_folder, "labels." + filename), mode="rt", encoding="utf-8"
).readlines()
examples = []
id = 0
for sentence_a, sentence_b, label in zip(s1, s2, labels):
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid, texts=[sentence_a, sentence_b], label=self.map_label(label)))
if 0 < max_examples <= len(examples):
break
return examples
@staticmethod
def get_labels():
return {"contradiction": 0, "entailment": 1, "neutral": 2}
def get_num_labels(self):
return len(self.get_labels())
def map_label(self, label):
return self.get_labels()[label.strip().lower()]
|
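A minimal usage sketch under the folder layout described in the docstring above (s1.train.gz, s2.train.gz, labels.train.gz); the 'datasets/AllNLI' path is hypothetical.
from sentence_transformers.readers import NLIDataReader
reader = NLIDataReader("datasets/AllNLI")  # hypothetical folder containing the gzipped splits
train_examples = reader.get_examples("train.gz", max_examples=1000)
first = train_examples[0]
print(first.texts)  # [premise, hypothesis]
print(first.label)  # 0=contradiction, 1=entailment, 2=neutral (see get_labels above)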
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.layers import ResLayer, SimplifiedBasicBlock
from mmdet.registry import MODELS
from .fused_semantic_head import FusedSemanticHead
@MODELS.register_module()
class SCNetSemanticHead(FusedSemanticHead):
"""Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
conv_to_res (bool, optional): if True, change the conv layers to
``SimplifiedBasicBlock``.
"""
def __init__(self, conv_to_res: bool = True, **kwargs) -> None:
super().__init__(**kwargs)
self.conv_to_res = conv_to_res
if self.conv_to_res:
num_res_blocks = self.num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
self.in_channels,
self.conv_out_channels,
num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.num_convs = num_res_blocks
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from mmdet.registry import MODELS
from .fused_semantic_head import FusedSemanticHead
@MODELS.register_module()
class SCNetSemanticHead(FusedSemanticHead):
"""Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
conv_to_res (bool, optional): if True, change the conv layers to
``SimplifiedBasicBlock``.
"""
def __init__(self, conv_to_res: bool = True, **kwargs) -> None:
super().__init__(**kwargs)
self.conv_to_res = conv_to_res
if self.conv_to_res:
num_res_blocks = self.num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
self.in_channels,
self.conv_out_channels,
num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.num_convs = num_res_blocks
|
from typing import Any, Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class MultipleNegativesSymmetricRankingLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.cos_sim) -> None:
"""
This loss is an adaptation of MultipleNegativesRankingLoss. MultipleNegativesRankingLoss computes the following loss:
For a given anchor and a list of candidates, find the positive candidate.
In MultipleNegativesSymmetricRankingLoss, we add another loss term: Given the positive and a list of all anchors,
find the correct (matching) anchor.
For the example of question-answering: You have (question, answer)-pairs. MultipleNegativesRankingLoss just computes
the loss to find the answer for a given question. MultipleNegativesSymmetricRankingLoss additionally computes the
loss to find the question for a given answer.
        Note: If you pass triplets, the negative entry will be ignored; an anchor is only matched against its positive.
Args:
model: SentenceTransformer model
scale: Output of similarity function is multiplied by scale
value
similarity_fct: similarity function between sentence
embeddings. By default, cos_sim. Can also be set to dot
product (and then set scale to 1)
Requirements:
1. (anchor, positive) pairs
Relations:
- Like :class:`MultipleNegativesRankingLoss`, but with an additional loss term.
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive) pairs | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
})
loss = losses.MultipleNegativesSymmetricRankingLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(MultipleNegativesSymmetricRankingLoss, self).__init__()
self.model = model
self.scale = scale
self.similarity_fct = similarity_fct
self.cross_entropy_loss = nn.CrossEntropyLoss()
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
anchor = reps[0]
candidates = torch.cat(reps[1:])
scores = self.similarity_fct(anchor, candidates) * self.scale
labels = torch.tensor(
range(len(scores)), dtype=torch.long, device=scores.device
) # Example a[i] should match with b[i]
anchor_positive_scores = scores[:, 0 : len(reps[1])]
forward_loss = self.cross_entropy_loss(scores, labels)
backward_loss = self.cross_entropy_loss(anchor_positive_scores.transpose(0, 1), labels)
return (forward_loss + backward_loss) / 2
def get_config_dict(self) -> Dict[str, Any]:
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
|
from typing import Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class MultipleNegativesSymmetricRankingLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.cos_sim):
"""
This loss is an adaptation of MultipleNegativesRankingLoss. MultipleNegativesRankingLoss computes the following loss:
For a given anchor and a list of candidates, find the positive candidate.
In MultipleNegativesSymmetricRankingLoss, we add another loss term: Given the positive and a list of all anchors,
find the correct (matching) anchor.
For the example of question-answering: You have (question, answer)-pairs. MultipleNegativesRankingLoss just computes
the loss to find the answer for a given question. MultipleNegativesSymmetricRankingLoss additionally computes the
loss to find the question for a given answer.
        Note: If you pass triplets, the negative entry will be ignored; an anchor is only matched against its positive.
Args:
model: SentenceTransformer model
scale: Output of similarity function is multiplied by scale
value
similarity_fct: similarity function between sentence
embeddings. By default, cos_sim. Can also be set to dot
product (and then set scale to 1)
Requirements:
1. (anchor, positive) pairs
Relations:
- Like :class:`MultipleNegativesRankingLoss`, but with an additional loss term.
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive) pairs | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
})
loss = losses.MultipleNegativesSymmetricRankingLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(MultipleNegativesSymmetricRankingLoss, self).__init__()
self.model = model
self.scale = scale
self.similarity_fct = similarity_fct
self.cross_entropy_loss = nn.CrossEntropyLoss()
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
anchor = reps[0]
candidates = torch.cat(reps[1:])
scores = self.similarity_fct(anchor, candidates) * self.scale
labels = torch.tensor(
range(len(scores)), dtype=torch.long, device=scores.device
) # Example a[i] should match with b[i]
anchor_positive_scores = scores[:, 0 : len(reps[1])]
forward_loss = self.cross_entropy_loss(scores, labels)
backward_loss = self.cross_entropy_loss(anchor_positive_scores.transpose(0, 1), labels)
return (forward_loss + backward_loss) / 2
def get_config_dict(self):
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
|
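A small self-contained sketch of the symmetric term computed in forward() above, reduced to the pure (anchor, positive) case with no extra negatives: the same score matrix is read row-wise (find the positive for an anchor) and, via its transpose, column-wise (find the anchor for a positive). The embeddings are random dummies.
import torch
from torch import nn
from sentence_transformers import util
anchors = torch.randn(4, 8)    # 4 anchor embeddings, hypothetical dimension 8
positives = torch.randn(4, 8)  # positives; a[i] should match p[i]
scores = util.cos_sim(anchors, positives) * 20.0  # scale=20.0 as in the default above
labels = torch.arange(scores.size(0))
ce = nn.CrossEntropyLoss()
forward_loss = ce(scores, labels)     # anchor -> positive direction
backward_loss = ce(scores.T, labels)  # positive -> anchor direction
print((forward_loss + backward_loss) / 2)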
# Copyright (c) OpenMMLab. All rights reserved.
import ast
import os.path as osp
import re
import warnings
from typing import Tuple
from mmengine.fileio import load
from mmengine.utils import check_file_exist
PKG2PROJECT = {
'mmcls': 'mmcls',
'mmdet': 'mmdet',
'mmdet3d': 'mmdet3d',
'mmseg': 'mmsegmentation',
'mmaction2': 'mmaction2',
'mmtrack': 'mmtrack',
'mmpose': 'mmpose',
'mmedit': 'mmedit',
'mmocr': 'mmocr',
'mmgen': 'mmgen',
'mmfewshot': 'mmfewshot',
'mmrazor': 'mmrazor',
'mmflow': 'mmflow',
'mmhuman3d': 'mmhuman3d',
'mmrotate': 'mmrotate',
'mmselfsup': 'mmselfsup',
'mmyolo': 'mmyolo',
}
def _get_cfg_metainfo(package_path: str, cfg_path: str) -> dict:
"""Get target meta information from all 'metafile.yml' defined in `mode-
index.yml` of external package.
Args:
package_path (str): Path of external package.
cfg_path (str): Name of experiment config.
Returns:
dict: Meta information of target experiment.
"""
meta_index_path = osp.join(package_path, '.mim', 'model-index.yml')
meta_index = load(meta_index_path)
cfg_dict = dict()
for meta_path in meta_index['Import']:
meta_path = osp.join(package_path, '.mim', meta_path)
cfg_meta = load(meta_path)
for model_cfg in cfg_meta['Models']:
if 'Config' not in model_cfg:
                warnings.warn(f'There is no `Config` defined in {model_cfg}')
continue
cfg_name = model_cfg['Config'].partition('/')[-1]
# Some config could have multiple weights, we only pick the
# first one.
if cfg_name in cfg_dict:
continue
cfg_dict[cfg_name] = model_cfg
if cfg_path not in cfg_dict:
raise ValueError(f'Expected configs: {cfg_dict.keys()}, but got '
f'{cfg_path}')
return cfg_dict[cfg_path]
def _get_external_cfg_path(package_path: str, cfg_file: str) -> str:
"""Get config path of external package.
Args:
package_path (str): Path of external package.
cfg_file (str): Name of experiment config.
Returns:
str: Absolute config path from external package.
"""
cfg_file = cfg_file.split('.')[0]
model_cfg = _get_cfg_metainfo(package_path, cfg_file)
cfg_path = osp.join(package_path, model_cfg['Config'])
check_file_exist(cfg_path)
return cfg_path
def _get_external_cfg_base_path(package_path: str, cfg_name: str) -> str:
"""Get base config path of external package.
Args:
package_path (str): Path of external package.
cfg_name (str): External relative config path with 'package::'.
Returns:
str: Absolute config path from external package.
"""
cfg_path = osp.join(package_path, '.mim', 'configs', cfg_name)
check_file_exist(cfg_path)
return cfg_path
def _get_package_and_cfg_path(cfg_path: str) -> Tuple[str, str]:
"""Get package name and relative config path.
Args:
cfg_path (str): External relative config path with 'package::'.
Returns:
Tuple[str, str]: Package name and config path.
"""
if re.match(r'\w*::\w*/\w*', cfg_path) is None:
raise ValueError(
            '`_get_package_and_cfg_path` is used to get the config of an external '
            'package, please specify the package name and relative config path, '
            'e.g. `mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py`')
package_cfg = cfg_path.split('::')
if len(package_cfg) > 2:
raise ValueError('`::` should only be used to separate package and '
'config name, but found multiple `::` in '
f'{cfg_path}')
package, cfg_path = package_cfg
    assert package in PKG2PROJECT, 'mmengine does not support loading ' \
                                   f'{package} config.'
package = PKG2PROJECT[package]
return package, cfg_path
class RemoveAssignFromAST(ast.NodeTransformer):
"""Remove Assign node if the target's name match the key.
Args:
key (str): The target name of the Assign node.
"""
def __init__(self, key):
self.key = key
def visit_Assign(self, node):
if (isinstance(node.targets[0], ast.Name)
and node.targets[0].id == self.key):
return None
else:
return node
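# A hedged illustration (not in the original file): how the helpers above behave on a
# scoped config path and on parsed source code. The example path is the one used in the
# error message; the snippet of config source is an assumption.
import ast

package, rel_cfg = _get_package_and_cfg_path(
    'mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py')
# package == 'mmdet', rel_cfg == 'faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'

tree = ast.parse('_base_ = "./base.py"\nlr = 0.01')
tree = RemoveAssignFromAST('_base_').visit(tree)
# the `_base_ = ...` assignment is dropped, while `lr = 0.01` is kept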
|
# Copyright (c) OpenMMLab. All rights reserved.
import ast
import os.path as osp
import re
import warnings
from typing import Tuple
from mmengine.fileio import load
from mmengine.utils import check_file_exist
PKG2PROJECT = {
'mmcls': 'mmcls',
'mmdet': 'mmdet',
'mmdet3d': 'mmdet3d',
'mmseg': 'mmsegmentation',
'mmaction2': 'mmaction2',
'mmtrack': 'mmtrack',
'mmpose': 'mmpose',
'mmedit': 'mmedit',
'mmocr': 'mmocr',
'mmgen': 'mmgen',
'mmfewshot': 'mmfewshot',
'mmrazor': 'mmrazor',
'mmflow': 'mmflow',
'mmhuman3d': 'mmhuman3d',
'mmrotate': 'mmrotate',
'mmselfsup': 'mmselfsup',
}
def _get_cfg_metainfo(package_path: str, cfg_path: str) -> dict:
"""Get target meta information from all 'metafile.yml' defined in `mode-
index.yml` of external package.
Args:
package_path (str): Path of external package.
cfg_path (str): Name of experiment config.
Returns:
dict: Meta information of target experiment.
"""
meta_index_path = osp.join(package_path, '.mim', 'model-index.yml')
meta_index = load(meta_index_path)
cfg_dict = dict()
for meta_path in meta_index['Import']:
meta_path = osp.join(package_path, '.mim', meta_path)
cfg_meta = load(meta_path)
for model_cfg in cfg_meta['Models']:
if 'Config' not in model_cfg:
                warnings.warn(f'There is no `Config` defined in {model_cfg}')
continue
cfg_name = model_cfg['Config'].partition('/')[-1]
# Some config could have multiple weights, we only pick the
# first one.
if cfg_name in cfg_dict:
continue
cfg_dict[cfg_name] = model_cfg
if cfg_path not in cfg_dict:
raise ValueError(f'Expected configs: {cfg_dict.keys()}, but got '
f'{cfg_path}')
return cfg_dict[cfg_path]
def _get_external_cfg_path(package_path: str, cfg_file: str) -> str:
"""Get config path of external package.
Args:
package_path (str): Path of external package.
cfg_file (str): Name of experiment config.
Returns:
str: Absolute config path from external package.
"""
cfg_file = cfg_file.split('.')[0]
model_cfg = _get_cfg_metainfo(package_path, cfg_file)
cfg_path = osp.join(package_path, model_cfg['Config'])
check_file_exist(cfg_path)
return cfg_path
def _get_external_cfg_base_path(package_path: str, cfg_name: str) -> str:
"""Get base config path of external package.
Args:
package_path (str): Path of external package.
cfg_name (str): External relative config path with 'package::'.
Returns:
str: Absolute config path from external package.
"""
cfg_path = osp.join(package_path, '.mim', 'configs', cfg_name)
check_file_exist(cfg_path)
return cfg_path
def _get_package_and_cfg_path(cfg_path: str) -> Tuple[str, str]:
"""Get package name and relative config path.
Args:
cfg_path (str): External relative config path with 'package::'.
Returns:
Tuple[str, str]: Package name and config path.
"""
if re.match(r'\w*::\w*/\w*', cfg_path) is None:
raise ValueError(
            '`_get_package_and_cfg_path` is used to get the config of an external '
            'package, please specify the package name and relative config path, '
            'e.g. `mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py`')
package_cfg = cfg_path.split('::')
if len(package_cfg) > 2:
raise ValueError('`::` should only be used to separate package and '
'config name, but found multiple `::` in '
f'{cfg_path}')
package, cfg_path = package_cfg
    assert package in PKG2PROJECT, 'mmengine does not support loading ' \
                                   f'{package} config.'
package = PKG2PROJECT[package]
return package, cfg_path
class RemoveAssignFromAST(ast.NodeTransformer):
"""Remove Assign node if the target's name match the key.
Args:
key (str): The target name of the Assign node.
"""
def __init__(self, key):
self.key = key
def visit_Assign(self, node):
if (isinstance(node.targets[0], ast.Name)
and node.targets[0].id == self.key):
return None
else:
return node
|
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import WanTransformer3DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin
enable_full_determinism()
class WanTransformer3DTests(ModelTesterMixin, unittest.TestCase):
model_class = WanTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 1
num_channels = 4
num_frames = 2
height = 16
width = 16
text_encoder_embedding_dim = 16
sequence_length = 12
hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, text_encoder_embedding_dim)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (4, 1, 16, 16)
@property
def output_shape(self):
return (4, 1, 16, 16)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"patch_size": (1, 2, 2),
"num_attention_heads": 2,
"attention_head_dim": 12,
"in_channels": 4,
"out_channels": 4,
"text_dim": 16,
"freq_dim": 256,
"ffn_dim": 32,
"num_layers": 2,
"cross_attn_norm": True,
"qk_norm": "rms_norm_across_heads",
"rope_max_seq_len": 32,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"WanTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
class WanTransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase):
model_class = WanTransformer3DModel
def prepare_init_args_and_inputs_for_common(self):
return WanTransformer3DTests().prepare_init_args_and_inputs_for_common()
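# A hedged sketch (not part of the test file; the forward call follows the usual
# diffusers convention that `return_dict=False` yields a tuple, which is an assumption
# here): build the tiny model from the init dict above and run one forward pass.
import torch
from diffusers import WanTransformer3DModel

init_dict, inputs_dict = WanTransformer3DTests().prepare_init_args_and_inputs_for_common()
model = WanTransformer3DModel(**init_dict).to(torch_device)
with torch.no_grad():
    output = model(**inputs_dict, return_dict=False)[0]
# shape and gradient-checkpointing checks themselves live in the shared ModelTesterMixin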
|
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import WanTransformer3DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin
enable_full_determinism()
class WanTransformer3DTests(ModelTesterMixin, TorchCompileTesterMixin, unittest.TestCase):
model_class = WanTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 1
num_channels = 4
num_frames = 2
height = 16
width = 16
text_encoder_embedding_dim = 16
sequence_length = 12
hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, text_encoder_embedding_dim)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (4, 1, 16, 16)
@property
def output_shape(self):
return (4, 1, 16, 16)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"patch_size": (1, 2, 2),
"num_attention_heads": 2,
"attention_head_dim": 12,
"in_channels": 4,
"out_channels": 4,
"text_dim": 16,
"freq_dim": 256,
"ffn_dim": 32,
"num_layers": 2,
"cross_attn_norm": True,
"qk_norm": "rms_norm_across_heads",
"rope_max_seq_len": 32,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"WanTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
from llama_index_instrumentation.base import BaseEvent # noqa
|
from typing import Any, Dict, Optional
from llama_index.core.bridge.pydantic import BaseModel, Field, ConfigDict
from uuid import uuid4
from datetime import datetime
from llama_index.core.instrumentation.span import active_span_id
class BaseEvent(BaseModel):
model_config = ConfigDict(
arbitrary_types_allowed=True,
# copy_on_model_validation = "deep" # not supported in Pydantic V2...
)
timestamp: datetime = Field(default_factory=lambda: datetime.now())
id_: str = Field(default_factory=lambda: str(uuid4()))
span_id: Optional[str] = Field(default_factory=active_span_id.get) # type: ignore
tags: Dict[str, Any] = Field(default={})
@classmethod
def class_name(cls) -> str:
"""Return class name."""
return "BaseEvent"
def dict(self, **kwargs: Any) -> Dict[str, Any]:
"""Keep for backwards compatibility."""
return self.model_dump(**kwargs)
def model_dump(self, **kwargs: Any) -> Dict[str, Any]:
data = super().model_dump(**kwargs)
data["class_name"] = self.class_name()
return data
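# A small illustration (assumed, not from the original module): concrete events are
# subclassed from BaseEvent, and model_dump() injects the class name alongside the payload.
class QueryStartEvent(BaseEvent):
    query: str

    @classmethod
    def class_name(cls) -> str:
        return "QueryStartEvent"


event = QueryStartEvent(query="who are you?")
payload = event.model_dump()
# payload includes "query", "timestamp", "id_", "span_id", "tags",
# and payload["class_name"] == "QueryStartEvent"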
|
"""Module definitions of agent types together with corresponding agents."""
from enum import Enum
from langchain_core._api import deprecated
from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
)
class AgentType(str, Enum):
"""An enum for agent types.
See documentation: https://python.langchain.com/api_reference/langchain/agents/langchain.agents.agent_types.AgentType.html
"""
ZERO_SHOT_REACT_DESCRIPTION = "zero-shot-react-description"
"""A zero shot agent that does a reasoning step before acting."""
REACT_DOCSTORE = "react-docstore"
"""A zero shot agent that does a reasoning step before acting.
This agent has access to a document store that allows it to look up
    relevant information for answering the question.
"""
SELF_ASK_WITH_SEARCH = "self-ask-with-search"
"""An agent that breaks down a complex question into a series of simpler questions.
This agent uses a search tool to look up answers to the simpler questions
in order to answer the original complex question.
"""
CONVERSATIONAL_REACT_DESCRIPTION = "conversational-react-description"
CHAT_ZERO_SHOT_REACT_DESCRIPTION = "chat-zero-shot-react-description"
"""A zero shot agent that does a reasoning step before acting.
    This agent is designed to be used in conjunction with chat models.
"""
CHAT_CONVERSATIONAL_REACT_DESCRIPTION = "chat-conversational-react-description"
STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION = (
"structured-chat-zero-shot-react-description"
)
"""An zero-shot react agent optimized for chat models.
This agent is capable of invoking tools that have multiple inputs.
"""
OPENAI_FUNCTIONS = "openai-functions"
"""An agent optimized for using open AI functions."""
OPENAI_MULTI_FUNCTIONS = "openai-multi-functions"
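# A hedged usage note (not in the original module): because AgentType subclasses str,
# its members compare equal to their plain string values, so they can be passed
# anywhere an agent-type string is expected.
agent_type = AgentType.ZERO_SHOT_REACT_DESCRIPTION
assert isinstance(agent_type, str)
assert agent_type == "zero-shot-react-description"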
|
"""Module definitions of agent types together with corresponding agents."""
from enum import Enum
from langchain_core._api import deprecated
from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
)
class AgentType(str, Enum):
"""An enum for agent types.
See documentation: https://python.langchain.com/api_reference/langchain/agents/langchain.agents.agent_types.AgentType.html
"""
ZERO_SHOT_REACT_DESCRIPTION = "zero-shot-react-description"
"""A zero shot agent that does a reasoning step before acting."""
REACT_DOCSTORE = "react-docstore"
"""A zero shot agent that does a reasoning step before acting.
This agent has access to a document store that allows it to look up
    relevant information for answering the question.
"""
SELF_ASK_WITH_SEARCH = "self-ask-with-search"
"""An agent that breaks down a complex question into a series of simpler questions.
This agent uses a search tool to look up answers to the simpler questions
in order to answer the original complex question.
"""
CONVERSATIONAL_REACT_DESCRIPTION = "conversational-react-description"
CHAT_ZERO_SHOT_REACT_DESCRIPTION = "chat-zero-shot-react-description"
"""A zero shot agent that does a reasoning step before acting.
    This agent is designed to be used in conjunction with chat models.
"""
CHAT_CONVERSATIONAL_REACT_DESCRIPTION = "chat-conversational-react-description"
STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION = (
"structured-chat-zero-shot-react-description"
)
"""An zero-shot react agent optimized for chat models.
This agent is capable of invoking tools that have multiple inputs.
"""
OPENAI_FUNCTIONS = "openai-functions"
"""An agent optimized for using open AI functions."""
OPENAI_MULTI_FUNCTIONS = "openai-multi-functions"
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.runner import Runner
from mmdet.registry import HOOKS
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class MemoryProfilerHook(Hook):
"""Memory profiler hook recording memory information including virtual
memory, swap memory, and the memory of the current process.
Args:
interval (int): Checking interval (every k iterations).
Default: 50.
"""
def __init__(self, interval: int = 50) -> None:
try:
from psutil import swap_memory, virtual_memory
self._swap_memory = swap_memory
self._virtual_memory = virtual_memory
except ImportError:
raise ImportError('psutil is not installed, please install it by: '
'pip install psutil')
try:
from memory_profiler import memory_usage
self._memory_usage = memory_usage
except ImportError:
raise ImportError(
'memory_profiler is not installed, please install it by: '
'pip install memory_profiler')
self.interval = interval
def _record_memory_information(self, runner: Runner) -> None:
"""Regularly record memory information.
Args:
runner (:obj:`Runner`): The runner of the training or evaluation
process.
"""
# in Byte
virtual_memory = self._virtual_memory()
swap_memory = self._swap_memory()
# in MB
process_memory = self._memory_usage()[0]
factor = 1024 * 1024
runner.logger.info(
'Memory information '
'available_memory: '
f'{round(virtual_memory.available / factor)} MB, '
'used_memory: '
f'{round(virtual_memory.used / factor)} MB, '
f'memory_utilization: {virtual_memory.percent} %, '
'available_swap_memory: '
f'{round((swap_memory.total - swap_memory.used) / factor)}'
' MB, '
f'used_swap_memory: {round(swap_memory.used / factor)} MB, '
f'swap_memory_utilization: {swap_memory.percent} %, '
'current_process_memory: '
f'{round(process_memory)} MB')
def after_train_iter(self,
runner: Runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Regularly record memory information.
Args:
runner (:obj:`Runner`): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
outputs (Union[Sequence[:obj:`BaseDataElement`], dict], optional):
Outputs from model. Defaults to None.
"""
if self.every_n_inner_iters(batch_idx, self.interval):
self._record_memory_information(runner)
def after_val_iter(
self,
runner: Runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Sequence[BaseDataElement]] = None) -> None:
"""Regularly record memory information.
Args:
runner (:obj:`Runner`): The runner of the validation process.
batch_idx (int): The index of the current batch in the val loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
outputs (Union[Sequence[:obj:`BaseDataElement`], dict], optional):
Outputs from model. Defaults to None.
"""
if self.every_n_inner_iters(batch_idx, self.interval):
self._record_memory_information(runner)
def after_test_iter(
self,
runner: Runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Sequence[BaseDataElement]] = None) -> None:
"""Regularly record memory information.
Args:
runner (:obj:`Runner`): The runner of the testing process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
outputs (Union[Sequence[:obj:`BaseDataElement`], dict], optional):
Outputs from model. Defaults to None.
"""
if self.every_n_inner_iters(batch_idx, self.interval):
self._record_memory_information(runner)
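# A hedged configuration sketch (assumed, mirroring the usual MMDetection custom-hook
# pattern rather than anything in this file): enabling the hook so memory statistics
# are logged every 50 iterations.
custom_hooks = [dict(type='MemoryProfilerHook', interval=50)]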
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.runner import Runner
from mmdet.registry import HOOKS
@HOOKS.register_module()
class MemoryProfilerHook(Hook):
"""Memory profiler hook recording memory information including virtual
memory, swap memory, and the memory of the current process.
Args:
interval (int): Checking interval (every k iterations).
Default: 50.
"""
def __init__(self, interval: int = 50) -> None:
try:
from psutil import swap_memory, virtual_memory
self._swap_memory = swap_memory
self._virtual_memory = virtual_memory
except ImportError:
raise ImportError('psutil is not installed, please install it by: '
'pip install psutil')
try:
from memory_profiler import memory_usage
self._memory_usage = memory_usage
except ImportError:
raise ImportError(
'memory_profiler is not installed, please install it by: '
'pip install memory_profiler')
self.interval = interval
def _after_iter(self,
runner: Runner,
batch_idx: int,
data_batch: Optional[Sequence[dict]] = None,
outputs: Optional[Union[Sequence[BaseDataElement],
dict]] = None,
mode: str = 'train') -> None:
"""Regularly record memory information.
Args:
runner (:obj:`Runner`): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
outputs (Union[Sequence[:obj:`BaseDataElement`], dict], optional):
Outputs from model. Defaults to None.
"""
if self.every_n_inner_iters(batch_idx, self.interval):
# in Byte
virtual_memory = self._virtual_memory()
swap_memory = self._swap_memory()
# in MB
process_memory = self._memory_usage()[0]
factor = 1024 * 1024
runner.logger.info(
'Memory information '
'available_memory: '
f'{round(virtual_memory.available / factor)} MB, '
'used_memory: '
f'{round(virtual_memory.used / factor)} MB, '
f'memory_utilization: {virtual_memory.percent} %, '
'available_swap_memory: '
f'{round((swap_memory.total - swap_memory.used) / factor)}'
' MB, '
f'used_swap_memory: {round(swap_memory.used / factor)} MB, '
f'swap_memory_utilization: {swap_memory.percent} %, '
'current_process_memory: '
f'{round(process_memory)} MB')
|
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.packs.corrective_rag import CorrectiveRAGPack
def test_class():
names_of_base_classes = [b.__name__ for b in CorrectiveRAGPack.__mro__]
assert BaseLlamaPack.__name__ in names_of_base_classes
|
from llama_index.core.llama_pack import BaseLlamaPack
from llama_index.packs.corrective_rag import CorrectiveRAGPack
def test_class():
names_of_base_classes = [b.__name__ for b in CorrectiveRAGPack.__mro__]
assert BaseLlamaPack.__name__ in names_of_base_classes
|
"""Logic for selecting examples to include in prompts."""
from typing import TYPE_CHECKING, Any
from langchain_core.example_selectors.length_based import (
LengthBasedExampleSelector,
)
from langchain_core.example_selectors.semantic_similarity import (
MaxMarginalRelevanceExampleSelector,
SemanticSimilarityExampleSelector,
)
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.example_selectors.ngram_overlap import (
NGramOverlapExampleSelector,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUPS = {
"NGramOverlapExampleSelector": (
"langchain_community.example_selectors.ngram_overlap"
),
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUPS)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"LengthBasedExampleSelector",
"MaxMarginalRelevanceExampleSelector",
"NGramOverlapExampleSelector",
"SemanticSimilarityExampleSelector",
]
|
"""Logic for selecting examples to include in prompts."""
from typing import TYPE_CHECKING, Any
from langchain_core.example_selectors.length_based import (
LengthBasedExampleSelector,
)
from langchain_core.example_selectors.semantic_similarity import (
MaxMarginalRelevanceExampleSelector,
SemanticSimilarityExampleSelector,
)
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.example_selectors.ngram_overlap import (
NGramOverlapExampleSelector,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUPS = {
"NGramOverlapExampleSelector": "langchain_community.example_selectors.ngram_overlap"
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUPS)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"LengthBasedExampleSelector",
"MaxMarginalRelevanceExampleSelector",
"NGramOverlapExampleSelector",
"SemanticSimilarityExampleSelector",
]
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
def __init__(
self,
generator: Callable,
features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
keep_in_memory: bool = False,
streaming: bool = False,
gen_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
self.builder = Generator(
cache_dir=cache_dir,
features=features,
generator=generator,
gen_kwargs=gen_kwargs,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split="train")
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
ignore_verifications = False
use_auth_token = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
ignore_verifications=ignore_verifications,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
use_auth_token=use_auth_token,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split="train", ignore_verifications=ignore_verifications, in_memory=self.keep_in_memory
)
return dataset
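# A hedged usage sketch (the public entry point `Dataset.from_generator`, which builds on
# the input stream above, is assumed to be available in this version of `datasets`).
from datasets import Dataset


def gen():
    for i in range(3):
        yield {"id": i, "text": f"example {i}"}


ds = Dataset.from_generator(gen)
# ds is a map-style dataset with columns "id" and "text"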
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
def __init__(
self,
generator: Callable,
features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
keep_in_memory: bool = False,
streaming: bool = False,
gen_kwargs: Optional[dict] = None,
**kwargs,
):
super().__init__(
features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs
)
self.builder = Generator(
cache_dir=cache_dir,
features=features,
generator=generator,
gen_kwargs=gen_kwargs,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split="train")
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
ignore_verifications = False
use_auth_token = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
ignore_verifications=ignore_verifications,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
use_auth_token=use_auth_token,
)
dataset = self.builder.as_dataset(
split="train", ignore_verifications=ignore_verifications, in_memory=self.keep_in_memory
)
return dataset
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing_extensions import TYPE_CHECKING
if TYPE_CHECKING:
from rich.console import Console, ConsoleOptions, RenderResult
from rich.measure import Measurement
from docarray.typing.tensor.abstract_tensor import AbstractTensor
class TensorDisplay:
"""
Rich representation of a tensor.
"""
tensor_min_width: int = 30
def __init__(self, tensor: 'AbstractTensor'):
self.tensor = tensor
def __rich_console__(
self, console: 'Console', options: 'ConsoleOptions'
) -> 'RenderResult':
comp_be = self.tensor.get_comp_backend()
t_squeezed = comp_be.squeeze(comp_be.detach(self.tensor))
if comp_be.n_dim(t_squeezed) == 1 and comp_be.shape(t_squeezed)[0] < 200:
import colorsys
from rich.color import Color
from rich.segment import Segment
from rich.style import Style
tensor_normalized = comp_be.minmax_normalize(t_squeezed, (0, 5))
hue = 0.75
saturation = 1.0
for idx, y in enumerate(tensor_normalized):
luminance = 0.1 + ((y / 5) * 0.7)
r, g, b = colorsys.hls_to_rgb(hue, luminance + 0.07, saturation)
color = Color.from_rgb(r * 255, g * 255, b * 255)
yield Segment('▄', Style(color=color, bgcolor=color))
if idx != 0 and idx % options.max_width == 0:
yield Segment.line()
else:
from rich.text import Text
yield Text(
f'{self.tensor.__class__.__name__} of '
f'shape {comp_be.shape(self.tensor)}, '
f'dtype: {str(comp_be.dtype(self.tensor))}'
)
def __rich_measure__(
self, console: 'Console', options: 'ConsoleOptions'
) -> 'Measurement':
from rich.measure import Measurement
width = self._compute_table_width(max_width=options.max_width)
return Measurement(1, width)
def _compute_table_width(self, max_width: int) -> int:
"""
Compute the width of the table. Depending on the length of the tensor, the width
should be in the range of 30 (min) and a given `max_width`.
:return: the width of the table
"""
comp_be = self.tensor.get_comp_backend()
t_squeezed = comp_be.squeeze(comp_be.detach(self.tensor))
if comp_be.n_dim(t_squeezed) == 1 and comp_be.shape(t_squeezed)[0] < max_width:
min_capped = max(comp_be.shape(t_squeezed)[0], self.tensor_min_width)
min_max_capped = min(min_capped, max_width)
return min_max_capped
else:
return max_width
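# A hedged rendering sketch (not in the original file; constructing the tensor through a
# BaseDoc field is an assumption that matches typical docarray usage): a short 1-D tensor
# is drawn as a colored strip, while longer or higher-dimensional tensors fall back to a
# textual summary.
import numpy as np
from rich.console import Console
from docarray import BaseDoc
from docarray.typing import NdArray


class MyDoc(BaseDoc):
    embedding: NdArray


doc = MyDoc(embedding=np.linspace(0.0, 1.0, 50))
Console().print(TensorDisplay(doc.embedding))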
|
from typing_extensions import TYPE_CHECKING
if TYPE_CHECKING:
from rich.console import Console, ConsoleOptions, RenderResult
from rich.measure import Measurement
from docarray.typing.tensor.abstract_tensor import AbstractTensor
class TensorDisplay:
"""
Rich representation of a tensor.
"""
tensor_min_width: int = 30
def __init__(self, tensor: 'AbstractTensor'):
self.tensor = tensor
def __rich_console__(
self, console: 'Console', options: 'ConsoleOptions'
) -> 'RenderResult':
comp_be = self.tensor.get_comp_backend()
t_squeezed = comp_be.squeeze(comp_be.detach(self.tensor))
if comp_be.n_dim(t_squeezed) == 1 and comp_be.shape(t_squeezed)[0] < 200:
import colorsys
from rich.color import Color
from rich.segment import Segment
from rich.style import Style
tensor_normalized = comp_be.minmax_normalize(t_squeezed, (0, 5))
hue = 0.75
saturation = 1.0
for idx, y in enumerate(tensor_normalized):
luminance = 0.1 + ((y / 5) * 0.7)
r, g, b = colorsys.hls_to_rgb(hue, luminance + 0.07, saturation)
color = Color.from_rgb(r * 255, g * 255, b * 255)
yield Segment('▄', Style(color=color, bgcolor=color))
if idx != 0 and idx % options.max_width == 0:
yield Segment.line()
else:
from rich.text import Text
yield Text(
f'{self.tensor.__class__.__name__} of '
f'shape {comp_be.shape(self.tensor)}, '
f'dtype: {str(comp_be.dtype(self.tensor))}'
)
def __rich_measure__(
self, console: 'Console', options: 'ConsoleOptions'
) -> 'Measurement':
from rich.measure import Measurement
width = self._compute_table_width(max_width=options.max_width)
return Measurement(1, width)
def _compute_table_width(self, max_width: int) -> int:
"""
Compute the width of the table. Depending on the length of the tensor, the width
should be in the range of 30 (min) and a given `max_width`.
:return: the width of the table
"""
comp_be = self.tensor.get_comp_backend()
t_squeezed = comp_be.squeeze(comp_be.detach(self.tensor))
if comp_be.n_dim(t_squeezed) == 1 and comp_be.shape(t_squeezed)[0] < max_width:
min_capped = max(comp_be.shape(t_squeezed)[0], self.tensor_min_width)
min_max_capped = min(min_capped, max_width)
return min_max_capped
else:
return max_width
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_batch_norm, has_method, import_modules_from_strings,
is_list_of, is_method_overridden, is_seq_of, is_str,
is_tuple_of, iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_abs, is_filepath,
mkdir_or_exist, scandir, symlink)
from .setup_env import set_multi_processing
from .sync_bn import revert_sync_batchnorm
from .version_utils import digit_version, get_git_hash
# TODO: creates intractable circular import issues
# from .time_counter import TimeCounter
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION', 'load_url',
'ManagerMeta', 'ManagerMixin', 'set_multi_processing', 'has_batch_norm',
'is_abs', 'revert_sync_batchnorm'
]
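# A hedged illustration (not part of the module): two of the exported helpers in use.
assert is_seq_of([1, 2, 3], int)
assert digit_version('1.10.0') > digit_version('1.9.1')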
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_batch_norm, has_method, import_modules_from_strings,
is_list_of, is_method_overridden, is_seq_of, is_str,
is_tuple_of, iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_abs, is_filepath,
mkdir_or_exist, scandir, symlink)
from .setup_env import set_multi_processing
from .version_utils import digit_version, get_git_hash
# TODO: creates intractable circular import issues
# from .time_counter import TimeCounter
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION', 'load_url',
'ManagerMeta', 'ManagerMixin', 'set_multi_processing', 'has_batch_norm',
'is_abs'
]
|