input
stringlengths 33
5k
| output
stringlengths 32
5k
|
---|---|
_base_ = './yolov3_d53_mstrain-608_273e_coco.py'
# dataset settings
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
# `mean` and `to_rgb` should be the same with the `preprocess_cfg`
dict(type='Expand', mean=[0, 0, 0], to_rgb=True, ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='RandomResize', scale=[(320, 320), (416, 416)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(416, 416), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = './yolov3_d53_mstrain-608_273e_coco.py'
# dataset settings
img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=[(320, 320), (416, 416)], keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(416, 416),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
Union,
Optional,
Dict,
)
import numpy as np
from .... import Document, DocumentArray
from ....math import ndarray
from ....math.helper import EPSILON
from ....math.ndarray import to_numpy_array
from ....score import NamedScore
from ....array.mixins.find import FindMixin as BaseFindMixin
if TYPE_CHECKING:
import tensorflow
import torch
ElasticArrayType = TypeVar(
'ElasticArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin(BaseFindMixin):
def _find_similar_vectors(self, query: 'ElasticArrayType', limit=10):
query = to_numpy_array(query)
is_all_zero = np.all(query == 0)
if is_all_zero:
query = query + EPSILON
resp = self._client.knn_search(
index=self._config.index_name,
knn={
'field': 'embedding',
'query_vector': query,
'k': limit,
'num_candidates': 10000,
},
)
list_of_hits = resp['hits']['hits']
da = DocumentArray()
for result in list_of_hits:
doc = Document.from_base64(result['_source']['blob'])
doc.scores['score'] = NamedScore(value=result['_score'])
doc.embedding = result['_source']['embedding']
da.append(doc)
return da
def _find_similar_documents_from_text(
self, query: str, index: str = 'text', limit: int = 10
):
"""
Return keyword matches for the input query
:param query: text used for keyword search
:param limit: number of items to be retrieved
:return: DocumentArray containing the closest documents to the query if it is a single query, otherwise a list of DocumentArrays containing
the closest Document objects for each of the queries in `query`.
"""
resp = self._client.search(
index=self._config.index_name,
query={'match': {index: query}},
source=['id', 'blob', 'text'],
size=limit,
)
list_of_hits = resp['hits']['hits']
da = DocumentArray()
for result in list_of_hits[:limit]:
doc = Document.from_base64(result['_source']['blob'])
doc.scores['score'] = NamedScore(value=result['_score'])
da.append(doc)
return da
def _find_by_text(
self, query: Union[str, List[str]], index: str = 'text', limit: int = 10
):
if isinstance(query, str):
query = [query]
return [
self._find_similar_documents_from_text(q, index=index, limit=limit)
for q in query
]
def _find(
self,
query: 'ElasticArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
:param query: input supported to be stored in Elastic. This includes any from the list '[np.ndarray, tensorflow.Tensor, torch.Tensor, Sequence[float]]'
:param limit: number of retrieved items
:param filter: filter query used for pre-filtering
:return: DocumentArray containing the closest documents to the query if it is a single query, otherwise a list of DocumentArrays containing
the closest Document objects for each of the queries in `query`.
"""
if filter is not None:
raise ValueError(
'Filtered vector search is not supported for ElasticSearch backend'
)
query = np.array(query)
num_rows, n_dim = ndarray.get_array_rows(query)
if n_dim != 2:
query = query.reshape((num_rows, -1))
return [self._find_similar_vectors(q, limit=limit) for q in query]
|
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
Union,
)
import numpy as np
from .... import Document, DocumentArray
from ....math import ndarray
from ....math.helper import EPSILON
from ....math.ndarray import to_numpy_array
from ....score import NamedScore
from ....array.mixins.find import FindMixin as BaseFindMixin
if TYPE_CHECKING:
import tensorflow
import torch
ElasticArrayType = TypeVar(
'ElasticArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin(BaseFindMixin):
def _find_similar_vectors(self, query: 'ElasticArrayType', limit=10):
query = to_numpy_array(query)
is_all_zero = np.all(query == 0)
if is_all_zero:
query = query + EPSILON
resp = self._client.knn_search(
index=self._config.index_name,
knn={
'field': 'embedding',
'query_vector': query,
'k': limit,
'num_candidates': 10000,
},
)
list_of_hits = resp['hits']['hits']
da = DocumentArray()
for result in list_of_hits:
doc = Document.from_base64(result['_source']['blob'])
doc.scores['score'] = NamedScore(value=result['_score'])
doc.embedding = result['_source']['embedding']
da.append(doc)
return da
def _find_similar_documents_from_text(
self, query: str, index: str = 'text', limit: int = 10
):
"""
Return keyword matches for the input query
:param query: text used for keyword search
:param limit: number of items to be retrieved
:return: DocumentArray containing the closest documents to the query if it is a single query, otherwise a list of DocumentArrays containing
the closest Document objects for each of the queries in `query`.
"""
resp = self._client.search(
index=self._config.index_name,
query={'match': {index: query}},
source=['id', 'blob', 'text'],
size=limit,
)
list_of_hits = resp['hits']['hits']
da = DocumentArray()
for result in list_of_hits[:limit]:
doc = Document.from_base64(result['_source']['blob'])
doc.scores['score'] = NamedScore(value=result['_score'])
da.append(doc)
return da
def _find_by_text(
self, query: Union[str, List[str]], index: str = 'text', limit: int = 10
):
if isinstance(query, str):
query = [query]
return [
self._find_similar_documents_from_text(q, index=index, limit=limit)
for q in query
]
def _find(
self,
query: 'ElasticArrayType',
limit: int = 10,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
:param query: input supported to be stored in Elastic. This includes any from the list '[np.ndarray, tensorflow.Tensor, torch.Tensor, Sequence[float]]'
:param limit: number of retrieved items
:return: DocumentArray containing the closest documents to the query if it is a single query, otherwise a list of DocumentArrays containing
the closest Document objects for each of the queries in `query`.
"""
query = np.array(query)
num_rows, n_dim = ndarray.get_array_rows(query)
if n_dim != 2:
query = query.reshape((num_rows, -1))
return [self._find_similar_vectors(q, limit=limit) for q in query]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .batch_sampler import (AspectRatioBatchSampler,
TrackAspectRatioBatchSampler)
from .class_aware_sampler import ClassAwareSampler
from .multi_source_sampler import GroupMultiSourceSampler, MultiSourceSampler
from .track_img_sampler import TrackImgSampler
__all__ = [
'ClassAwareSampler', 'AspectRatioBatchSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'TrackImgSampler',
'TrackAspectRatioBatchSampler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .batch_sampler import AspectRatioBatchSampler
from .class_aware_sampler import ClassAwareSampler
from .multi_source_sampler import GroupMultiSourceSampler, MultiSourceSampler
__all__ = [
'ClassAwareSampler', 'AspectRatioBatchSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler'
]
|
from typing import Any
import pytest
from langchain_community.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def import_lancedb() -> Any:
try:
import lancedb
except ImportError as e:
raise ImportError(
"Could not import lancedb package. "
"Please install it with `pip install lancedb`."
) from e
return lancedb
@pytest.mark.requires("lancedb")
def test_lancedb_with_connection() -> None:
lancedb = import_lancedb()
embeddings = FakeEmbeddings()
db = lancedb.connect("/tmp/lancedb_connection")
texts = ["text 1", "text 2", "item 3"]
store = LanceDB(connection=db, embedding=embeddings)
store.add_texts(texts)
result = store.similarity_search("text 1")
result_texts = [doc.page_content for doc in result]
assert "text 1" in result_texts
store.delete(filter="text = 'text 1'")
assert store.get_table().count_rows() == 2
@pytest.mark.requires("lancedb")
def test_lancedb_without_connection() -> None:
embeddings = FakeEmbeddings()
texts = ["text 1", "text 2", "item 3"]
store = LanceDB(embedding=embeddings)
store.add_texts(texts)
result = store.similarity_search("text 1")
result_texts = [doc.page_content for doc in result]
assert "text 1" in result_texts
@pytest.mark.requires("lancedb")
def test_lancedb_add_texts() -> None:
embeddings = FakeEmbeddings()
store = LanceDB(embedding=embeddings)
store.add_texts(["text 2"])
result = store.similarity_search("text 2")
result_texts = [doc.page_content for doc in result]
assert "text 2" in result_texts
@pytest.mark.requires("lancedb")
def test_mmr() -> None:
embeddings = FakeEmbeddings()
store = LanceDB(embedding=embeddings)
store.add_texts(["text 1", "text 2", "item 3"])
result = store.max_marginal_relevance_search(query="text")
result_texts = [doc.page_content for doc in result]
assert "text 1" in result_texts
result = store.max_marginal_relevance_search_by_vector(
embeddings.embed_query("text")
)
result_texts = [doc.page_content for doc in result]
assert "text 1" in result_texts
@pytest.mark.requires("lancedb")
def test_lancedb_delete() -> None:
embeddings = FakeEmbeddings()
store = LanceDB(embedding=embeddings)
store.add_texts(["text 1", "text 2", "item 3"])
store.delete(filter="text = 'text 1'")
assert store.get_table().count_rows() == 2
@pytest.mark.requires("lancedb")
def test_lancedb_delete_by_ids() -> None:
embeddings = FakeEmbeddings()
store = LanceDB(embedding=embeddings, id_key="pk")
ids = store.add_texts(["text 1", "text 2", "item 3"])
store.delete(ids=ids)
assert store.get_table().count_rows() == 0
@pytest.mark.requires("lancedb")
def test_lancedb_all_searches() -> None:
embeddings = FakeEmbeddings()
store = LanceDB(embedding=embeddings)
store.add_texts(["text 1", "text 2", "item 3"])
result_1 = store.similarity_search_with_relevance_scores(
"text 1", distance="cosine"
)
assert len(result_1[0]) == 2
assert "text 1" in result_1[0][0].page_content
result_2 = store.similarity_search_by_vector(embeddings.embed_query("text 1"))
assert "text 1" in result_2[0].page_content
result_3 = store.similarity_search_by_vector_with_relevance_scores(
embeddings.embed_query("text 1")
)
assert len(result_3[0]) == 2
assert "text 1" in result_3[0][0].page_content
@pytest.mark.requires("lancedb")
def test_lancedb_no_metadata() -> None:
lancedb = import_lancedb()
embeddings = FakeEmbeddings()
# Connect to a temporary LanceDB instance
db = lancedb.connect("/tmp/lancedb_no_metadata_test")
# Create data without the 'metadata' field
texts = ["text 1", "text 2", "item 3"]
data = []
for idx, text in enumerate(texts):
embedding = embeddings.embed_documents([text])[0]
data.append(
{
"vector": embedding,
"id": str(idx),
"text": text,
# Note: We're deliberately not including 'metadata' here
}
)
# Create the table without 'metadata' column
db.create_table("vectorstore_no_metadata", data=data)
# Initialize LanceDB with the existing connection and table name
store = LanceDB(
connection=db,
embedding=embeddings,
table_name="vectorstore_no_metadata",
)
# Perform a similarity search
result = store.similarity_search("text 1")
# Verify that the metadata in the Document objects is an empty dictionary
for doc in result:
assert doc.metadata == {}, (
"Expected empty metadata when 'metadata' column is missing"
)
# Clean up by deleting the table (optional)
db.drop_table("vectorstore_no_metadata")
|
from typing import Any
import pytest
from langchain_community.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def import_lancedb() -> Any:
try:
import lancedb
except ImportError as e:
raise ImportError(
"Could not import lancedb package. "
"Please install it with `pip install lancedb`."
) from e
return lancedb
@pytest.mark.requires("lancedb")
def test_lancedb_with_connection() -> None:
lancedb = import_lancedb()
embeddings = FakeEmbeddings()
db = lancedb.connect("/tmp/lancedb_connection")
texts = ["text 1", "text 2", "item 3"]
store = LanceDB(connection=db, embedding=embeddings)
store.add_texts(texts)
result = store.similarity_search("text 1")
result_texts = [doc.page_content for doc in result]
assert "text 1" in result_texts
store.delete(filter="text = 'text 1'")
assert store.get_table().count_rows() == 2
@pytest.mark.requires("lancedb")
def test_lancedb_without_connection() -> None:
embeddings = FakeEmbeddings()
texts = ["text 1", "text 2", "item 3"]
store = LanceDB(embedding=embeddings)
store.add_texts(texts)
result = store.similarity_search("text 1")
result_texts = [doc.page_content for doc in result]
assert "text 1" in result_texts
@pytest.mark.requires("lancedb")
def test_lancedb_add_texts() -> None:
embeddings = FakeEmbeddings()
store = LanceDB(embedding=embeddings)
store.add_texts(["text 2"])
result = store.similarity_search("text 2")
result_texts = [doc.page_content for doc in result]
assert "text 2" in result_texts
@pytest.mark.requires("lancedb")
def test_mmr() -> None:
embeddings = FakeEmbeddings()
store = LanceDB(embedding=embeddings)
store.add_texts(["text 1", "text 2", "item 3"])
result = store.max_marginal_relevance_search(query="text")
result_texts = [doc.page_content for doc in result]
assert "text 1" in result_texts
result = store.max_marginal_relevance_search_by_vector(
embeddings.embed_query("text")
)
result_texts = [doc.page_content for doc in result]
assert "text 1" in result_texts
@pytest.mark.requires("lancedb")
def test_lancedb_delete() -> None:
embeddings = FakeEmbeddings()
store = LanceDB(embedding=embeddings)
store.add_texts(["text 1", "text 2", "item 3"])
store.delete(filter="text = 'text 1'")
assert store.get_table().count_rows() == 2
@pytest.mark.requires("lancedb")
def test_lancedb_delete_by_ids() -> None:
embeddings = FakeEmbeddings()
store = LanceDB(embedding=embeddings, id_key="pk")
ids = store.add_texts(["text 1", "text 2", "item 3"])
store.delete(ids=ids)
assert store.get_table().count_rows() == 0
@pytest.mark.requires("lancedb")
def test_lancedb_all_searches() -> None:
embeddings = FakeEmbeddings()
store = LanceDB(embedding=embeddings)
store.add_texts(["text 1", "text 2", "item 3"])
result_1 = store.similarity_search_with_relevance_scores(
"text 1", distance="cosine"
)
assert len(result_1[0]) == 2
assert "text 1" in result_1[0][0].page_content
result_2 = store.similarity_search_by_vector(embeddings.embed_query("text 1"))
assert "text 1" in result_2[0].page_content
result_3 = store.similarity_search_by_vector_with_relevance_scores(
embeddings.embed_query("text 1")
)
assert len(result_3[0]) == 2 # type: ignore
assert "text 1" in result_3[0][0].page_content # type: ignore
@pytest.mark.requires("lancedb")
def test_lancedb_no_metadata() -> None:
lancedb = import_lancedb()
embeddings = FakeEmbeddings()
# Connect to a temporary LanceDB instance
db = lancedb.connect("/tmp/lancedb_no_metadata_test")
# Create data without the 'metadata' field
texts = ["text 1", "text 2", "item 3"]
data = []
for idx, text in enumerate(texts):
embedding = embeddings.embed_documents([text])[0]
data.append(
{
"vector": embedding,
"id": str(idx),
"text": text,
# Note: We're deliberately not including 'metadata' here
}
)
# Create the table without 'metadata' column
db.create_table("vectorstore_no_metadata", data=data)
# Initialize LanceDB with the existing connection and table name
store = LanceDB(
connection=db,
embedding=embeddings,
table_name="vectorstore_no_metadata",
)
# Perform a similarity search
result = store.similarity_search("text 1")
# Verify that the metadata in the Document objects is an empty dictionary
for doc in result:
assert doc.metadata == {}, (
"Expected empty metadata when 'metadata' column is missing"
)
# Clean up by deleting the table (optional)
db.drop_table("vectorstore_no_metadata")
|
# Copyright (c) OpenMMLab. All rights reserved.
from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
AmpOptimWrapper, ApexOptimWrapper,
DefaultOptimWrapperConstructor, OptimWrapper,
OptimWrapperDict, build_optim_wrapper)
# yapf: disable
from .scheduler import (ConstantLR, ConstantMomentum, ConstantParamScheduler,
CosineAnnealingLR, CosineAnnealingMomentum,
CosineAnnealingParamScheduler, ExponentialLR,
ExponentialMomentum, ExponentialParamScheduler,
LinearLR, LinearMomentum, LinearParamScheduler,
MultiStepLR, MultiStepMomentum,
MultiStepParamScheduler, OneCycleLR,
OneCycleParamScheduler, PolyLR, PolyMomentum,
PolyParamScheduler, ReduceOnPlateauLR,
ReduceOnPlateauMomentum, ReduceOnPlateauParamScheduler,
StepLR, StepMomentum, StepParamScheduler,
_ParamScheduler)
# yapf: enable
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS', 'build_optim_wrapper',
'DefaultOptimWrapperConstructor', 'ConstantLR', 'CosineAnnealingLR',
'ExponentialLR', 'LinearLR', 'MultiStepLR', 'StepLR', 'ConstantMomentum',
'CosineAnnealingMomentum', 'ExponentialMomentum', 'LinearMomentum',
'MultiStepMomentum', 'StepMomentum', 'ConstantParamScheduler',
'CosineAnnealingParamScheduler', 'ExponentialParamScheduler',
'LinearParamScheduler', 'MultiStepParamScheduler', 'StepParamScheduler',
'_ParamScheduler', 'OptimWrapper', 'AmpOptimWrapper', 'ApexOptimWrapper',
'OptimWrapperDict', 'OneCycleParamScheduler', 'OneCycleLR', 'PolyLR',
'PolyMomentum', 'PolyParamScheduler', 'ReduceOnPlateauLR',
'ReduceOnPlateauMomentum', 'ReduceOnPlateauParamScheduler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
AmpOptimWrapper, DefaultOptimWrapperConstructor,
OptimWrapper, OptimWrapperDict, build_optim_wrapper)
# yapf: disable
from .scheduler import (ConstantLR, ConstantMomentum, ConstantParamScheduler,
CosineAnnealingLR, CosineAnnealingMomentum,
CosineAnnealingParamScheduler, ExponentialLR,
ExponentialMomentum, ExponentialParamScheduler,
LinearLR, LinearMomentum, LinearParamScheduler,
MultiStepLR, MultiStepMomentum,
MultiStepParamScheduler, OneCycleLR,
OneCycleParamScheduler, PolyLR, PolyMomentum,
PolyParamScheduler, ReduceOnPlateauLR,
ReduceOnPlateauMomentum, ReduceOnPlateauParamScheduler,
StepLR, StepMomentum, StepParamScheduler,
_ParamScheduler)
# yapf: enable
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS', 'build_optim_wrapper',
'DefaultOptimWrapperConstructor', 'ConstantLR', 'CosineAnnealingLR',
'ExponentialLR', 'LinearLR', 'MultiStepLR', 'StepLR', 'ConstantMomentum',
'CosineAnnealingMomentum', 'ExponentialMomentum', 'LinearMomentum',
'MultiStepMomentum', 'StepMomentum', 'ConstantParamScheduler',
'CosineAnnealingParamScheduler', 'ExponentialParamScheduler',
'LinearParamScheduler', 'MultiStepParamScheduler', 'StepParamScheduler',
'_ParamScheduler', 'OptimWrapper', 'AmpOptimWrapper', 'OptimWrapperDict',
'OneCycleParamScheduler', 'OneCycleLR', 'PolyLR', 'PolyMomentum',
'PolyParamScheduler', 'ReduceOnPlateauLR', 'ReduceOnPlateauMomentum',
'ReduceOnPlateauParamScheduler'
]
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.16.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.16.0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
_base_ = 'mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-270k_coco.py' # noqa
# training schedule for 90k
max_iters = 90000
# learning rate policy
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=90000,
by_epoch=False,
milestones=[81000, 85500, 87750],
gamma=0.1)
]
|
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py'
# training schedule for 90k
max_iters = 90000
# learning rate policy
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=90000,
by_epoch=False,
milestones=[81000, 85500, 87750],
gamma=0.1)
]
|
from jina import Executor, requests
from docarray import DocList
from docarray.documents import TextDoc
class MyExecutor(Executor):
@requests
def foo(self, docs: DocList[TextDoc], **kwargs) -> DocList[TextDoc]:
docs[0].text = 'hello, world!'
docs[1].text = 'goodbye, world!'
return docs
|
from jina import Executor, requests, DocumentArray
class MyExecutor(Executor):
@requests
def foo(self, docs: DocumentArray, **kwargs):
docs[0].text = 'hello, world!'
docs[1].text = 'goodbye, world!'
|
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='FasterRCNN',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
strides=(1, 2, 2, 1),
dilations=(1, 1, 1, 2),
out_indices=(3, ),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
type='RPNHead',
in_channels=2048,
feat_channels=2048,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=2048,
featmap_strides=[16]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=2048,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms=dict(type='nms', iou_threshold=0.7),
nms_pre=6000,
max_per_img=1000,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
|
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
preprocess_cfg=preprocess_cfg,
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
strides=(1, 2, 2, 1),
dilations=(1, 1, 1, 2),
out_indices=(3, ),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
type='RPNHead',
in_channels=2048,
feat_channels=2048,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=2048,
featmap_strides=[16]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=2048,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms=dict(type='nms', iou_threshold=0.7),
nms_pre=6000,
max_per_img=1000,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
|
import logging
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseNanoBEIREvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create evaluator for some NanoBEIR datasets
evaluator = SparseNanoBEIREvaluator(
dataset_names=["QuoraRetrieval", "MSMARCO"],
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
|
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseNanoBEIREvaluator,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create evaluator for some NanoBEIR datasets
evaluator = SparseNanoBEIREvaluator(
dataset_names=["QuoraRetrieval", "MSMARCO"],
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
print("Starting evaluation on all NanoBEIR datasets")
results = evaluator(model)
print(f"Primary metric: {evaluator.primary_metric}")
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# Print results for each dataset
for key, value in results.items():
if key.startswith("Nano"):
print(f"{key}: {value:.4f}")
|
from typing import Literal, Optional
from langchain_core.agents import AgentAction
def _escape(xml: str) -> str:
"""Replace XML tags with custom safe delimiters."""
replacements = {
"<tool>": "[[tool]]",
"</tool>": "[[/tool]]",
"<tool_input>": "[[tool_input]]",
"</tool_input>": "[[/tool_input]]",
"<observation>": "[[observation]]",
"</observation>": "[[/observation]]",
}
for orig, repl in replacements.items():
xml = xml.replace(orig, repl)
return xml
def format_xml(
intermediate_steps: list[tuple[AgentAction, str]],
*,
escape_format: Optional[Literal["minimal"]] = "minimal",
) -> str:
"""Format the intermediate steps as XML.
Args:
intermediate_steps: The intermediate steps.
escape_format: The escaping format to use. Currently only 'minimal' is
supported, which replaces XML tags with custom delimiters to prevent
conflicts.
Returns:
The intermediate steps as XML.
"""
log = ""
for action, observation in intermediate_steps:
if escape_format == "minimal":
# Escape XML tags in tool names and inputs using custom delimiters
tool = _escape(action.tool)
tool_input = _escape(str(action.tool_input))
observation = _escape(str(observation))
else:
tool = action.tool
tool_input = str(action.tool_input)
observation = str(observation)
log += (
f"<tool>{tool}</tool><tool_input>{tool_input}"
f"</tool_input><observation>{observation}</observation>"
)
return log
|
from langchain_core.agents import AgentAction
def format_xml(
intermediate_steps: list[tuple[AgentAction, str]],
) -> str:
"""Format the intermediate steps as XML.
Args:
intermediate_steps: The intermediate steps.
Returns:
The intermediate steps as XML.
"""
log = ""
for action, observation in intermediate_steps:
log += (
f"<tool>{action.tool}</tool><tool_input>{action.tool_input}"
f"</tool_input><observation>{observation}</observation>"
)
return log
|
"""Integration test for Stack Exchange."""
from langchain_community.utilities import StackExchangeAPIWrapper
def test_call() -> None:
"""Test that call runs."""
stackexchange = StackExchangeAPIWrapper()
output = stackexchange.run("zsh: command not found: python")
assert output != "hello"
def test_failure() -> None:
"""Test that call that doesn't run."""
stackexchange = StackExchangeAPIWrapper()
output = stackexchange.run("sjefbsmnf")
assert output == "No relevant results found for 'sjefbsmnf' on Stack Overflow"
def test_success() -> None:
"""Test that call that doesn't run."""
stackexchange = StackExchangeAPIWrapper()
output = stackexchange.run("zsh: command not found: python")
assert "zsh: command not found: python" in output
|
"""Integration test for Stack Exchange."""
from langchain_community.utilities import StackExchangeAPIWrapper
def test_call() -> None:
"""Test that call runs."""
stackexchange = StackExchangeAPIWrapper() # type: ignore[call-arg]
output = stackexchange.run("zsh: command not found: python")
assert output != "hello"
def test_failure() -> None:
"""Test that call that doesn't run."""
stackexchange = StackExchangeAPIWrapper() # type: ignore[call-arg]
output = stackexchange.run("sjefbsmnf")
assert output == "No relevant results found for 'sjefbsmnf' on Stack Overflow"
def test_success() -> None:
"""Test that call that doesn't run."""
stackexchange = StackExchangeAPIWrapper() # type: ignore[call-arg]
output = stackexchange.run("zsh: command not found: python")
assert "zsh: command not found: python" in output
|
_base_ = '../gcnet/mask-rcnn_r101-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py'
# model settings
model = dict(
roi_head=dict(
bbox_roi_extractor=dict(
type='GenericRoIExtractor',
aggregation='sum',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
pre_cfg=dict(
type='ConvModule',
in_channels=256,
out_channels=256,
kernel_size=5,
padding=2,
inplace=False,
),
post_cfg=dict(
type='GeneralizedAttention',
in_channels=256,
spatial_range=-1,
num_heads=6,
attention_type='0100',
kv_stride=2)),
mask_roi_extractor=dict(
type='GenericRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
pre_cfg=dict(
type='ConvModule',
in_channels=256,
out_channels=256,
kernel_size=5,
padding=2,
inplace=False,
),
post_cfg=dict(
type='GeneralizedAttention',
in_channels=256,
spatial_range=-1,
num_heads=6,
attention_type='0100',
kv_stride=2))))
|
_base_ = '../gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py'
# model settings
model = dict(
roi_head=dict(
bbox_roi_extractor=dict(
type='GenericRoIExtractor',
aggregation='sum',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
pre_cfg=dict(
type='ConvModule',
in_channels=256,
out_channels=256,
kernel_size=5,
padding=2,
inplace=False,
),
post_cfg=dict(
type='GeneralizedAttention',
in_channels=256,
spatial_range=-1,
num_heads=6,
attention_type='0100',
kv_stride=2)),
mask_roi_extractor=dict(
type='GenericRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
pre_cfg=dict(
type='ConvModule',
in_channels=256,
out_channels=256,
kernel_size=5,
padding=2,
inplace=False,
),
post_cfg=dict(
type='GeneralizedAttention',
in_channels=256,
spatial_range=-1,
num_heads=6,
attention_type='0100',
kv_stride=2))))
|
import random
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
from keras.src.utils.module_utils import tensorflow as tf
GLOBAL_RANDOM_SEED = "global_random_seed"
@keras_export("keras.utils.set_random_seed")
def set_random_seed(seed):
"""Sets all random seeds (Python, NumPy, and backend framework, e.g. TF).
You can use this utility to make almost any Keras program fully
deterministic. Some limitations apply in cases where network communications
are involved (e.g. parameter server distribution), which creates additional
sources of randomness, or when certain non-deterministic cuDNN ops are
involved.
Calling this utility is equivalent to the following:
```python
import random
random.seed(seed)
import numpy as np
np.random.seed(seed)
import tensorflow as tf # Only if TF is installed
tf.random.set_seed(seed)
import torch # Only if the backend is 'torch'
torch.manual_seed(seed)
```
Note that the TensorFlow seed is set even if you're not using TensorFlow
as your backend framework, since many workflows leverage `tf.data`
pipelines (which feature random shuffling). Likewise many workflows
might leverage NumPy APIs.
Arguments:
seed: Integer, the random seed to use.
"""
if not isinstance(seed, int):
raise ValueError(
"Expected `seed` argument to be an integer. "
f"Received: seed={seed} (of type {type(seed)})"
)
# Store seed in global state so we can query it if set.
global_state.set_global_attribute(GLOBAL_RANDOM_SEED, seed)
random.seed(seed)
np.random.seed(seed)
if tf.available:
tf.random.set_seed(seed)
if backend.backend() == "torch":
import torch
torch.manual_seed(seed)
def get_random_seed():
"""Returns the explicit integer random seed if set.
If the seed has been explicitly set via `set_random_seed`, then
returns the seed. Otherwise, returns `None`.
"""
return global_state.get_global_attribute(GLOBAL_RANDOM_SEED)
|
import random
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.utils.module_utils import tensorflow as tf
@keras_export("keras.utils.set_random_seed")
def set_random_seed(seed):
"""Sets all random seeds (Python, NumPy, and backend framework, e.g. TF).
You can use this utility to make almost any Keras program fully
deterministic. Some limitations apply in cases where network communications
are involved (e.g. parameter server distribution), which creates additional
sources of randomness, or when certain non-deterministic cuDNN ops are
involved.
Calling this utility is equivalent to the following:
```python
import random
random.seed(seed)
import numpy as np
np.random.seed(seed)
import tensorflow as tf # Only if TF is installed
tf.random.set_seed(seed)
import torch # Only if the backend is 'torch'
torch.manual_seed(seed)
```
Note that the TensorFlow seed is set even if you're not using TensorFlow
as your backend framework, since many workflows leverage `tf.data`
pipelines (which feature random shuffling). Likewise many workflows
might leverage NumPy APIs.
Arguments:
seed: Integer, the random seed to use.
"""
if not isinstance(seed, int):
raise ValueError(
"Expected `seed` argument to be an integer. "
f"Received: seed={seed} (of type {type(seed)})"
)
random.seed(seed)
np.random.seed(seed)
if tf.available:
tf.random.set_seed(seed)
if backend.backend() == "torch":
import torch
torch.manual_seed(seed)
|
import torch
from ._bounding_boxes import BoundingBoxes, BoundingBoxFormat
from ._image import Image
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._tv_tensor import TVTensor
from ._video import Video
def wrap(wrappee, *, like, **kwargs):
"""[BETA] Convert a :class:`torch.Tensor` (``wrappee``) into the same :class:`~torchvision.tv_tensors.TVTensor` subclass as ``like``.
If ``like`` is a :class:`~torchvision.tv_tensors.BoundingBoxes`, the ``format`` and ``canvas_size`` of
``like`` are assigned to ``wrappee``, unless they are passed as ``kwargs``.
Args:
wrappee (Tensor): The tensor to convert.
like (:class:`~torchvision.tv_tensors.TVTensor`): The reference.
``wrappee`` will be converted into the same subclass as ``like``.
kwargs: Can contain "format" and "canvas_size" if ``like`` is a :class:`~torchvision.tv_tensor.BoundingBoxes`.
Ignored otherwise.
"""
if isinstance(like, BoundingBoxes):
return BoundingBoxes._wrap(
wrappee,
format=kwargs.get("format", like.format),
canvas_size=kwargs.get("canvas_size", like.canvas_size),
)
else:
return wrappee.as_subclass(type(like))
|
import torch
from ._bounding_box import BoundingBoxes, BoundingBoxFormat
from ._image import Image
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._tv_tensor import TVTensor
from ._video import Video
def wrap(wrappee, *, like, **kwargs):
"""[BETA] Convert a :class:`torch.Tensor` (``wrappee``) into the same :class:`~torchvision.tv_tensors.TVTensor` subclass as ``like``.
If ``like`` is a :class:`~torchvision.tv_tensors.BoundingBoxes`, the ``format`` and ``canvas_size`` of
``like`` are assigned to ``wrappee``, unless they are passed as ``kwargs``.
Args:
wrappee (Tensor): The tensor to convert.
like (:class:`~torchvision.tv_tensors.TVTensor`): The reference.
``wrappee`` will be converted into the same subclass as ``like``.
kwargs: Can contain "format" and "canvas_size" if ``like`` is a :class:`~torchvision.tv_tensor.BoundingBoxes`.
Ignored otherwise.
"""
if isinstance(like, BoundingBoxes):
return BoundingBoxes._wrap(
wrappee,
format=kwargs.get("format", like.format),
canvas_size=kwargs.get("canvas_size", like.canvas_size),
)
else:
return wrappee.as_subclass(type(like))
|
from typing import Any, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.memory import BaseMemory
from langchain_core.messages import SystemMessage
from langchain_core.prompts.chat import MessagesPlaceholder
from langchain_core.tools import BaseTool
from langchain.agents.agent import AgentExecutor
from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
AgentTokenBufferMemory,
)
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.memory.token_buffer import ConversationTokenBufferMemory
def _get_default_system_message() -> SystemMessage:
return SystemMessage(
content=(
"Do your best to answer the questions. "
"Feel free to use any tools available to look up "
"relevant information, only if necessary"
)
)
def create_conversational_retrieval_agent(
llm: BaseLanguageModel,
tools: list[BaseTool],
remember_intermediate_steps: bool = True,
memory_key: str = "chat_history",
system_message: Optional[SystemMessage] = None,
verbose: bool = False,
max_token_limit: int = 2000,
**kwargs: Any,
) -> AgentExecutor:
"""A convenience method for creating a conversational retrieval agent.
Args:
llm: The language model to use, should be ChatOpenAI
tools: A list of tools the agent has access to
remember_intermediate_steps: Whether the agent should remember intermediate
steps or not. Intermediate steps refer to prior action/observation
pairs from previous questions. The benefit of remembering these is if
there is relevant information in there, the agent can use it to answer
follow up questions. The downside is it will take up more tokens.
memory_key: The name of the memory key in the prompt.
system_message: The system message to use. By default, a basic one will
be used.
verbose: Whether or not the final AgentExecutor should be verbose or not,
defaults to False.
max_token_limit: The max number of tokens to keep around in memory.
Defaults to 2000.
Returns:
An agent executor initialized appropriately
"""
if remember_intermediate_steps:
memory: BaseMemory = AgentTokenBufferMemory(
memory_key=memory_key, llm=llm, max_token_limit=max_token_limit
)
else:
memory = ConversationTokenBufferMemory(
memory_key=memory_key,
return_messages=True,
output_key="output",
llm=llm,
max_token_limit=max_token_limit,
)
_system_message = system_message or _get_default_system_message()
prompt = OpenAIFunctionsAgent.create_prompt(
system_message=_system_message,
extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)],
)
agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
return AgentExecutor(
agent=agent,
tools=tools,
memory=memory,
verbose=verbose,
return_intermediate_steps=remember_intermediate_steps,
**kwargs,
)
|
from typing import Any, List, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.memory import BaseMemory
from langchain_core.messages import SystemMessage
from langchain_core.prompts.chat import MessagesPlaceholder
from langchain_core.tools import BaseTool
from langchain.agents.agent import AgentExecutor
from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
AgentTokenBufferMemory,
)
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.memory.token_buffer import ConversationTokenBufferMemory
def _get_default_system_message() -> SystemMessage:
return SystemMessage(
content=(
"Do your best to answer the questions. "
"Feel free to use any tools available to look up "
"relevant information, only if necessary"
)
)
def create_conversational_retrieval_agent(
llm: BaseLanguageModel,
tools: List[BaseTool],
remember_intermediate_steps: bool = True,
memory_key: str = "chat_history",
system_message: Optional[SystemMessage] = None,
verbose: bool = False,
max_token_limit: int = 2000,
**kwargs: Any,
) -> AgentExecutor:
"""A convenience method for creating a conversational retrieval agent.
Args:
llm: The language model to use, should be ChatOpenAI
tools: A list of tools the agent has access to
remember_intermediate_steps: Whether the agent should remember intermediate
steps or not. Intermediate steps refer to prior action/observation
pairs from previous questions. The benefit of remembering these is if
there is relevant information in there, the agent can use it to answer
follow up questions. The downside is it will take up more tokens.
memory_key: The name of the memory key in the prompt.
system_message: The system message to use. By default, a basic one will
be used.
verbose: Whether or not the final AgentExecutor should be verbose or not,
defaults to False.
max_token_limit: The max number of tokens to keep around in memory.
Defaults to 2000.
Returns:
An agent executor initialized appropriately
"""
if remember_intermediate_steps:
memory: BaseMemory = AgentTokenBufferMemory(
memory_key=memory_key, llm=llm, max_token_limit=max_token_limit
)
else:
memory = ConversationTokenBufferMemory(
memory_key=memory_key,
return_messages=True,
output_key="output",
llm=llm,
max_token_limit=max_token_limit,
)
_system_message = system_message or _get_default_system_message()
prompt = OpenAIFunctionsAgent.create_prompt(
system_message=_system_message,
extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)],
)
agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
return AgentExecutor(
agent=agent,
tools=tools,
memory=memory,
verbose=verbose,
return_intermediate_steps=remember_intermediate_steps,
**kwargs,
)
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# training schedule, voc dataset is repeated 3 times, in
# `_base_/datasets/voc0712.py`, so the actual epoch = 4 * 3 = 12
max_epochs = 4
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[3],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# training schedule, voc dataset is repeated 3 times, in
# `_base_/datasets/voc0712.py`, so the actual epoch = 4 * 3 = 12
max_epochs = 4
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[3],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
from __future__ import annotations
import os
from copy import deepcopy
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Pooling, StaticEmbedding, Transformer
from sentence_transformers.util import is_datasets_available
from tests.utils import SafeTemporaryDirectory
if is_datasets_available():
from datasets import DatasetDict, load_dataset
@pytest.fixture(scope="session")
def _stsb_bert_tiny_model() -> SentenceTransformer:
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
model.model_card_data.generate_widget_examples = False # Disable widget examples generation for testing
return model
@pytest.fixture()
def stsb_bert_tiny_model(_stsb_bert_tiny_model: SentenceTransformer) -> SentenceTransformer:
return deepcopy(_stsb_bert_tiny_model)
@pytest.fixture()
def stsb_bert_tiny_model_onnx() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-onnx")
@pytest.fixture()
def stsb_bert_tiny_model_openvino() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-openvino")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding_model(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture(scope="session")
def stsb_dataset_dict() -> DatasetDict:
return load_dataset("sentence-transformers/stsb")
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
"""
if os.environ.get("CI", None):
# Note: `ignore_cleanup_errors=True` is used to avoid NotADirectoryError in Windows on GitHub Actions.
# See https://github.com/python/cpython/issues/107408, https://www.scivision.dev/python-tempfile-permission-error-windows/
with SafeTemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
|
from __future__ import annotations
import os
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.util import is_datasets_available
from tests.utils import SafeTemporaryDirectory
if is_datasets_available():
from datasets import DatasetDict, load_dataset
@pytest.fixture()
def stsb_bert_tiny_model() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture(scope="session")
def stsb_bert_tiny_model_reused() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture()
def stsb_bert_tiny_model_onnx() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-onnx")
@pytest.fixture()
def stsb_bert_tiny_model_openvino() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-openvino")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture(scope="session")
def stsb_dataset_dict() -> DatasetDict:
return load_dataset("sentence-transformers/stsb")
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
"""
if os.environ.get("CI", None):
# Note: `ignore_cleanup_errors=True` is used to avoid NotADirectoryError in Windows on GitHub Actions.
# See https://github.com/python/cpython/issues/107408, https://www.scivision.dev/python-tempfile-permission-error-windows/
with SafeTemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
|
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
from docarray.typing import NdArray
class MyDoc(BaseDocument):
embedding: NdArray
text: str
image: Image
def test_from_to_json():
da = DocumentArray[MyDoc](
[
MyDoc(embedding=[1, 2, 3, 4, 5], text='hello', image=Image(url='aux.png')),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=Image()),
]
)
json_da = da.to_json()
da2 = DocumentArray[MyDoc].from_json(json_da)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
|
import pytest
from docarray import BaseDocument
from docarray.typing import NdArray
from docarray.documents import Image
from docarray import DocumentArray
class MyDoc(BaseDocument):
embedding: NdArray
text: str
image: Image
def test_from_to_json():
da = DocumentArray[MyDoc](
[
MyDoc(embedding=[1, 2, 3, 4, 5], text='hello', image=Image(url='aux.png')),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=Image()),
]
)
json_da = da.to_json()
da2 = DocumentArray[MyDoc].from_json(json_da)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to (overwrite the
default cache dir).
force_download (`bool`, defaults to `False`):
If `True`, re-dowload the file even if it's already cached in
the cache dir.
resume_download (`bool`, defaults to `False`):
If `True`, resume the download if an incompletely received file is
found.
proxies (`dict`, *optional*):
user_agent (`str`, *optional*):
Optional string or dict that will be appended to the user-agent on remote
requests.
extract_compressed_file (`bool`, defaults to `False`):
If `True` and the path point to a zip or tar file,
extract the compressed file in a folder along the archive.
force_extract (`bool`, defaults to `False`):
If `True` when `extract_compressed_file` is `True` and the archive
was already extracted, re-extract the archive and override the folder where it was extracted.
delete_extracted (`bool`, defaults to `False`):
Whether to delete (or keep) the extracted files.
use_etag (`bool`, defaults to `True`):
Whether to use the ETag HTTP response header to validate the cached files.
num_proc (`int`, *optional*):
The number of processes to launch to download the files in parallel.
max_retries (`int`, default to `1`):
The number of times to retry an HTTP request if it fails.
use_auth_token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
ignore_url_params (`bool`, defaults to `False`):
Whether to strip all query parameters and fragments from
the download URL before using it for caching the file.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the dataset file-system backend, if any.
download_desc (`str`, *optional*):
A description to be displayed alongside with the progress bar while downloading the files.
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
use_auth_token: Optional[Union[str, bool]] = None
ignore_url_params: bool = False
storage_options: Optional[Dict] = None
download_desc: Optional[str] = None
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to (overwrite the
default cache dir).
force_download (`bool`, defaults to `False`):
If `True`, re-dowload the file even if it's already cached in
the cache dir.
resume_download (`bool`, defaults to `False`):
If `True`, resume the download if an incompletely received file is
found.
proxies (`dict`, *optional*):
user_agent (`str`, *optional*):
Optional string or dict that will be appended to the user-agent on remote
requests.
extract_compressed_file (`bool`, defaults to `False`):
If `True` and the path point to a zip or tar file,
extract the compressed file in a folder along the archive.
force_extract (`bool`, defaults to `False`):
If `True` when `extract_compressed_file` is `True` and the archive
was already extracted, re-extract the archive and override the folder where it was extracted.
delete_extracted (`bool`, defaults to `False`):
Whether to delete (or keep) the extracted files.
use_etag (`bool`, defaults to `True`):
Whether to use the ETag HTTP response header to validate the cached files.
num_proc (`int`, *optional*):
The number of processes to launch to download the files in parallel.
max_retries (`int`, default to `1`):
The number of times to retry an HTTP request if it fails.
use_auth_token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
ignore_url_params (`bool`, defaults to `False`):
Whether to strip all query parameters and fragments from
the download URL before using it for caching the file.
download_desc (`str`, *optional*):
A description to be displayed alongside with the progress bar while downloading the files.
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
use_auth_token: Optional[Union[str, bool]] = None
ignore_url_params: bool = False
download_desc: Optional[str] = None
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
|
"""Base classes for chain routing."""
from __future__ import annotations
from abc import ABC
from collections.abc import Mapping
from typing import Any, NamedTuple, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
Callbacks,
)
from pydantic import ConfigDict
from langchain.chains.base import Chain
class Route(NamedTuple):
destination: Optional[str]
next_inputs: dict[str, Any]
class RouterChain(Chain, ABC):
"""Chain that outputs the name of a destination chain and the inputs to it."""
@property
def output_keys(self) -> list[str]:
return ["destination", "next_inputs"]
def route(self, inputs: dict[str, Any], callbacks: Callbacks = None) -> Route:
"""
Route inputs to a destination chain.
Args:
inputs: inputs to the chain
callbacks: callbacks to use for the chain
Returns:
a Route object
"""
result = self(inputs, callbacks=callbacks)
return Route(result["destination"], result["next_inputs"])
async def aroute(
self, inputs: dict[str, Any], callbacks: Callbacks = None
) -> Route:
result = await self.acall(inputs, callbacks=callbacks)
return Route(result["destination"], result["next_inputs"])
class MultiRouteChain(Chain):
"""Use a single chain to route an input to one of multiple candidate chains."""
router_chain: RouterChain
"""Chain that routes inputs to destination chains."""
destination_chains: Mapping[str, Chain]
"""Chains that return final answer to inputs."""
default_chain: Chain
"""Default chain to use when none of the destination chains are suitable."""
silent_errors: bool = False
"""If True, use default_chain when an invalid destination name is provided.
Defaults to False."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> list[str]:
"""Will be whatever keys the router chain prompt expects.
:meta private:
"""
return self.router_chain.input_keys
@property
def output_keys(self) -> list[str]:
"""Will always return text key.
:meta private:
"""
return []
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
route = self.router_chain.route(inputs, callbacks=callbacks)
_run_manager.on_text(
str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose
)
if not route.destination:
return self.default_chain(route.next_inputs, callbacks=callbacks)
if route.destination in self.destination_chains:
return self.destination_chains[route.destination](
route.next_inputs, callbacks=callbacks
)
if self.silent_errors:
return self.default_chain(route.next_inputs, callbacks=callbacks)
msg = f"Received invalid destination chain name '{route.destination}'"
raise ValueError(msg)
async def _acall(
self,
inputs: dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> dict[str, Any]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
route = await self.router_chain.aroute(inputs, callbacks=callbacks)
await _run_manager.on_text(
str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose
)
if not route.destination:
return await self.default_chain.acall(
route.next_inputs, callbacks=callbacks
)
if route.destination in self.destination_chains:
return await self.destination_chains[route.destination].acall(
route.next_inputs, callbacks=callbacks
)
if self.silent_errors:
return await self.default_chain.acall(
route.next_inputs, callbacks=callbacks
)
msg = f"Received invalid destination chain name '{route.destination}'"
raise ValueError(msg)
|
"""Base classes for chain routing."""
from __future__ import annotations
from abc import ABC
from collections.abc import Mapping
from typing import Any, NamedTuple, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
Callbacks,
)
from pydantic import ConfigDict
from langchain.chains.base import Chain
class Route(NamedTuple):
destination: Optional[str]
next_inputs: dict[str, Any]
class RouterChain(Chain, ABC):
"""Chain that outputs the name of a destination chain and the inputs to it."""
@property
def output_keys(self) -> list[str]:
return ["destination", "next_inputs"]
def route(self, inputs: dict[str, Any], callbacks: Callbacks = None) -> Route:
"""
Route inputs to a destination chain.
Args:
inputs: inputs to the chain
callbacks: callbacks to use for the chain
Returns:
a Route object
"""
result = self(inputs, callbacks=callbacks)
return Route(result["destination"], result["next_inputs"])
async def aroute(
self, inputs: dict[str, Any], callbacks: Callbacks = None
) -> Route:
result = await self.acall(inputs, callbacks=callbacks)
return Route(result["destination"], result["next_inputs"])
class MultiRouteChain(Chain):
"""Use a single chain to route an input to one of multiple candidate chains."""
router_chain: RouterChain
"""Chain that routes inputs to destination chains."""
destination_chains: Mapping[str, Chain]
"""Chains that return final answer to inputs."""
default_chain: Chain
"""Default chain to use when none of the destination chains are suitable."""
silent_errors: bool = False
"""If True, use default_chain when an invalid destination name is provided.
Defaults to False."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> list[str]:
"""Will be whatever keys the router chain prompt expects.
:meta private:
"""
return self.router_chain.input_keys
@property
def output_keys(self) -> list[str]:
"""Will always return text key.
:meta private:
"""
return []
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
route = self.router_chain.route(inputs, callbacks=callbacks)
_run_manager.on_text(
str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose
)
if not route.destination:
return self.default_chain(route.next_inputs, callbacks=callbacks)
elif route.destination in self.destination_chains:
return self.destination_chains[route.destination](
route.next_inputs, callbacks=callbacks
)
elif self.silent_errors:
return self.default_chain(route.next_inputs, callbacks=callbacks)
else:
msg = f"Received invalid destination chain name '{route.destination}'"
raise ValueError(msg)
async def _acall(
self,
inputs: dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> dict[str, Any]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
route = await self.router_chain.aroute(inputs, callbacks=callbacks)
await _run_manager.on_text(
str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose
)
if not route.destination:
return await self.default_chain.acall(
route.next_inputs, callbacks=callbacks
)
elif route.destination in self.destination_chains:
return await self.destination_chains[route.destination].acall(
route.next_inputs, callbacks=callbacks
)
elif self.silent_errors:
return await self.default_chain.acall(
route.next_inputs, callbacks=callbacks
)
else:
msg = f"Received invalid destination chain name '{route.destination}'"
raise ValueError(msg)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform
from keras.src.ops.image import crop_images
from keras.src.ops.image import elastic_transform
from keras.src.ops.image import extract_patches
from keras.src.ops.image import gaussian_blur
from keras.src.ops.image import hsv_to_rgb
from keras.src.ops.image import map_coordinates
from keras.src.ops.image import pad_images
from keras.src.ops.image import perspective_transform
from keras.src.ops.image import resize
from keras.src.ops.image import rgb_to_grayscale
from keras.src.ops.image import rgb_to_hsv
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform
from keras.src.ops.image import crop_images
from keras.src.ops.image import extract_patches
from keras.src.ops.image import gaussian_blur
from keras.src.ops.image import hsv_to_rgb
from keras.src.ops.image import map_coordinates
from keras.src.ops.image import pad_images
from keras.src.ops.image import perspective_transform
from keras.src.ops.image import resize
from keras.src.ops.image import rgb_to_grayscale
from keras.src.ops.image import rgb_to_hsv
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.autosectionlabel',
'sphinx_markdown_tables',
'myst_parser',
'sphinx_copybutton',
'sphinx.ext.autodoc.typehints',
] # yapf: disable
autodoc_typehints = 'description'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
# Specify the language of shared menu
'menu_lang': 'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.autosectionlabel',
'sphinx_markdown_tables',
'myst_parser',
'sphinx_copybutton',
] # yapf: disable
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
# Specify the language of shared menu
'menu_lang': 'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
|
from typing import Optional
from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.storage.kvstore.elasticsearch import ElasticsearchKVStore
class ElasticsearchIndexStore(KVIndexStore):
"""
Elasticsearch Index store.
Args:
elasticsearch_kvstore (ElasticsearchKVStore): Elasticsearch key-value store
namespace (str): namespace for the index store
"""
def __init__(
self,
elasticsearch_kvstore: ElasticsearchKVStore,
collection_index: Optional[str] = None,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> None:
"""Init a ElasticsearchIndexStore."""
super().__init__(
elasticsearch_kvstore,
namespace=namespace,
collection_suffix=collection_suffix,
)
if collection_index:
self._collection = collection_index
else:
self._collection = f"llama_index-index_store.data-{self._namespace}"
|
from typing import Optional
from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.storage.kvstore.elasticsearch import ElasticsearchKVStore
class ElasticsearchIndexStore(KVIndexStore):
"""Elasticsearch Index store.
Args:
elasticsearch_kvstore (ElasticsearchKVStore): Elasticsearch key-value store
namespace (str): namespace for the index store
"""
def __init__(
self,
elasticsearch_kvstore: ElasticsearchKVStore,
collection_index: Optional[str] = None,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> None:
"""Init a ElasticsearchIndexStore."""
super().__init__(
elasticsearch_kvstore,
namespace=namespace,
collection_suffix=collection_suffix,
)
if collection_index:
self._collection = collection_index
else:
self._collection = f"llama_index-index_store.data-{self._namespace}"
|
"""LLM Compiler agent pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.agent import AgentRunner
from llama_index.core.callbacks import CallbackManager
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.llms.llm import LLM
from llama_index.core.settings import Settings
from llama_index.core.tools.types import BaseTool
from .step import CoAAgentWorker
class CoAAgentPack(BaseLlamaPack):
"""
Chain-of-abstraction Agent Pack.
Args:
tools (List[BaseTool]): List of tools to use.
llm (Optional[LLM]): LLM to use. Defaults to gpt-4.
"""
def __init__(
self,
tools: List[BaseTool],
llm: Optional[LLM] = None,
callback_manager: Optional[CallbackManager] = None,
agent_worker_kwargs: Optional[Dict[str, Any]] = None,
agent_runner_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
"""Init params."""
self.llm = llm or Settings.llm
self.callback_manager = callback_manager or self.llm.callback_manager
self.agent_worker = CoAAgentWorker.from_tools(
tools=tools,
llm=llm,
verbose=True,
callback_manager=self.callback_manager,
**(agent_worker_kwargs or {})
)
self.agent = AgentRunner(
self.agent_worker,
callback_manager=self.callback_manager,
**(agent_runner_kwargs or {})
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"llm": self.llm,
"callback_manager": self.callback_manager,
"agent_worker": self.agent_worker,
"agent": self.agent,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.agent.chat(*args, **kwargs)
|
"""LLM Compiler agent pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.agent import AgentRunner
from llama_index.core.callbacks import CallbackManager
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.llms.llm import LLM
from llama_index.core.settings import Settings
from llama_index.core.tools.types import BaseTool
from .step import CoAAgentWorker
class CoAAgentPack(BaseLlamaPack):
"""Chain-of-abstraction Agent Pack.
Args:
tools (List[BaseTool]): List of tools to use.
llm (Optional[LLM]): LLM to use. Defaults to gpt-4.
"""
def __init__(
self,
tools: List[BaseTool],
llm: Optional[LLM] = None,
callback_manager: Optional[CallbackManager] = None,
agent_worker_kwargs: Optional[Dict[str, Any]] = None,
agent_runner_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
"""Init params."""
self.llm = llm or Settings.llm
self.callback_manager = callback_manager or self.llm.callback_manager
self.agent_worker = CoAAgentWorker.from_tools(
tools=tools,
llm=llm,
verbose=True,
callback_manager=self.callback_manager,
**(agent_worker_kwargs or {})
)
self.agent = AgentRunner(
self.agent_worker,
callback_manager=self.callback_manager,
**(agent_runner_kwargs or {})
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"llm": self.llm,
"callback_manager": self.callback_manager,
"agent_worker": self.agent_worker,
"agent": self.agent,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.agent.chat(*args, **kwargs)
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline)))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline)))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.13.1'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client==0.8.0',
],
'annlite': [
'annlite',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'seaborn',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov==3.0.0',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
'rocksdict<=0.2.16',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.13.1'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client==0.8.0',
],
'annlite': [
'annlite',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'seaborn',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov==3.0.0',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
'rocksdict<=0.2.16',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
from typing import Optional, Type, TypeVar, Union
from uuid import UUID
from pydantic import BaseConfig, parse_obj_as
from pydantic.fields import ModelField
from docarray.document.base_node import BaseNode
from docarray.proto import NodeProto
T = TypeVar('T', bound='ID')
class ID(str, BaseNode):
"""
Represent an unique ID
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[str, int, UUID],
field: Optional['ModelField'] = None,
config: Optional['BaseConfig'] = None,
) -> T:
try:
id: str = str(value)
return cls(id)
except Exception:
raise ValueError(f'Expected a str, int or UUID, got {type(value)}')
def _to_node_protobuf(self) -> NodeProto:
"""Convert an ID into a NodeProto message. This function should
be called when the self is nested into another Document that need to be
converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(id=self)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
read ndarray from a proto msg
:param pb_msg:
:return: a string
"""
return parse_obj_as(cls, pb_msg)
|
from typing import TYPE_CHECKING, Optional, Type, TypeVar, Union
from uuid import UUID
from docarray.document.base_node import BaseNode
from docarray.proto import NodeProto
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='ID')
class ID(str, BaseNode):
"""
Represent an unique ID
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[str, int, UUID],
field: Optional['ModelField'] = None,
config: Optional['BaseConfig'] = None,
) -> T:
try:
id: str = str(value)
return cls(id)
except Exception:
raise ValueError(f'Expected a str, int or UUID, got {type(value)}')
def _to_node_protobuf(self) -> NodeProto:
"""Convert an ID into a NodeProto message. This function should
be called when the self is nested into another Document that need to be
converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(id=self)
|
_base_ = './maskformer_r50_mstrain_16x1_75e_coco.py'
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
depths = [2, 2, 18, 2]
model = dict(
backbone=dict(
_delete_=True,
type='SwinTransformer',
pretrain_img_size=384,
embed_dims=192,
patch_size=4,
window_size=12,
mlp_ratio=4,
depths=depths,
num_heads=[6, 12, 24, 48],
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(
in_channels=[192, 384, 768, 1536], # pass to pixel_decoder inside
pixel_decoder=dict(
_delete_=True,
type='PixelDecoder',
norm_cfg=dict(type='GN', num_groups=32),
act_cfg=dict(type='ReLU')),
enforce_decoder_input_project=True))
# optimizer
# weight_decay = 0.01
# norm_weight_decay = 0.0
# embed_weight_decay = 0.0
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
norm_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'norm': norm_multi,
'absolute_pos_embed': embed_multi,
'relative_position_bias_table': embed_multi,
'query_embed': embed_multi
}
optim_wrapper = dict(
optimizer=dict(lr=6e-5, weight_decay=0.01),
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
max_epochs = 300
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[250],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (64 GPUs) x (1 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = './maskformer_r50_mstrain_16x1_75e_coco.py'
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
depths = [2, 2, 18, 2]
model = dict(
backbone=dict(
_delete_=True,
type='SwinTransformer',
pretrain_img_size=384,
embed_dims=192,
patch_size=4,
window_size=12,
mlp_ratio=4,
depths=depths,
num_heads=[6, 12, 24, 48],
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(
in_channels=[192, 384, 768, 1536], # pass to pixel_decoder inside
pixel_decoder=dict(
_delete_=True,
type='PixelDecoder',
norm_cfg=dict(type='GN', num_groups=32),
act_cfg=dict(type='ReLU')),
enforce_decoder_input_project=True))
# optimizer
# weight_decay = 0.01
# norm_weight_decay = 0.0
# embed_weight_decay = 0.0
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
norm_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'norm': norm_multi,
'absolute_pos_embed': embed_multi,
'relative_position_bias_table': embed_multi,
'query_embed': embed_multi
}
optim_wrapper = dict(
optimizer=dict(lr=6e-5, weight_decay=0.01),
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
max_epochs = 300
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[250],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import List, Tuple
from mmcv.runner import BaseModule
from torch import Tensor
from mmdet.core.utils import (InstanceList, OptConfigType, OptMultiConfig,
SampleList)
from mmdet.registry import MODELS
class BaseRoIHead(BaseModule, metaclass=ABCMeta):
"""Base class for RoIHeads."""
def __init__(self,
bbox_roi_extractor: OptMultiConfig = None,
bbox_head: OptMultiConfig = None,
mask_roi_extractor: OptMultiConfig = None,
mask_head: OptMultiConfig = None,
shared_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if shared_head is not None:
self.shared_head = MODELS.build(shared_head)
if bbox_head is not None:
self.init_bbox_head(bbox_roi_extractor, bbox_head)
if mask_head is not None:
self.init_mask_head(mask_roi_extractor, mask_head)
self.init_assigner_sampler()
@property
def with_bbox(self) -> bool:
"""bool: whether the RoI head contains a `bbox_head`"""
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self) -> bool:
"""bool: whether the RoI head contains a `mask_head`"""
return hasattr(self, 'mask_head') and self.mask_head is not None
@property
def with_shared_head(self) -> bool:
"""bool: whether the RoI head contains a `shared_head`"""
return hasattr(self, 'shared_head') and self.shared_head is not None
@abstractmethod
def init_bbox_head(self, *args, **kwargs):
"""Initialize ``bbox_head``"""
pass
@abstractmethod
def init_mask_head(self, *args, **kwargs):
"""Initialize ``mask_head``"""
pass
@abstractmethod
def init_assigner_sampler(self, *args, **kwargs):
"""Initialize assigner and sampler."""
pass
@abstractmethod
def forward_train(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList, **kwargs):
"""Forward function during training."""
# TODO: Currently not supported
async def async_simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False,
**kwargs):
"""Asynchronized test function."""
raise NotImplementedError
def simple_test(self,
x: Tuple[Tensor],
proposal_list: InstanceList,
batch_img_metas: List[dict],
rescale: bool = False,
**kwargs):
"""Test without augmentation."""
# TODO: Currently not supported
def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Tuple, Union
from mmcv.runner import BaseModule
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from torch import Tensor
from mmdet.core import DetDataSample
from mmdet.registry import MODELS
class BaseRoIHead(BaseModule, metaclass=ABCMeta):
"""Base class for RoIHeads."""
def __init__(self,
bbox_roi_extractor: Optional[Union[ConfigDict, dict]] = None,
bbox_head: Optional[Union[ConfigDict, dict]] = None,
mask_roi_extractor: Optional[Union[ConfigDict, dict]] = None,
mask_head: Optional[Union[ConfigDict, dict]] = None,
shared_head: Optional[Union[ConfigDict, dict]] = None,
train_cfg: Optional[Union[ConfigDict, dict]] = None,
test_cfg: Optional[Union[ConfigDict, dict]] = None,
pretrained: Optional[str] = None,
init_cfg: Optional[Union[ConfigDict, dict]] = None) -> None:
super().__init__(init_cfg=init_cfg)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if shared_head is not None:
shared_head.pretrained = pretrained
self.shared_head = MODELS.build(shared_head)
if bbox_head is not None:
self.init_bbox_head(bbox_roi_extractor, bbox_head)
if mask_head is not None:
self.init_mask_head(mask_roi_extractor, mask_head)
self.init_assigner_sampler()
@property
def with_bbox(self) -> bool:
"""bool: whether the RoI head contains a `bbox_head`"""
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self) -> bool:
"""bool: whether the RoI head contains a `mask_head`"""
return hasattr(self, 'mask_head') and self.mask_head is not None
@property
def with_shared_head(self) -> bool:
"""bool: whether the RoI head contains a `shared_head`"""
return hasattr(self, 'shared_head') and self.shared_head is not None
@abstractmethod
def init_bbox_head(self, *args, **kwargs):
"""Initialize ``bbox_head``"""
pass
@abstractmethod
def init_mask_head(self, *args, **kwargs):
"""Initialize ``mask_head``"""
pass
@abstractmethod
def init_assigner_sampler(self, *args, **kwargs):
"""Initialize assigner and sampler."""
pass
@abstractmethod
def forward_train(self, x: Tuple[Tensor],
proposal_list: List[InstanceData],
batch_data_samples: List[DetDataSample], **kwargs):
"""Forward function during training."""
# TODO: Currently not supported
async def async_simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False,
**kwargs):
"""Asynchronized test function."""
raise NotImplementedError
def simple_test(self,
x: Tuple[Tensor],
proposal_list: List[InstanceData],
batch_img_metas: List[dict],
rescale: bool = False,
**kwargs):
"""Test without augmentation."""
# TODO: Currently not supported
def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
|
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class TFDatasetAdapter(DataAdapter):
"""Adapter that handles `tf.data.Dataset`."""
def __init__(self, dataset, class_weight=None, distribution=None):
"""Initialize the TFDatasetAdapter.
Args:
dataset: The input `tf.data.Dataset` instance.
class_weight: A map where the keys are integer class ids and values
are the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`.
distribution: A `keras.distribution.Distribution` instance. Used to
shard the input dataset into per worker/process dataset
instance.
"""
from keras.src.utils.module_utils import tensorflow as tf
if not isinstance(
dataset, (tf.data.Dataset, tf.distribute.DistributedDataset)
):
raise ValueError(
"Expected argument `dataset` to be a tf.data.Dataset. "
f"Received: {dataset}"
)
if class_weight is not None:
dataset = dataset.map(
make_class_weight_map_fn(class_weight)
).prefetch(tf.data.AUTOTUNE)
if distribution is not None:
dataset = distribution.distribute_dataset(dataset)
self._dataset = dataset
def get_numpy_iterator(self):
from keras.src.backend.tensorflow.core import convert_to_numpy
for batch in self._dataset:
yield tree.map_structure(convert_to_numpy, batch)
def get_jax_iterator(self):
from keras.src.backend.tensorflow.core import convert_to_numpy
from keras.src.utils.module_utils import tensorflow as tf
def convert_to_jax(x):
if isinstance(x, tf.SparseTensor):
return data_adapter_utils.tf_sparse_to_jax_sparse(x)
else:
# We use numpy as an intermediary because it is faster.
return convert_to_numpy(x)
for batch in self._dataset:
yield tree.map_structure(convert_to_jax, batch)
def get_tf_dataset(self):
return self._dataset
def get_torch_dataloader(self):
return data_adapter_utils.get_torch_dataloader(self._dataset)
@property
def num_batches(self):
cardinality = self._dataset.cardinality
if callable(cardinality):
# `dataset.cardinality` is normally expected to be a callable.
cardinality = int(self._dataset.cardinality())
else:
# However, in the case of `DistributedDataset`, it's a np.int64.
cardinality = int(cardinality)
# Return None for Unknown and Infinite cardinality datasets
if cardinality < 0:
return None
return cardinality
@property
def batch_size(self):
first_element_spec = tree.flatten(self._dataset.element_spec)[0]
return first_element_spec.shape[0]
@property
def has_partial_batch(self):
return None
@property
def partial_batch_size(self):
return None
def make_class_weight_map_fn(class_weight):
"""Applies class weighting to a `Dataset`.
The `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where
`y` must be a single `Tensor`.
Args:
class_weight: A map where the keys are integer class ids and values are
the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`
Returns:
A function that can be used with `tf.data.Dataset.map` to apply class
weighting.
"""
from keras.src.utils.module_utils import tensorflow as tf
class_weight_tensor = tf.convert_to_tensor(
[
class_weight.get(int(c), 1.0)
for c in range(max(class_weight.keys()) + 1)
]
)
def class_weights_map_fn(*data):
"""Convert `class_weight` to `sample_weight`."""
x, y, sw = data_adapter_utils.unpack_x_y_sample_weight(data)
if sw is not None:
raise ValueError(
"You cannot `class_weight` and `sample_weight` "
"at the same time."
)
if tree.is_nested(y):
raise ValueError(
"`class_weight` is only supported for Models with a single "
"output."
)
if y.shape.rank >= 2:
y_classes = tf.__internal__.smart_cond.smart_cond(
tf.shape(y)[-1] > 1,
lambda: tf.argmax(y, axis=-1),
lambda: tf.cast(tf.round(tf.squeeze(y, axis=-1)), tf.int32),
)
else:
# Special casing for rank 1, where we can guarantee sparse encoding.
y_classes = tf.cast(tf.round(y), tf.int32)
cw = tf.gather(class_weight_tensor, y_classes)
return x, y, cw
return class_weights_map_fn
|
from keras.src import tree
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.data_adapters.data_adapter import DataAdapter
class TFDatasetAdapter(DataAdapter):
"""Adapter that handles `tf.data.Dataset`."""
def __init__(self, dataset, class_weight=None, distribution=None):
"""Iniitialize the TFDatasetAdapter.
Args:
dataset: The input `tf.data.Dataset` instance.
class_weight: A map where the keys are integer class ids and values
are the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`.
distribution: A `keras.distribution.Distribution` instance. Used to
shard the input dataset into per worker/process dataset
instance.
"""
from keras.src.utils.module_utils import tensorflow as tf
if not isinstance(
dataset, (tf.data.Dataset, tf.distribute.DistributedDataset)
):
raise ValueError(
"Expected argument `dataset` to be a tf.data.Dataset. "
f"Received: {dataset}"
)
if class_weight is not None:
dataset = dataset.map(
make_class_weight_map_fn(class_weight)
).prefetch(tf.data.AUTOTUNE)
if distribution is not None:
dataset = distribution.distribute_dataset(dataset)
self._dataset = dataset
def get_numpy_iterator(self):
from keras.src.backend.tensorflow.core import convert_to_numpy
for batch in self._dataset:
yield tree.map_structure(convert_to_numpy, batch)
def get_jax_iterator(self):
from keras.src.backend.tensorflow.core import convert_to_numpy
from keras.src.utils.module_utils import tensorflow as tf
def convert_to_jax(x):
if isinstance(x, tf.SparseTensor):
return data_adapter_utils.tf_sparse_to_jax_sparse(x)
else:
# We use numpy as an intermediary because it is faster.
return convert_to_numpy(x)
for batch in self._dataset:
yield tree.map_structure(convert_to_jax, batch)
def get_tf_dataset(self):
return self._dataset
def get_torch_dataloader(self):
return data_adapter_utils.get_torch_dataloader(self._dataset)
@property
def num_batches(self):
cardinality = self._dataset.cardinality
if callable(cardinality):
# `dataset.cardinality` is normally expected to be a callable.
cardinality = int(self._dataset.cardinality())
else:
# However, in the case of `DistributedDataset`, it's a np.int64.
cardinality = int(cardinality)
# Return None for Unknown and Infinite cardinality datasets
if cardinality < 0:
return None
return cardinality
@property
def batch_size(self):
first_element_spec = tree.flatten(self._dataset.element_spec)[0]
return first_element_spec.shape[0]
@property
def has_partial_batch(self):
return None
@property
def partial_batch_size(self):
return None
def make_class_weight_map_fn(class_weight):
"""Applies class weighting to a `Dataset`.
The `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where
`y` must be a single `Tensor`.
Args:
class_weight: A map where the keys are integer class ids and values are
the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`
Returns:
A function that can be used with `tf.data.Dataset.map` to apply class
weighting.
"""
from keras.src.utils.module_utils import tensorflow as tf
class_weight_tensor = tf.convert_to_tensor(
[
class_weight.get(int(c), 1.0)
for c in range(max(class_weight.keys()) + 1)
]
)
def class_weights_map_fn(*data):
"""Convert `class_weight` to `sample_weight`."""
x, y, sw = data_adapter_utils.unpack_x_y_sample_weight(data)
if sw is not None:
raise ValueError(
"You cannot `class_weight` and `sample_weight` "
"at the same time."
)
if tree.is_nested(y):
raise ValueError(
"`class_weight` is only supported for Models with a single "
"output."
)
if y.shape.rank >= 2:
y_classes = tf.__internal__.smart_cond.smart_cond(
tf.shape(y)[-1] > 1,
lambda: tf.argmax(y, axis=-1),
lambda: tf.cast(tf.round(tf.squeeze(y, axis=-1)), tf.int32),
)
else:
# Special casing for rank 1, where we can guarantee sparse encoding.
y_classes = tf.cast(tf.round(y), tf.int32)
cw = tf.gather(class_weight_tensor, y_classes)
return x, y, cw
return class_weights_map_fn
|
import os
import json
import time
import pytest
from urllib import request
from jina import Flow
from jina.serve.runtimes.gateway.http.models import _to_camel_case
from docarray import Document
from jina import helper
from jina import Executor, requests
from tests import validate_callback
cur_dir = os.path.dirname(os.path.abspath(__file__))
# check if this can be bypassed
IGNORED_FIELDS = ['embedding', 'scores', 'graphInfo', 'evaluations']
@pytest.fixture
def docs():
return [Document(id=f'{idx}', text=f'doc{idx}') for idx in range(10)]
def test_no_matches_grpc(mocker, docs):
def validate_response(resp):
for doc in resp.data.docs:
assert len(doc.matches) == 0
mock_on_done = mocker.Mock()
with Flow().add() as f:
f.search(inputs=docs, on_done=mock_on_done)
validate_callback(mock_on_done, validate_response)
@pytest.fixture
def query_dict():
return {'top_k': 3, 'mode': 'search', 'data': [{'text': 'query'}]}
class MockExecutor(Executor):
@requests
def foo(self, docs, *args, **kwargs):
for doc in docs:
doc.tags['tag'] = 'test'
def test_no_matches_rest(query_dict):
port = helper.random_port()
with Flow(
protocol='http',
port=port,
including_default_value_fields=True,
).add(uses=MockExecutor):
# temporarily adding sleep
time.sleep(0.5)
query = json.dumps(query_dict).encode('utf-8')
req = request.Request(
f'http://localhost:{port}/search',
data=query,
headers={'content-type': 'application/json'},
)
resp = request.urlopen(req).read().decode('utf8')
doc = json.loads(resp)['data'][0]
assert len(Document.from_dict(doc).matches) == 0
assert Document.from_dict(doc).tags['tag'] == 'test'
|
import os
import json
import time
import pytest
from urllib import request
from jina import Flow
from jina.serve.runtimes.gateway.http.models import _to_camel_case
from jina import Document
from jina import helper
from jina import Executor, requests
from tests import validate_callback
cur_dir = os.path.dirname(os.path.abspath(__file__))
# check if this can be bypassed
IGNORED_FIELDS = ['embedding', 'scores', 'graphInfo', 'evaluations']
@pytest.fixture
def docs():
return [Document(id=f'{idx}', text=f'doc{idx}') for idx in range(10)]
def test_no_matches_grpc(mocker, docs):
def validate_response(resp):
for doc in resp.data.docs:
assert len(doc.matches) == 0
mock_on_done = mocker.Mock()
with Flow().add() as f:
f.search(inputs=docs, on_done=mock_on_done)
validate_callback(mock_on_done, validate_response)
@pytest.fixture
def query_dict():
return {'top_k': 3, 'mode': 'search', 'data': [{'text': 'query'}]}
class MockExecutor(Executor):
@requests
def foo(self, docs, *args, **kwargs):
for doc in docs:
doc.tags['tag'] = 'test'
def test_no_matches_rest(query_dict):
port = helper.random_port()
with Flow(
protocol='http',
port=port,
including_default_value_fields=True,
).add(uses=MockExecutor):
# temporarily adding sleep
time.sleep(0.5)
query = json.dumps(query_dict).encode('utf-8')
req = request.Request(
f'http://localhost:{port}/search',
data=query,
headers={'content-type': 'application/json'},
)
resp = request.urlopen(req).read().decode('utf8')
doc = json.loads(resp)['data'][0]
assert len(Document.from_dict(doc).matches) == 0
assert Document.from_dict(doc).tags['tag'] == 'test'
|
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from llama_index.multi_modal_llms.nebius import NebiusMultiModal
def test_multi_modal_class():
names_of_base_classes = [b.__name__ for b in NebiusMultiModal.__mro__]
assert OpenAIMultiModal.__name__ in names_of_base_classes
|
from llama_index.core.multi_modal_llms.base import MultiModalLLM
from llama_index.multi_modal_llms.nebius import NebiusMultiModal
def test_multi_modal_class():
names_of_base_classes = [b.__name__ for b in NebiusMultiModal.__mro__]
assert MultiModalLLM.__name__ in names_of_base_classes
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
METAINFO = {
'CLASSES':
('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'),
# PALETTE is a list of color tuples, which is used for visualization.
'PALETTE': [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192),
(197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255),
(153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252),
(182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0),
(0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)]
}
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/VOCdevkit/'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
_delete_=True,
type=dataset_type,
data_root=data_root,
ann_file='annotations/voc0712_trainval.json',
data_prefix=dict(img=''),
metainfo=METAINFO,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/voc07_test.json',
data_prefix=dict(img=''),
metainfo=METAINFO,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/voc07_test.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
# training schedule, the dataset is repeated 3 times, so the
# actual epoch = 4 * 3 = 12
max_epochs = 4
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[3],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
METAINFO = {
'CLASSES':
('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'),
# PALETTE is a list of color tuples, which is used for visualization.
'PALETTE': [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192),
(197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255),
(153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252),
(182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0),
(0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)]
}
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/VOCdevkit/'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
_delete_=True,
type=dataset_type,
data_root=data_root,
ann_file='annotations/voc0712_trainval.json',
data_prefix=dict(img=''),
metainfo=METAINFO,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/voc07_test.json',
data_prefix=dict(img=''),
metainfo=METAINFO,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/voc07_test.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
# training schedule, the dataset is repeated 3 times, so the
# actual epoch = 4 * 3 = 12
max_epochs = 4
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[3],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
"""
OpenAI Agent.
Simple wrapper around AgentRunner + OpenAIAgentWorker.
For the legacy implementation see:
```python
from llama_index.agent.legacy.openai.base import OpenAIAgent
```
"""
from typing import (
Any,
Dict,
List,
Callable,
Optional,
Type,
)
from llama_index.agent.openai.step import OpenAIAgentWorker
from llama_index.core.agent.runner.base import AgentRunner
from llama_index.core.callbacks import CallbackManager
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.llms.llm import LLM
from llama_index.core.memory.chat_memory_buffer import ChatMemoryBuffer
from llama_index.core.memory.types import BaseMemory
from llama_index.core.objects.base import ObjectRetriever
from llama_index.core.settings import Settings
from llama_index.core.tools import BaseTool
from llama_index.llms.openai import OpenAI
from llama_index.llms.openai.utils import OpenAIToolCall
DEFAULT_MAX_FUNCTION_CALLS = 5
class OpenAIAgent(AgentRunner):
"""
OpenAI agent.
Subclasses AgentRunner with a OpenAIAgentWorker.
For the legacy implementation see:
```python
from llama_index.agent.legacy.openai.base import OpenAIAgent
```
"""
def __init__(
self,
tools: List[BaseTool],
llm: OpenAI,
memory: BaseMemory,
prefix_messages: List[ChatMessage],
verbose: bool = False,
max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
default_tool_choice: str = "auto",
callback_manager: Optional[CallbackManager] = None,
tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
tool_call_parser: Optional[Callable[[OpenAIToolCall], Dict]] = None,
) -> None:
"""Init params."""
callback_manager = callback_manager or llm.callback_manager
step_engine = OpenAIAgentWorker.from_tools(
tools=tools,
tool_retriever=tool_retriever,
llm=llm,
verbose=verbose,
max_function_calls=max_function_calls,
callback_manager=callback_manager,
prefix_messages=prefix_messages,
tool_call_parser=tool_call_parser,
)
super().__init__(
step_engine,
memory=memory,
llm=llm,
callback_manager=callback_manager,
default_tool_choice=default_tool_choice,
)
@classmethod
def from_tools(
cls,
tools: Optional[List[BaseTool]] = None,
tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
llm: Optional[LLM] = None,
chat_history: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
memory_cls: Type[BaseMemory] = ChatMemoryBuffer,
verbose: bool = False,
max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
default_tool_choice: str = "auto",
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
prefix_messages: Optional[List[ChatMessage]] = None,
tool_call_parser: Optional[Callable[[OpenAIToolCall], Dict]] = None,
**kwargs: Any,
) -> "OpenAIAgent":
"""
Create an OpenAIAgent from a list of tools.
Similar to `from_defaults` in other classes, this method will
infer defaults for a variety of parameters, including the LLM,
if they are not specified.
"""
tools = tools or []
chat_history = chat_history or []
llm = llm or Settings.llm
if not isinstance(llm, OpenAI):
raise ValueError("llm must be a OpenAI instance")
if callback_manager is not None:
llm.callback_manager = callback_manager
memory = memory or memory_cls.from_defaults(chat_history, llm=llm)
if not llm.metadata.is_function_calling_model:
raise ValueError(
f"Model name {llm.model} does not support function calling API. "
)
if system_prompt is not None:
if prefix_messages is not None:
raise ValueError(
"Cannot specify both system_prompt and prefix_messages"
)
prefix_messages = [ChatMessage(content=system_prompt, role="system")]
prefix_messages = prefix_messages or []
return cls(
tools=tools,
tool_retriever=tool_retriever,
llm=llm,
memory=memory,
prefix_messages=prefix_messages,
verbose=verbose,
max_function_calls=max_function_calls,
callback_manager=callback_manager,
default_tool_choice=default_tool_choice,
tool_call_parser=tool_call_parser,
)
|
"""OpenAI Agent.
Simple wrapper around AgentRunner + OpenAIAgentWorker.
For the legacy implementation see:
```python
from llama_index.agent.legacy.openai.base import OpenAIAgent
```
"""
from typing import (
Any,
Dict,
List,
Callable,
Optional,
Type,
)
from llama_index.agent.openai.step import OpenAIAgentWorker
from llama_index.core.agent.runner.base import AgentRunner
from llama_index.core.callbacks import CallbackManager
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.llms.llm import LLM
from llama_index.core.memory.chat_memory_buffer import ChatMemoryBuffer
from llama_index.core.memory.types import BaseMemory
from llama_index.core.objects.base import ObjectRetriever
from llama_index.core.settings import Settings
from llama_index.core.tools import BaseTool
from llama_index.llms.openai import OpenAI
from llama_index.llms.openai.utils import OpenAIToolCall
DEFAULT_MAX_FUNCTION_CALLS = 5
class OpenAIAgent(AgentRunner):
"""OpenAI agent.
Subclasses AgentRunner with a OpenAIAgentWorker.
For the legacy implementation see:
```python
from llama_index.agent.legacy.openai.base import OpenAIAgent
```
"""
def __init__(
self,
tools: List[BaseTool],
llm: OpenAI,
memory: BaseMemory,
prefix_messages: List[ChatMessage],
verbose: bool = False,
max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
default_tool_choice: str = "auto",
callback_manager: Optional[CallbackManager] = None,
tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
tool_call_parser: Optional[Callable[[OpenAIToolCall], Dict]] = None,
) -> None:
"""Init params."""
callback_manager = callback_manager or llm.callback_manager
step_engine = OpenAIAgentWorker.from_tools(
tools=tools,
tool_retriever=tool_retriever,
llm=llm,
verbose=verbose,
max_function_calls=max_function_calls,
callback_manager=callback_manager,
prefix_messages=prefix_messages,
tool_call_parser=tool_call_parser,
)
super().__init__(
step_engine,
memory=memory,
llm=llm,
callback_manager=callback_manager,
default_tool_choice=default_tool_choice,
)
@classmethod
def from_tools(
cls,
tools: Optional[List[BaseTool]] = None,
tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
llm: Optional[LLM] = None,
chat_history: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
memory_cls: Type[BaseMemory] = ChatMemoryBuffer,
verbose: bool = False,
max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
default_tool_choice: str = "auto",
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
prefix_messages: Optional[List[ChatMessage]] = None,
tool_call_parser: Optional[Callable[[OpenAIToolCall], Dict]] = None,
**kwargs: Any,
) -> "OpenAIAgent":
"""Create an OpenAIAgent from a list of tools.
Similar to `from_defaults` in other classes, this method will
infer defaults for a variety of parameters, including the LLM,
if they are not specified.
"""
tools = tools or []
chat_history = chat_history or []
llm = llm or Settings.llm
if not isinstance(llm, OpenAI):
raise ValueError("llm must be a OpenAI instance")
if callback_manager is not None:
llm.callback_manager = callback_manager
memory = memory or memory_cls.from_defaults(chat_history, llm=llm)
if not llm.metadata.is_function_calling_model:
raise ValueError(
f"Model name {llm.model} does not support function calling API. "
)
if system_prompt is not None:
if prefix_messages is not None:
raise ValueError(
"Cannot specify both system_prompt and prefix_messages"
)
prefix_messages = [ChatMessage(content=system_prompt, role="system")]
prefix_messages = prefix_messages or []
return cls(
tools=tools,
tool_retriever=tool_retriever,
llm=llm,
memory=memory,
prefix_messages=prefix_messages,
verbose=verbose,
max_function_calls=max_function_calls,
callback_manager=callback_manager,
default_tool_choice=default_tool_choice,
tool_call_parser=tool_call_parser,
)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils import is_torch_available
if is_torch_available():
from .faster_cache import FasterCacheConfig, apply_faster_cache
from .first_block_cache import FirstBlockCacheConfig, apply_first_block_cache
from .group_offloading import apply_group_offloading
from .hooks import HookRegistry, ModelHook
from .layerwise_casting import apply_layerwise_casting, apply_layerwise_casting_hook
from .pyramid_attention_broadcast import PyramidAttentionBroadcastConfig, apply_pyramid_attention_broadcast
|
from ..utils import is_torch_available
if is_torch_available():
from .faster_cache import FasterCacheConfig, apply_faster_cache
from .group_offloading import apply_group_offloading
from .hooks import HookRegistry, ModelHook
from .layerwise_casting import apply_layerwise_casting, apply_layerwise_casting_hook
from .pyramid_attention_broadcast import PyramidAttentionBroadcastConfig, apply_pyramid_attention_broadcast
|
from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.image import ImageNdArray, ImageTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
__all__ = [
'NdArray',
'AnyTensor',
'AnyEmbedding',
'NdArrayEmbedding',
'ImageNdArray',
'ImageTensor',
'TensorFlowTensor',
]
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.embedding import TorchEmbedding # noqa: F401
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
__all__.extend(['TorchEmbedding', 'TorchTensor', 'ImageTorchTensor'])
torch_available = is_torch_available()
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.embedding import TensorFlowEmbedding # noqa: F401
from docarray.typing.tensor.image import ImageTensorFlowTensor # noqa: F401
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
__all__.extend(['TensorFlowEmbedding', 'TensorFlowTensor', 'ImageTensorFlowTensor'])
|
from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.image import ImageNdArray, ImageTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
__all__ = [
'NdArray',
'AnyTensor',
'AnyEmbedding',
'NdArrayEmbedding',
'ImageNdArray',
'ImageTensor',
'TensorFlowTensor',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor.embedding import TorchEmbedding # noqa: F401
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
__all__.extend(['TorchEmbedding', 'TorchTensor', 'ImageTorchTensor'])
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
pass
else:
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
__all__.extend(['TensorFlowTensor'])
|
from keras.src.utils.module_utils import dmtree
def register_tree_node_class(cls):
return cls
def is_nested(structure):
return dmtree.is_nested(structure)
def traverse(func, structure, top_down=True):
return dmtree.traverse(func, structure, top_down=top_down)
def flatten(structure):
return dmtree.flatten(structure)
def map_structure(func, *structures):
return dmtree.map_structure(func, *structures)
def map_structure_up_to(shallow_structure, func, *structures):
return dmtree.map_structure_up_to(shallow_structure, func, *structures)
def assert_same_structure(a, b, check_types=True):
return dmtree.assert_same_structure(a, b, check_types=check_types)
def pack_sequence_as(structure, flat_sequence, sequence_fn=None):
is_nested_fn = dmtree.is_nested
sequence_fn = sequence_fn or dmtree._sequence_like
def truncate(value, length):
value_str = str(value)
return value_str[:length] + (value_str[length:] and "...")
if not is_nested_fn(flat_sequence):
raise TypeError(
"Attempted to pack value:\n {}\ninto a structure, but found "
"incompatible type `{}` instead.".format(
truncate(flat_sequence, 100), type(flat_sequence)
)
)
if not is_nested_fn(structure):
if len(flat_sequence) != 1:
raise ValueError(
"The target structure is of type `{}`\n {}\nHowever the input "
"is a sequence ({}) of length {}.\n {}\nnest cannot "
"guarantee that it is safe to map one to the other.".format(
type(structure),
truncate(structure, 100),
type(flat_sequence),
len(flat_sequence),
truncate(flat_sequence, 100),
)
)
return flat_sequence[0]
packed = []
try:
final_index, packed = packed_nest_with_indices(
structure, flat_sequence, 0, is_nested_fn, sequence_fn
)
if final_index < len(flat_sequence):
raise IndexError
except IndexError:
flat_structure = dmtree.flatten(structure)
if len(flat_structure) != len(flat_sequence):
# pylint: disable=raise-missing-from
raise ValueError(
"Could not pack sequence. "
f"Structure had {len(flat_structure)} atoms, but "
f"flat_sequence had {len(flat_sequence)} items. "
f"Structure: {structure}, flat_sequence: {flat_sequence}."
)
return sequence_fn(structure, packed)
def packed_nest_with_indices(
structure, flat, index, is_nested_fn, sequence_fn=None
):
"""Helper function for pack_sequence_as.
Args:
structure: structure to mimic.
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
is_nested_fn: Function used to test if a value should
be treated as a nested structure.
sequence_fn: Function used to generate a new structure instance.
Returns:
The tuple (new_index, child), where:
* new_index - the updated index into `flat`
having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
"""
packed = []
sequence_fn = sequence_fn or dmtree._sequence_like
for s in yield_value(structure):
if is_nested_fn(s):
new_index, child = packed_nest_with_indices(
s, flat, index, is_nested_fn, sequence_fn
)
packed.append(sequence_fn(s, child))
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
def yield_value(iterable):
for _, v in dmtree._yield_sorted_items(iterable):
yield v
def lists_to_tuples(structure):
def sequence_fn(instance, args):
if isinstance(instance, list):
return tuple(args)
return dmtree._sequence_like(instance, args)
return pack_sequence_as(
structure,
dmtree.flatten(structure),
sequence_fn=sequence_fn,
)
def is_shape_tuple(x):
if isinstance(x, (list, tuple)):
if all(isinstance(e, (int, type(None))) for e in x):
return True
return False
def map_shape_structure(func, structure):
if is_shape_tuple(structure):
return func(tuple(structure))
if isinstance(structure, list):
return [map_shape_structure(func, e) for e in structure]
if isinstance(structure, tuple):
return tuple(map_shape_structure(func, e) for e in structure)
if isinstance(structure, dict):
return {k: map_shape_structure(func, v) for k, v in structure.items()}
else:
raise ValueError(f"Cannot map function to unknown object {structure}")
|
from keras.src.utils.module_utils import dmtree
def register_tree_node_class(cls):
return cls
def is_nested(structure):
return dmtree.is_nested(structure)
def traverse(func, structure, top_down=True):
return dmtree.traverse(func, structure, top_down=top_down)
def flatten(structure):
return dmtree.flatten(structure)
def map_structure(func, *structures):
return dmtree.map_structure(func, *structures)
def map_structure_up_to(shallow_structure, func, *structures):
return dmtree.map_structure_up_to(shallow_structure, func, *structures)
def assert_same_structure(a, b, check_types=True):
return dmtree.assert_same_structure(a, b, check_types=check_types)
def pack_sequence_as(structure, flat_sequence, sequence_fn=None):
is_nested_fn = dmtree.is_nested
sequence_fn = sequence_fn or dmtree._sequence_like
def truncate(value, length):
value_str = str(value)
return value_str[:length] + (value_str[length:] and "...")
if not is_nested_fn(flat_sequence):
raise TypeError(
"Attempted to pack value:\n {}\ninto a structure, but found "
"incompatible type `{}` instead.".format(
truncate(flat_sequence, 100), type(flat_sequence)
)
)
if not is_nested_fn(structure):
if len(flat_sequence) != 1:
raise ValueError(
"The target structure is of type `{}`\n {}\nHowever the input "
"is a sequence ({}) of length {}.\n {}\nnest cannot "
"guarantee that it is safe to map one to the other.".format(
type(structure),
truncate(structure, 100),
type(flat_sequence),
len(flat_sequence),
truncate(flat_sequence, 100),
)
)
return flat_sequence[0]
try:
final_index, packed = packed_nest_with_indices(
structure, flat_sequence, 0, is_nested_fn, sequence_fn
)
if final_index < len(flat_sequence):
raise IndexError
except IndexError:
flat_structure = dmtree.flatten(structure)
if len(flat_structure) != len(flat_sequence):
# pylint: disable=raise-missing-from
raise ValueError(
"Could not pack sequence. "
f"Structure had {len(flat_structure)} atoms, but "
f"flat_sequence had {len(flat_sequence)} items. "
f"Structure: {structure}, flat_sequence: {flat_sequence}."
)
return sequence_fn(structure, packed)
def packed_nest_with_indices(
structure, flat, index, is_nested_fn, sequence_fn=None
):
"""Helper function for pack_sequence_as.
Args:
structure: structure to mimic.
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
is_nested_fn: Function used to test if a value should
be treated as a nested structure.
sequence_fn: Function used to generate a new structure instance.
Returns:
The tuple (new_index, child), where:
* new_index - the updated index into `flat`
having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
"""
packed = []
sequence_fn = sequence_fn or dmtree._sequence_like
for s in yield_value(structure):
if is_nested_fn(s):
new_index, child = packed_nest_with_indices(
s, flat, index, is_nested_fn, sequence_fn
)
packed.append(sequence_fn(s, child))
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
def yield_value(iterable):
for _, v in dmtree._yield_sorted_items(iterable):
yield v
def lists_to_tuples(structure):
def sequence_fn(instance, args):
if isinstance(instance, list):
return tuple(args)
return dmtree._sequence_like(instance, args)
return pack_sequence_as(
structure,
dmtree.flatten(structure),
sequence_fn=sequence_fn,
)
def is_shape_tuple(x):
if isinstance(x, (list, tuple)):
if all(isinstance(e, (int, type(None))) for e in x):
return True
return False
def map_shape_structure(func, structure):
if is_shape_tuple(structure):
return func(tuple(structure))
if isinstance(structure, list):
return [map_shape_structure(func, e) for e in structure]
if isinstance(structure, tuple):
return tuple(map_shape_structure(func, e) for e in structure)
if isinstance(structure, dict):
return {k: map_shape_structure(func, v) for k, v in structure.items()}
else:
raise ValueError(f"Cannot map function to unknown object {structure}")
|
from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import DocumentProto, NodeProto
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
from docarray.typing import TorchTensor
torch_imported = True
T = TypeVar('T', bound='ProtoMixin')
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'DocumentProto') -> T:
"""create a Document from a protobuf message"""
from docarray.typing import ( # TorchTensor,
ID,
AnyUrl,
Embedding,
ImageUrl,
NdArray,
TextUrl,
)
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
# this if else statement need to be refactored it is too long
# the check should be delegated to the type level
content_type_dict = dict(
ndarray=NdArray,
embedding=Embedding,
any_url=AnyUrl,
text_url=TextUrl,
image_url=ImageUrl,
id=ID,
)
if torch_imported:
content_type_dict['torch_tensor'] = TorchTensor
if content_type in content_type_dict:
fields[field] = content_type_dict[content_type].from_protobuf(
getattr(value, content_type)
)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_nested_document_class(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
from docarray import DocumentArray
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
from docarray.proto import DocumentProto, NodeProto
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
def _to_node_protobuf(self) -> 'NodeProto':
from docarray.proto import NodeProto
"""Convert Document into a NodeProto protobuf message. This function should be
called when the Document is nest into another Document that need to be
converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(nested=self.to_protobuf())
|
from typing import Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import (
ID,
AnyUrl,
Embedding,
ImageUrl,
NdArray,
TextUrl,
TorchTensor,
)
T = TypeVar('T', bound='ProtoMixin')
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'DocumentProto') -> T:
"""create a Document from a protobuf message"""
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
# this if else statement need to be refactored it is too long
# the check should be delegated to the type level
content_type_dict = dict(
ndarray=NdArray,
torch_tensor=TorchTensor,
embedding=Embedding,
any_url=AnyUrl,
text_url=TextUrl,
image_url=ImageUrl,
id=ID,
)
if content_type in content_type_dict:
fields[field] = content_type_dict[content_type].from_protobuf(
getattr(value, content_type)
)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_nested_document_class(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
from docarray import DocumentArray
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
def _to_node_protobuf(self) -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should be
called when the Document is nest into another Document that need to be
converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(nested=self.to_protobuf())
|
import os
import pathlib
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class DTD(VisionDataset):
"""`Describable Textures Dataset (DTD) <https://www.robots.ox.ac.uk/~vgg/data/dtd/>`_.
Args:
root (string): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"``, or ``"test"``.
partition (int, optional): The dataset partition. Should be ``1 <= partition <= 10``. Defaults to ``1``.
.. note::
The partition only changes which split each image belongs to. Thus, regardless of the selected
partition, combining all splits will result in all images.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
"""
_URL = "https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz"
_MD5 = "fff73e5086ae6bdbea199a49dfb8a4c1"
def __init__(
self,
root: str,
split: str = "train",
partition: int = 1,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
if not isinstance(partition, int) and not (1 <= partition <= 10):
raise ValueError(
f"Parameter 'partition' should be an integer with `1 <= partition <= 10`, "
f"but got {partition} instead"
)
self._partition = partition
super().__init__(root, transform=transform, target_transform=target_transform)
self._base_folder = pathlib.Path(self.root) / type(self).__name__.lower()
self._data_folder = self._base_folder / "dtd"
self._meta_folder = self._data_folder / "labels"
self._images_folder = self._data_folder / "images"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._image_files = []
classes = []
with open(self._meta_folder / f"{self._split}{self._partition}.txt") as file:
for line in file:
cls, name = line.strip().split("/")
self._image_files.append(self._images_folder.joinpath(cls, name))
classes.append(cls)
self.classes = sorted(set(classes))
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
self._labels = [self.class_to_idx[cls] for cls in classes]
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}, partition={self._partition}"
def _check_exists(self) -> bool:
return os.path.exists(self._data_folder) and os.path.isdir(self._data_folder)
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=str(self._base_folder), md5=self._MD5)
|
import os
import pathlib
from typing import Callable, Optional
import PIL.Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class DTD(VisionDataset):
"""`Describable Textures Dataset (DTD) <https://www.robots.ox.ac.uk/~vgg/data/dtd/>`_.
Args:
root (string): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"``, or ``"test"``.
partition (int, optional): The dataset partition. Should be ``1 <= partition <= 10``. Defaults to ``1``.
.. note::
The partition only changes which split each image belongs to. Thus, regardless of the selected
partition, combining all splits will result in all images.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
"""
_URL = "https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz"
_MD5 = "fff73e5086ae6bdbea199a49dfb8a4c1"
def __init__(
self,
root: str,
split: str = "train",
partition: int = 1,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
if not isinstance(partition, int) and not (1 <= partition <= 10):
raise ValueError(
f"Parameter 'partition' should be an integer with `1 <= partition <= 10`, "
f"but got {partition} instead"
)
self._partition = partition
super().__init__(root, transform=transform, target_transform=target_transform)
self._base_folder = pathlib.Path(self.root) / type(self).__name__.lower()
self._data_folder = self._base_folder / "dtd"
self._meta_folder = self._data_folder / "labels"
self._images_folder = self._data_folder / "images"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._image_files = []
classes = []
with open(self._meta_folder / f"{self._split}{self._partition}.txt") as file:
for line in file:
cls, name = line.strip().split("/")
self._image_files.append(self._images_folder.joinpath(cls, name))
classes.append(cls)
self.classes = sorted(set(classes))
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
self._labels = [self.class_to_idx[cls] for cls in classes]
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx):
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}, partition={self._partition}"
def _check_exists(self) -> bool:
return os.path.exists(self._data_folder) and os.path.isdir(self._data_folder)
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=str(self._base_folder), md5=self._MD5)
|
_base_ = 'ssd300_coco.py'
# model settings
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.1, 0.9),
strides=[8, 16, 32, 64, 128, 256, 512],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]])))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=[123.675, 116.28, 103.53],
to_rgb=True,
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# TODO support auto_scale_lr
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
# auto_scale_lr = dict(base_batch_size=64)
|
_base_ = 'ssd300_coco.py'
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.1, 0.9),
strides=[8, 16, 32, 64, 128, 256, 512],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]])))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=3,
train=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(_delete_=True)
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
from __future__ import annotations
import pytest
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
from sentence_transformers.util import is_training_available
@pytest.mark.parametrize(
("revision", "expected_base_revision"),
[
("f3cb857cba53019a20df283396bcca179cf051a4", "f3cb857cba53019a20df283396bcca179cf051a4"),
("f3cb857", "f3cb857"),
("main", "valid-revision"),
(None, "valid-revision"),
],
)
def test_model_card_data(revision, expected_base_revision) -> None:
model_name = "sentence-transformers-testing/stsb-bert-tiny-safetensors"
model = SentenceTransformer(model_name, revision=revision)
assert model.model_card_data.base_model == model_name
if expected_base_revision == "valid-revision":
assert model.model_card_data.base_model_revision
assert len(model.model_card_data.base_model_revision) == 40
else:
assert model.model_card_data.base_model_revision == expected_base_revision
@pytest.mark.skipif(
not is_training_available(), reason='Sentence Transformers was not installed with the `["train"]` extra.'
)
def test_generated_from_trainer_tag(stsb_bert_tiny_model: SentenceTransformer) -> None:
model = stsb_bert_tiny_model
assert "generated_from_trainer" not in model.model_card_data.tags
SentenceTransformerTrainer(model)
assert "generated_from_trainer" in model.model_card_data.tags
|
from __future__ import annotations
import pytest
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
@pytest.mark.parametrize(
("revision", "expected_base_revision"),
[
("f3cb857cba53019a20df283396bcca179cf051a4", "f3cb857cba53019a20df283396bcca179cf051a4"),
("f3cb857", "f3cb857"),
("main", "valid-revision"),
(None, "valid-revision"),
],
)
def test_model_card_data(revision, expected_base_revision) -> None:
model_name = "sentence-transformers-testing/stsb-bert-tiny-safetensors"
model = SentenceTransformer(model_name, revision=revision)
assert model.model_card_data.base_model == model_name
if expected_base_revision == "valid-revision":
assert model.model_card_data.base_model_revision
assert len(model.model_card_data.base_model_revision) == 40
else:
assert model.model_card_data.base_model_revision == expected_base_revision
def test_generated_from_trainer_tag(stsb_bert_tiny_model: SentenceTransformer) -> None:
model = stsb_bert_tiny_model
assert "generated_from_trainer" not in model.model_card_data.tags
SentenceTransformerTrainer(model)
assert "generated_from_trainer" in model.model_card_data.tags
|
from __future__ import annotations
from .CSRLoss import CSRLoss, CSRReconstructionLoss
from .RegularizerLoss import FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCachedGISTEmbedLoss import SparseCachedGISTEmbedLoss
from .SparseCachedMultipleNegativesRankingLoss import SparseCachedMultipleNegativesRankingLoss
from .SparseCoSENTLoss import SparseCoSENTLoss
from .SparseCosineSimilarityLoss import SparseCosineSimilarityLoss
from .SparseDistillKLDivLoss import SparseDistillKLDivLoss
from .SparseGISTEmbedLoss import SparseGISTEmbedLoss
from .SparseMarginMSELoss import SparseMarginMSELoss
from .SparseMSELoss import SparseMSELoss
from .SparseMultipleNegativesRankingLoss import SparseMultipleNegativesRankingLoss
from .SparseTripletLoss import SparseTripletLoss
from .SpladeLoss import SpladeLoss
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
]
# TODO: Test cached losses
|
from __future__ import annotations
from .CSRLoss import CSRLoss, CSRReconstructionLoss
from .RegularizerLoss import FlopsLoss, L0FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCachedGISTEmbedLoss import SparseCachedGISTEmbedLoss
from .SparseCachedMultipleNegativesRankingLoss import SparseCachedMultipleNegativesRankingLoss
from .SparseCoSENTLoss import SparseCoSENTLoss
from .SparseCosineSimilarityLoss import SparseCosineSimilarityLoss
from .SparseDistillKLDivLoss import SparseDistillKLDivLoss
from .SparseGISTEmbedLoss import SparseGISTEmbedLoss
from .SparseMarginMSELoss import SparseMarginMSELoss
from .SparseMSELoss import SparseMSELoss
from .SparseMultipleNegativesRankingLoss import SparseMultipleNegativesRankingLoss
from .SparseTripletLoss import SparseTripletLoss
from .SpladeLoss import SpladeLoss
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"L0FlopsLoss",
"SpladeLoss",
]
# TODO: Test cached losses
|
from llama_index.core.exec_utils import _contains_protected_access
def test_contains_protected_access() -> None:
assert not _contains_protected_access("def _a(b): pass"), (
"definition of dunder function"
)
assert _contains_protected_access("a = _b(c)"), "call to protected function"
assert not _contains_protected_access("a = b(c)"), "call to public function"
assert _contains_protected_access("_b"), "access to protected name"
assert not _contains_protected_access("b"), "access to public name"
assert _contains_protected_access("_b[0]"), "subscript access to protected name"
assert not _contains_protected_access("b[0]"), "subscript access to public name"
assert _contains_protected_access("_a.b"), "access to attribute of a protected name"
assert not _contains_protected_access("a.b"), "access to attribute of a public name"
assert _contains_protected_access("a._b"), "access to protected attribute of a name"
assert not _contains_protected_access("a.b"), "access to public attribute of a name"
|
from llama_index.core.exec_utils import _contains_protected_access
def test_contains_protected_access() -> None:
assert not _contains_protected_access(
"def _a(b): pass"
), "definition of dunder function"
assert _contains_protected_access("a = _b(c)"), "call to protected function"
assert not _contains_protected_access("a = b(c)"), "call to public function"
assert _contains_protected_access("_b"), "access to protected name"
assert not _contains_protected_access("b"), "access to public name"
assert _contains_protected_access("_b[0]"), "subscript access to protected name"
assert not _contains_protected_access("b[0]"), "subscript access to public name"
assert _contains_protected_access("_a.b"), "access to attribute of a protected name"
assert not _contains_protected_access("a.b"), "access to attribute of a public name"
assert _contains_protected_access("a._b"), "access to protected attribute of a name"
assert not _contains_protected_access("a.b"), "access to public attribute of a name"
|
_base_ = './faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
# MMEngine support the following two ways, users can choose
# according to convenience
# param_scheduler = [
# dict(
# type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), # noqa
# dict(
# type='MultiStepLR',
# begin=0,
# end=12,
# by_epoch=True,
# milestones=[16, 23],
# gamma=0.1)
# ]
_base_.param_scheduler[1].milestones = [16, 23]
train_cfg = dict(max_epochs=24)
|
_base_ = './faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
# learning policy
lr_config = dict(step=[16, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
learning_rate_mapping (`Optional[Dict[str, float]]`, *optional*):
A mapping of parameter names to learning rates. This allows you to set different learning rates for
different parts of the model, e.g., `{'IDF\.*': 1e-3}` for the IDF module. This is useful when you want to
fine-tune specific parts of the model with different learning rates.
"""
|
from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
router_mapping (`Optional[Dict[str, str]]`, *optional*):
A mapping of dataset names to Router routes, like "query" or "document". This is used to specify which
Router module to use for each dataset. Two formats are accepted:
1. `Dict[str, str]`: A mapping of dataset names to routes for single-dataset training/evaluation.
2. `Dict[str, Dict[str, str]]`: A mapping of dataset names to a mapping of column names to routes for
multi-dataset training/evaluation.
learning_rate_mapping (`Optional[Dict[str, float]]`, *optional*):
A mapping of parameter names to learning rates. This allows you to set different learning rates for
different parts of the model, e.g., `{'IDF\.*': 1e-3}` for the IDF module. This is useful when you want to
fine-tune specific parts of the model with different learning rates.
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import ort_validate
__all__ = ['ort_validate']
|
from .utils import ort_validate
__all__ = ['ort_validate']
|
"""Test yamlOutputParser"""
from enum import Enum
from typing import Optional
import pytest
from langchain_core.exceptions import OutputParserException
from pydantic import BaseModel, Field
from langchain.output_parsers.yaml import YamlOutputParser
class Actions(Enum):
SEARCH = "Search"
CREATE = "Create"
UPDATE = "Update"
DELETE = "Delete"
class TestModel(BaseModel):
action: Actions = Field(description="Action to be performed")
action_input: str = Field(description="Input to be used in the action")
additional_fields: Optional[str] = Field(
description="Additional fields",
default=None,
)
for_new_lines: str = Field(description="To be used to test newlines")
# Prevent pytest from trying to run tests on TestModel
TestModel.__test__ = False # type: ignore[attr-defined]
DEF_RESULT = """```yaml
---
action: Update
action_input: The yamlOutputParser class is powerful
additional_fields: null
for_new_lines: |
not_escape_newline:
escape_newline:
```"""
DEF_RESULT_NO_BACKTICKS = """
action: Update
action_input: The yamlOutputParser class is powerful
additional_fields: null
for_new_lines: |
not_escape_newline:
escape_newline:
"""
# action 'update' with a lowercase 'u' to test schema validation failure.
DEF_RESULT_FAIL = """```yaml
action: update
action_input: The yamlOutputParser class is powerful
additional_fields: null
```"""
DEF_EXPECTED_RESULT = TestModel(
action=Actions.UPDATE,
action_input="The yamlOutputParser class is powerful",
additional_fields=None,
for_new_lines="not_escape_newline:\n escape_newline:\n",
)
@pytest.mark.parametrize("result", [DEF_RESULT, DEF_RESULT_NO_BACKTICKS])
def test_yaml_output_parser(result: str) -> None:
"""Test yamlOutputParser."""
yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser(
pydantic_object=TestModel,
)
model = yaml_parser.parse(result)
print("parse_result:", result) # noqa: T201
assert model == DEF_EXPECTED_RESULT
def test_yaml_output_parser_fail() -> None:
"""Test YamlOutputParser where completion result fails schema validation."""
yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser(
pydantic_object=TestModel,
)
try:
yaml_parser.parse(DEF_RESULT_FAIL)
except OutputParserException as e:
print("parse_result:", e) # noqa: T201
assert "Failed to parse TestModel from completion" in str(e)
else:
msg = "Expected OutputParserException"
raise AssertionError(msg)
def test_yaml_output_parser_output_type() -> None:
"""Test YamlOutputParser OutputType."""
yaml_parser = YamlOutputParser(pydantic_object=TestModel)
assert yaml_parser.OutputType is TestModel
|
"""Test yamlOutputParser"""
from enum import Enum
from typing import Optional
import pytest
from langchain_core.exceptions import OutputParserException
from pydantic import BaseModel, Field
from langchain.output_parsers.yaml import YamlOutputParser
class Actions(Enum):
SEARCH = "Search"
CREATE = "Create"
UPDATE = "Update"
DELETE = "Delete"
class TestModel(BaseModel):
action: Actions = Field(description="Action to be performed")
action_input: str = Field(description="Input to be used in the action")
additional_fields: Optional[str] = Field(
description="Additional fields",
default=None,
)
for_new_lines: str = Field(description="To be used to test newlines")
# Prevent pytest from trying to run tests on TestModel
TestModel.__test__ = False # type: ignore[attr-defined]
DEF_RESULT = """```yaml
---
action: Update
action_input: The yamlOutputParser class is powerful
additional_fields: null
for_new_lines: |
not_escape_newline:
escape_newline:
```"""
DEF_RESULT_NO_BACKTICKS = """
action: Update
action_input: The yamlOutputParser class is powerful
additional_fields: null
for_new_lines: |
not_escape_newline:
escape_newline:
"""
# action 'update' with a lowercase 'u' to test schema validation failure.
DEF_RESULT_FAIL = """```yaml
action: update
action_input: The yamlOutputParser class is powerful
additional_fields: null
```"""
DEF_EXPECTED_RESULT = TestModel(
action=Actions.UPDATE,
action_input="The yamlOutputParser class is powerful",
additional_fields=None,
for_new_lines="not_escape_newline:\n escape_newline:\n",
)
@pytest.mark.parametrize("result", [DEF_RESULT, DEF_RESULT_NO_BACKTICKS])
def test_yaml_output_parser(result: str) -> None:
"""Test yamlOutputParser."""
yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser(
pydantic_object=TestModel,
)
model = yaml_parser.parse(result)
print("parse_result:", result) # noqa: T201
assert model == DEF_EXPECTED_RESULT
def test_yaml_output_parser_fail() -> None:
"""Test YamlOutputParser where completion result fails schema validation."""
yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser(
pydantic_object=TestModel,
)
try:
yaml_parser.parse(DEF_RESULT_FAIL)
except OutputParserException as e:
print("parse_result:", e) # noqa: T201
assert "Failed to parse TestModel from completion" in str(e)
else:
assert False, "Expected OutputParserException"
def test_yaml_output_parser_output_type() -> None:
"""Test YamlOutputParser OutputType."""
yaml_parser = YamlOutputParser(pydantic_object=TestModel)
assert yaml_parser.OutputType is TestModel
|
import glob
import os
import cv2
import pytest
from jina import Document, DocumentArray
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='package')
def build_da():
def _build_da():
return DocumentArray(
[
Document(blob=cv2.imread(path), tags={'filename': path.split('/')[-1]})
for path in glob.glob(os.path.join(cur_dir, 'data/img/*.jpg'))
]
)
return _build_da
|
import glob
import os
import cv2
import pytest
from jina import DocumentArray, Document
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='package')
def build_da():
def _build_da():
return DocumentArray([
Document(blob=cv2.imread(path), tags={'filename': path.split('/')[-1]})
for path in glob.glob(os.path.join(cur_dir, 'data/img/*.jpg'))
])
return _build_da
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
class TestRPN(TestCase):
@parameterized.expand(['rpn/rpn_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
assert detector.backbone
assert detector.neck
assert detector.rpn_head
assert detector.device.type == 'cpu'
# if rpn.num_classes > 1, force set rpn.num_classes = 1
model.rpn_head.num_classes = 2
detector = build_detector(model)
assert detector.rpn_head.num_classes == 1
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_rpn_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward train
losses = detector.forward(packed_inputs, return_loss=True)
assert isinstance(losses, dict)
# Test forward_dummy
batch = torch.ones((1, 3, 64, 64)).to(device=device)
out = detector.forward_dummy(batch)
assert isinstance(out, tuple)
assert len(out) == 2
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
packed_inputs, return_loss=False)
assert len(batch_results) == 2
isinstance(batch_results[0], DetDataSample)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from .utils import demo_mm_inputs, get_detector_cfg
class TestRPN(TestCase):
@parameterized.expand(['rpn/rpn_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
assert detector.backbone
assert detector.neck
assert detector.rpn_head
assert detector.device.type == 'cpu'
# if rpn.num_classes > 1, force set rpn.num_classes = 1
model.rpn_head.num_classes = 2
detector = build_detector(model)
assert detector.rpn_head.num_classes == 1
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_rpn_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward train
losses = detector.forward(packed_inputs, return_loss=True)
assert isinstance(losses, dict)
# Test forward_dummy
batch = torch.ones((1, 3, 64, 64)).to(device=device)
out = detector.forward_dummy(batch)
assert isinstance(out, tuple)
assert len(out) == 2
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
packed_inputs, return_loss=False)
assert len(batch_results) == 2
isinstance(batch_results[0], DetDataSample)
|
import os
from typing import BinaryIO, Optional, Union
import pyarrow as pa
import pyarrow.parquet as pq
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class ParquetDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
**kwargs,
)
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
self.builder = Parquet(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
hash=hash,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
ignore_verifications = False
use_auth_token = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
ignore_verifications=ignore_verifications,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
use_auth_token=use_auth_token,
)
dataset = self.builder.as_dataset(
split=self.split, ignore_verifications=ignore_verifications, in_memory=self.keep_in_memory
)
return dataset
class ParquetDatasetWriter:
def __init__(
self,
dataset: Dataset,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
**parquet_writer_kwargs,
):
self.dataset = dataset
self.path_or_buf = path_or_buf
self.batch_size = batch_size
self.parquet_writer_kwargs = parquet_writer_kwargs
def write(self) -> int:
batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
with open(self.path_or_buf, "wb+") as buffer:
written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
else:
written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
return written
def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
"""Writes the pyarrow table as Parquet to a binary file handle.
Caller is responsible for opening and closing the handle.
"""
written = 0
_ = parquet_writer_kwargs.pop("path_or_buf", None)
schema = pa.schema(self.dataset.features.type)
writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
for offset in range(0, len(self.dataset), batch_size):
batch = query_table(
table=self.dataset._data,
key=slice(offset, offset + batch_size),
indices=self.dataset._indices if self.dataset._indices is not None else None,
)
writer.write_table(batch)
written += batch.nbytes
writer.close()
return written
|
import os
from typing import BinaryIO, Optional, Union
import pyarrow as pa
import pyarrow.parquet as pq
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class ParquetDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
):
super().__init__(
path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
)
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
self.builder = Parquet(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
hash=hash,
**kwargs,
)
def read(self):
download_config = None
download_mode = None
ignore_verifications = False
use_auth_token = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
ignore_verifications=ignore_verifications,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
use_auth_token=use_auth_token,
)
# Build dataset for splits
dataset = self.builder.as_dataset(
split=self.split, ignore_verifications=ignore_verifications, in_memory=self.keep_in_memory
)
return dataset
class ParquetDatasetWriter:
def __init__(
self,
dataset: Dataset,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
**parquet_writer_kwargs,
):
self.dataset = dataset
self.path_or_buf = path_or_buf
self.batch_size = batch_size
self.parquet_writer_kwargs = parquet_writer_kwargs
def write(self) -> int:
batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
with open(self.path_or_buf, "wb+") as buffer:
written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
else:
written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
return written
def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
"""Writes the pyarrow table as Parquet to a binary file handle.
Caller is responsible for opening and closing the handle.
"""
written = 0
_ = parquet_writer_kwargs.pop("path_or_buf", None)
schema = pa.schema(self.dataset.features.type)
writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
for offset in range(0, len(self.dataset), batch_size):
batch = query_table(
table=self.dataset._data,
key=slice(offset, offset + batch_size),
indices=self.dataset._indices if self.dataset._indices is not None else None,
)
writer.write_table(batch)
written += batch.nbytes
writer.close()
return written
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_doc import BaseDoc
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='TextDoc')
class TextDoc(BaseDoc):
"""
Document for handling text.
It can contain:
- a [`TextUrl`][docarray.typing.url.TextUrl] (`TextDoc.url`)
- a `str` (`TextDoc.text`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`TextDoc.embedding`)
- a `bytes` object (`TextDoc.bytes_`)
You can use this Document directly:
```python
from docarray.documents import TextDoc
# use it directly
txt_doc = TextDoc(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
# model = MyEmbeddingModel()
# txt_doc.embedding = model(txt_doc.text)
```
You can initialize directly from a string:
```python
from docarray.documents import TextDoc
txt_doc = TextDoc('hello world')
```
You can extend this Document:
```python
from docarray.documents import TextDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyText(TextDoc):
second_embedding: Optional[AnyEmbedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
# model = MyEmbeddingModel()
# txt_doc.embedding = model(txt_doc.text)
# txt_doc.second_embedding = model(txt_doc.text)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
image_doc: ImageDoc
text_doc: TextDoc
mmdoc = MultiModalDoc(
image_doc=ImageDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
),
text_doc=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.image_doc.tensor = mmdoc.image_doc.url.load()
# or
mmdoc.image_doc.bytes_ = mmdoc.image_doc.url.load_bytes()
mmdoc.image_doc.tensor = mmdoc.image_doc.bytes_.load()
```
This Document can be compared against another Document of the same type or a string.
When compared against another object of the same type, the pydantic BaseModel
equality check will apply which checks the equality of every attribute,
excluding `id`. When compared against a str, it will check the equality
of the `text` attribute against the given string.
```python
from docarray.documents import TextDoc
doc = TextDoc(text='This is the main text', url='exampleurl.com')
doc2 = TextDoc(text='This is the main text', url='exampleurl.com')
doc == 'This is the main text' # True
doc == doc2 # True
```
"""
text: Optional[str]
url: Optional[TextUrl]
embedding: Optional[AnyEmbedding]
bytes_: Optional[bytes]
def __init__(self, text: Optional[str] = None, **kwargs):
if 'text' not in kwargs:
kwargs['text'] = text
super().__init__(**kwargs)
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(text=value)
return super().validate(value)
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return self.text == other
else:
# BaseModel has a default equality
return super().__eq__(other)
def __contains__(self, item: str) -> bool:
"""
This method makes `TextDoc` behave the same as an `str`.
:param item: A string to be checked if is a substring of `text` attribute
:return: A boolean determining the presence of `item` as a substring in `text`
```python
from docarray.documents import TextDoc
t = TextDoc(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
```
"""
if self.text is not None:
return self.text.__contains__(item)
else:
return False
def _get_string_for_regex_filter(self):
return self.text
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_doc import BaseDoc
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='TextDoc')
class TextDoc(BaseDoc):
"""
Document for handling text.
It can contain:
- a [`TextUrl`][docarray.typing.url.TextUrl] (`TextDoc.url`)
- a `str` (`TextDoc.text`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`TextDoc.embedding`)
- a `bytes` object (`TextDoc.bytes_`)
You can use this Document directly:
```python
from docarray.documents import TextDoc
# use it directly
txt_doc = TextDoc(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
# model = MyEmbeddingModel()
# txt_doc.embedding = model(txt_doc.text)
```
You can initialize directly from a string:
```python
from docarray.documents import TextDoc
txt_doc = TextDoc('hello world')
```
You can extend this Document:
```python
from docarray.documents import TextDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyText(TextDoc):
second_embedding: Optional[AnyEmbedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
# model = MyEmbeddingModel()
# txt_doc.embedding = model(txt_doc.text)
# txt_doc.second_embedding = model(txt_doc.text)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
image_doc: ImageDoc
text_doc: TextDoc
mmdoc = MultiModalDoc(
image_doc=ImageDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/image-data/apple.png?raw=true'
),
text_doc=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.image_doc.tensor = mmdoc.image_doc.url.load()
# or
mmdoc.image_doc.bytes_ = mmdoc.image_doc.url.load_bytes()
mmdoc.image_doc.tensor = mmdoc.image_doc.bytes_.load()
```
This Document can be compared against another Document of the same type or a string.
When compared against another object of the same type, the pydantic BaseModel
equality check will apply which checks the equality of every attribute,
excluding `id`. When compared against a str, it will check the equality
of the `text` attribute against the given string.
```python
from docarray.documents import TextDoc
doc = TextDoc(text='This is the main text', url='exampleurl.com')
doc2 = TextDoc(text='This is the main text', url='exampleurl.com')
doc == 'This is the main text' # True
doc == doc2 # True
```
"""
text: Optional[str]
url: Optional[TextUrl]
embedding: Optional[AnyEmbedding]
bytes_: Optional[bytes]
def __init__(self, text: Optional[str] = None, **kwargs):
if 'text' not in kwargs:
kwargs['text'] = text
super().__init__(**kwargs)
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(text=value)
return super().validate(value)
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return self.text == other
else:
# BaseModel has a default equality
return super().__eq__(other)
def __contains__(self, item: str) -> bool:
"""
This method makes `TextDoc` behave the same as an `str`.
:param item: A string to be checked if is a substring of `text` attribute
:return: A boolean determining the presence of `item` as a substring in `text`
```python
from docarray.documents import TextDoc
t = TextDoc(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
```
"""
if self.text is not None:
return self.text.__contains__(item)
else:
return False
def _get_string_for_regex_filter(self):
return self.text
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.31.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}`')
def _set_nofile(nofile_atleast=4096):
"""
Set nofile soft limit to at least 4096, useful for running matlplotlib/seaborn on
parallel executing plot generators vs. Ubuntu default ulimit -n 1024 or OS X El Captian 256
temporary setting extinguishing with Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.30.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}`')
def _set_nofile(nofile_atleast=4096):
"""
Set nofile soft limit to at least 4096, useful for running matlplotlib/seaborn on
parallel executing plot generators vs. Ubuntu default ulimit -n 1024 or OS X El Captian 256
temporary setting extinguishing with Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.financial_datasets import FinancialDatasetsAPIWrapper
class BalanceSheetsSchema(BaseModel):
"""Input for BalanceSheets."""
ticker: str = Field(
description="The ticker symbol to fetch balance sheets for.",
)
period: str = Field(
description="The period of the balance sheets. "
"Possible values are: "
"annual, quarterly, ttm. "
"Default is 'annual'.",
)
limit: int = Field(
description="The number of balance sheets to return. Default is 10.",
)
class BalanceSheets(BaseTool):
"""
Tool that gets balance sheets for a given ticker over a given period.
"""
mode: str = "get_balance_sheets"
name: str = "balance_sheets"
description: str = (
"A wrapper around financial datasets's Balance Sheets API. "
"This tool is useful for fetching balance sheets for a given ticker."
"The tool fetches balance sheets for a given ticker over a given period."
"The period can be annual, quarterly, or trailing twelve months (ttm)."
"The number of balance sheets to return can also be "
"specified using the limit parameter."
)
args_schema: Type[BalanceSheetsSchema] = BalanceSheetsSchema
api_wrapper: FinancialDatasetsAPIWrapper = Field(..., exclude=True)
def __init__(self, api_wrapper: FinancialDatasetsAPIWrapper):
super().__init__(api_wrapper=api_wrapper)
def _run(
self,
ticker: str,
period: str,
limit: Optional[int],
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Balance Sheets API tool."""
return self.api_wrapper.run(
mode=self.mode,
ticker=ticker,
period=period,
limit=limit,
)
|
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.financial_datasets import FinancialDatasetsAPIWrapper
class BalanceSheetsSchema(BaseModel):
"""Input for BalanceSheets."""
ticker: str = Field(
description="The ticker symbol to fetch balance sheets for.",
)
period: str = Field(
description="The period of the balance sheets. "
"Possible values are: "
"annual, quarterly, ttm. "
"Default is 'annual'.",
)
limit: int = Field(
description="The number of balance sheets to return. Default is 10.",
)
class BalanceSheets(BaseTool): # type: ignore[override, override]
"""
Tool that gets balance sheets for a given ticker over a given period.
"""
mode: str = "get_balance_sheets"
name: str = "balance_sheets"
description: str = (
"A wrapper around financial datasets's Balance Sheets API. "
"This tool is useful for fetching balance sheets for a given ticker."
"The tool fetches balance sheets for a given ticker over a given period."
"The period can be annual, quarterly, or trailing twelve months (ttm)."
"The number of balance sheets to return can also be "
"specified using the limit parameter."
)
args_schema: Type[BalanceSheetsSchema] = BalanceSheetsSchema
api_wrapper: FinancialDatasetsAPIWrapper = Field(..., exclude=True)
def __init__(self, api_wrapper: FinancialDatasetsAPIWrapper):
super().__init__(api_wrapper=api_wrapper)
def _run(
self,
ticker: str,
period: str,
limit: Optional[int],
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Balance Sheets API tool."""
return self.api_wrapper.run(
mode=self.mode,
ticker=ticker,
period=period,
limit=limit,
)
|
__version__ = '0.1.0'
from docarray.array.array import DocumentArray
from docarray.document.document import BaseDocument
from docarray.predefined_document import Audio, Image, Mesh3D, PointCloud3D, Text
__all__ = [
'BaseDocument',
'DocumentArray',
'Image',
'Audio',
'Text',
'Mesh3D',
'PointCloud3D',
]
|
__version__ = '0.1.0'
from docarray.array.array import DocumentArray
from docarray.document.document import BaseDocument
from docarray.predefined_document import Image, Mesh3D, PointCloud3D, Text
__all__ = ['BaseDocument', 'DocumentArray', 'Image', 'Text', 'Mesh3D', 'PointCloud3D']
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmengine.utils import digit_version
from torch import Tensor
from mmdet.registry import MODELS
MODELS.register_module('Linear', module=nn.Linear)
@MODELS.register_module(name='NormedLinear')
class NormedLinear(nn.Linear):
"""Normalized Linear Layer.
Args:
tempeature (float, optional): Tempeature term. Defaults to 20.
power (int, optional): Power term. Defaults to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Defaults to 1e-6.
"""
def __init__(self,
*args,
tempearture: float = 20,
power: int = 1.0,
eps: float = 1e-6,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.eps = eps
self.init_weights()
def init_weights(self) -> None:
"""Initialize the weights."""
nn.init.normal_(self.weight, mean=0, std=0.01)
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, x: Tensor) -> Tensor:
"""Forward function for `NormedLinear`."""
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
return F.linear(x_, weight_, self.bias)
@MODELS.register_module(name='NormedConv2d')
class NormedConv2d(nn.Conv2d):
"""Normalized Conv2d Layer.
Args:
tempeature (float, optional): Tempeature term. Defaults to 20.
power (int, optional): Power term. Defaults to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Defaults to 1e-6.
norm_over_kernel (bool, optional): Normalize over kernel.
Defaults to False.
"""
def __init__(self,
*args,
tempearture: float = 20,
power: int = 1.0,
eps: float = 1e-6,
norm_over_kernel: bool = False,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.norm_over_kernel = norm_over_kernel
self.eps = eps
def forward(self, x: Tensor) -> Tensor:
"""Forward function for `NormedConv2d`."""
if not self.norm_over_kernel:
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) +
self.eps)
else:
weight_ = self.weight / (
self.weight.view(self.weight.size(0), -1).norm(
dim=1, keepdim=True).pow(self.power)[..., None, None] +
self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
if hasattr(self, 'conv2d_forward'):
x_ = self.conv2d_forward(x_, weight_)
else:
if digit_version(torch.__version__) >= digit_version('1.8'):
x_ = self._conv_forward(x_, weight_, self.bias)
else:
x_ = self._conv_forward(x_, weight_)
return x_
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from mmdet.registry import MODELS
MODELS.register_module('Linear', module=nn.Linear)
@MODELS.register_module(name='NormedLinear')
class NormedLinear(nn.Linear):
"""Normalized Linear Layer.
Args:
tempeature (float, optional): Tempeature term. Defaults to 20.
power (int, optional): Power term. Defaults to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Defaults to 1e-6.
"""
def __init__(self,
*args,
tempearture: float = 20,
power: int = 1.0,
eps: float = 1e-6,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.eps = eps
self.init_weights()
def init_weights(self) -> None:
"""Initialize the weights."""
nn.init.normal_(self.weight, mean=0, std=0.01)
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, x: Tensor) -> Tensor:
"""Forward function for `NormedLinear`."""
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
return F.linear(x_, weight_, self.bias)
@MODELS.register_module(name='NormedConv2d')
class NormedConv2d(nn.Conv2d):
"""Normalized Conv2d Layer.
Args:
tempeature (float, optional): Tempeature term. Defaults to 20.
power (int, optional): Power term. Defaults to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Defaults to 1e-6.
norm_over_kernel (bool, optional): Normalize over kernel.
Defaults to False.
"""
def __init__(self,
*args,
tempearture: float = 20,
power: int = 1.0,
eps: float = 1e-6,
norm_over_kernel: bool = False,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.norm_over_kernel = norm_over_kernel
self.eps = eps
def forward(self, x: Tensor) -> Tensor:
"""Forward function for `NormedConv2d`."""
if not self.norm_over_kernel:
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) +
self.eps)
else:
weight_ = self.weight / (
self.weight.view(self.weight.size(0), -1).norm(
dim=1, keepdim=True).pow(self.power)[..., None, None] +
self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
if hasattr(self, 'conv2d_forward'):
x_ = self.conv2d_forward(x_, weight_)
else:
if torch.__version__ >= '1.8':
x_ = self._conv_forward(x_, weight_, self.bias)
else:
x_ = self._conv_forward(x_, weight_)
return x_
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""Builder Config for AudioFolder."""
drop_labels: bool = None
drop_metadata: bool = None
def __post_init__(self):
super().__post_init__()
class AudioFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Audio
BASE_COLUMN_NAME = "audio"
BUILDER_CONFIG_CLASS = AudioFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script
CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
# Obtained with:
# ```
# import soundfile as sf
#
# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
#
# # .opus decoding is supported if libsndfile >= 1.0.31:
# AUDIO_EXTENSIONS.extend([".opus"])
# ```
# We intentionally do not run this code on launch because:
# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""Builder Config for AudioFolder."""
drop_labels: bool = None
drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Audio
BASE_COLUMN_NAME = "audio"
BUILDER_CONFIG_CLASS = AudioFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script
CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
# Obtained with:
# ```
# import soundfile as sf
#
# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
#
# # .opus decoding is supported if libsndfile >= 1.0.31:
# AUDIO_EXTENSIONS.extend([".opus"])
# ```
# We intentionally do not run this code on launch because:
# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .conditional_detr_head import ConditionalDETRHead
from .corner_head import CornerHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .dino_head import DINOHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead',
'BoxInstBboxHead', 'BoxInstMaskHead', 'ConditionalDETRHead', 'DINOHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .conditional_detr_head import ConditionalDETRHead
from .corner_head import CornerHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead',
'BoxInstBboxHead', 'BoxInstMaskHead', 'ConditionalDETRHead'
]
|
import inspect
import re
from typing import Dict, List
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return insecure_hashlib.sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
|
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
|
from unittest.mock import MagicMock, patch
import pytest
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
STUB_MODEL_NAME = "placeholder_model"
@pytest.fixture(name="hf_inference_api")
def fixture_hf_inference_api() -> HuggingFaceInferenceAPI:
with patch.dict("sys.modules", huggingface_hub=MagicMock()):
return HuggingFaceInferenceAPI(model_name=STUB_MODEL_NAME)
class TestHuggingFaceInferenceAPI:
def test_class_name(self, hf_inference_api: HuggingFaceInferenceAPI) -> None:
assert HuggingFaceInferenceAPI.class_name() == HuggingFaceInferenceAPI.__name__
assert hf_inference_api.class_name() == HuggingFaceInferenceAPI.__name__
def test_instantiation(self) -> None:
mock_hub = MagicMock()
with patch.dict("sys.modules", huggingface_hub=mock_hub):
llm = HuggingFaceInferenceAPI(model_name=STUB_MODEL_NAME)
assert llm.model_name == STUB_MODEL_NAME
# Check can be both a large language model and an embedding model
assert isinstance(llm, HuggingFaceInferenceAPI)
# Confirm Clients are instantiated correctly
# mock_hub.InferenceClient.assert_called_once_with(
# model=STUB_MODEL_NAME, token=None, timeout=None, headers=None, cookies=None
# )
# mock_hub.AsyncInferenceClient.assert_called_once_with(
# model=STUB_MODEL_NAME, token=None, timeout=None, headers=None, cookies=None
# )
def test_chat(self, hf_inference_api: HuggingFaceInferenceAPI) -> None:
messages = [
ChatMessage(content="Which movie is the best?"),
ChatMessage(content="It's Die Hard for sure.", role=MessageRole.ASSISTANT),
ChatMessage(content="Can you explain why?"),
]
generated_response = (
" It's based on the book of the same name by James Fenimore Cooper."
)
conversational_return = {
"choices": [
{
"message": {
"content": generated_response,
}
}
],
}
with patch.object(
hf_inference_api._sync_client,
"chat_completion",
return_value=conversational_return,
) as mock_conversational:
response = hf_inference_api.chat(messages=messages)
assert response.message.role == MessageRole.ASSISTANT
assert response.message.content == generated_response
mock_conversational.assert_called_once_with(
messages=[{"role": m.role.value, "content": m.content} for m in messages],
model=STUB_MODEL_NAME,
)
def test_chat_text_generation(
self, hf_inference_api: HuggingFaceInferenceAPI
) -> None:
mock_message_to_prompt = MagicMock(
return_value="System: You are an expert movie reviewer\nUser: Which movie is the best?\nAssistant:"
)
hf_inference_api.task = "text-generation"
hf_inference_api.messages_to_prompt = mock_message_to_prompt
messages = [
ChatMessage(
role=MessageRole.SYSTEM, content="You are an expert movie reviewer"
),
ChatMessage(role=MessageRole.USER, content="Which movie is the best?"),
]
conversational_return = "It's Die Hard for sure."
with patch.object(
hf_inference_api._sync_client,
"text_generation",
return_value=conversational_return,
) as mock_complete:
response = hf_inference_api.chat(messages=messages)
hf_inference_api.messages_to_prompt.assert_called_once_with(messages)
assert response.message.role == MessageRole.ASSISTANT
assert response.message.content == conversational_return
mock_complete.assert_called_once_with(
"System: You are an expert movie reviewer\nUser: Which movie is the best?\nAssistant:",
max_new_tokens=256,
)
def test_complete(self, hf_inference_api: HuggingFaceInferenceAPI) -> None:
prompt = "My favorite color is "
generated_text = '"green" and I love to paint. I have been painting for 30 years and have been'
with patch.object(
hf_inference_api._sync_client,
"text_generation",
return_value=generated_text,
) as mock_text_generation:
response = hf_inference_api.complete(prompt)
mock_text_generation.assert_called_once_with(prompt, max_new_tokens=256)
assert response.text == generated_text
|
from unittest.mock import MagicMock, patch
import pytest
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
STUB_MODEL_NAME = "placeholder_model"
@pytest.fixture(name="hf_inference_api")
def fixture_hf_inference_api() -> HuggingFaceInferenceAPI:
with patch.dict("sys.modules", huggingface_hub=MagicMock()):
return HuggingFaceInferenceAPI(model_name=STUB_MODEL_NAME)
class TestHuggingFaceInferenceAPI:
def test_class_name(self, hf_inference_api: HuggingFaceInferenceAPI) -> None:
assert HuggingFaceInferenceAPI.class_name() == HuggingFaceInferenceAPI.__name__
assert hf_inference_api.class_name() == HuggingFaceInferenceAPI.__name__
def test_instantiation(self) -> None:
mock_hub = MagicMock()
with patch.dict("sys.modules", huggingface_hub=mock_hub):
llm = HuggingFaceInferenceAPI(model_name=STUB_MODEL_NAME)
assert llm.model_name == STUB_MODEL_NAME
# Check can be both a large language model and an embedding model
assert isinstance(llm, HuggingFaceInferenceAPI)
# Confirm Clients are instantiated correctly
# mock_hub.InferenceClient.assert_called_once_with(
# model=STUB_MODEL_NAME, token=None, timeout=None, headers=None, cookies=None
# )
# mock_hub.AsyncInferenceClient.assert_called_once_with(
# model=STUB_MODEL_NAME, token=None, timeout=None, headers=None, cookies=None
# )
def test_chat(self, hf_inference_api: HuggingFaceInferenceAPI) -> None:
messages = [
ChatMessage(content="Which movie is the best?"),
ChatMessage(content="It's Die Hard for sure.", role=MessageRole.ASSISTANT),
ChatMessage(content="Can you explain why?"),
]
generated_response = (
" It's based on the book of the same name by James Fenimore Cooper."
)
conversational_return = {
"generated_text": generated_response,
"conversation": {
"generated_responses": ["It's Die Hard for sure.", generated_response],
"past_user_inputs": [
"Which movie is the best?",
"Can you explain why?",
],
},
}
with patch.object(
hf_inference_api._sync_client,
"conversational",
return_value=conversational_return,
) as mock_conversational:
response = hf_inference_api.chat(messages=messages)
assert response.message.role == MessageRole.ASSISTANT
assert response.message.content == generated_response
mock_conversational.assert_called_once_with(
text="Can you explain why?",
past_user_inputs=["Which movie is the best?"],
generated_responses=["It's Die Hard for sure."],
)
def test_chat_text_generation(
self, hf_inference_api: HuggingFaceInferenceAPI
) -> None:
mock_message_to_prompt = MagicMock(
return_value="System: You are an expert movie reviewer\nUser: Which movie is the best?\nAssistant:"
)
hf_inference_api.task = "text-generation"
hf_inference_api.messages_to_prompt = mock_message_to_prompt
messages = [
ChatMessage(
role=MessageRole.SYSTEM, content="You are an expert movie reviewer"
),
ChatMessage(role=MessageRole.USER, content="Which movie is the best?"),
]
conversational_return = "It's Die Hard for sure."
with patch.object(
hf_inference_api._sync_client,
"text_generation",
return_value=conversational_return,
) as mock_complete:
response = hf_inference_api.chat(messages=messages)
hf_inference_api.messages_to_prompt.assert_called_once_with(messages)
assert response.message.role == MessageRole.ASSISTANT
assert response.message.content == conversational_return
mock_complete.assert_called_once_with(
"System: You are an expert movie reviewer\nUser: Which movie is the best?\nAssistant:",
max_new_tokens=256,
)
def test_complete(self, hf_inference_api: HuggingFaceInferenceAPI) -> None:
prompt = "My favorite color is "
generated_text = '"green" and I love to paint. I have been painting for 30 years and have been'
with patch.object(
hf_inference_api._sync_client,
"text_generation",
return_value=generated_text,
) as mock_text_generation:
response = hf_inference_api.complete(prompt)
mock_text_generation.assert_called_once_with(prompt, max_new_tokens=256)
assert response.text == generated_text
|
_base_ = './gfl_r50_fpn_1x_coco.py'
max_epochs = 24
# learning policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# multi-scale training
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './gfl_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
# multi-scale training
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 480), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline))
|
"""
This example runs a BiLSTM after the word embedding lookup. The output of the BiLSTM is than pooled,
for example with max-pooling (which gives a system like InferSent) or with mean-pooling.
Note, you can also pass BERT embeddings to the BiLSTM.
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses, models
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_bilstm-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to traditional word embeddings like GloVe
word_embedding_model = models.WordEmbeddings.from_text_file("glove.6B.300d.txt.gz")
lstm = models.LSTM(word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(), hidden_dim=1024)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
lstm.get_word_embedding_dimension(),
pooling_mode="mean",
)
model = SentenceTransformer(modules=[word_embedding_model, lstm, pooling_model])
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="glove-bilstm-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 8. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = "glove-bilstm-sts"
try:
model.push_to_hub(model_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}')`."
)
|
"""
This example runs a BiLSTM after the word embedding lookup. The output of the BiLSTM is than pooled,
for example with max-pooling (which gives a system like InferSent) or with mean-pooling.
Note, you can also pass BERT embeddings to the BiLSTM.
"""
import traceback
from datasets import load_dataset
from sentence_transformers import models, losses
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_bilstm-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to traditional word embeddings like GloVe
word_embedding_model = models.WordEmbeddings.from_text_file("glove.6B.300d.txt.gz")
lstm = models.LSTM(word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(), hidden_dim=1024)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
lstm.get_word_embedding_dimension(),
pooling_mode="mean",
)
model = SentenceTransformer(modules=[word_embedding_model, lstm, pooling_model])
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="glove-bilstm-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 8. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = "glove-bilstm-sts"
try:
model.push_to_hub(model_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}')`."
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple, Union
import cv2
import numpy as np
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.dl_utils import tensor2imgs
DATA_BATCH = Optional[Union[dict, tuple, list]]
# TODO: Due to interface changes, the current class
# functions incorrectly
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Defaults to 1.
draw_gt (bool): Whether to draw the ground truth. Defaults to True.
draw_pred (bool): Whether to draw the predicted result.
Defaults to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
"""Unpad the input image.
Args:
input (np.ndarray): The image to unpad.
unpad_shape (tuple): The shape of image before padding.
Returns:
np.ndarray: The image before padding.
"""
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def before_train(self, runner) -> None:
"""Call add_graph method of visualizer.
Args:
runner (Runner): The runner of the training process.
"""
runner.visualizer.add_graph(runner.model, None)
def after_test_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Sequence] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (dict or tuple or list, optional): Data from dataloader.
outputs (Sequence, optional): Outputs from model.
"""
if self.every_n_inner_iters(batch_idx, self._interval):
for data, output in zip(data_batch, outputs): # type: ignore
input = data['inputs']
data_sample = data['data_sample']
input = tensor2imgs(input,
**data_sample.get('img_norm_cfg',
dict()))[0]
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.visualizer.add_datasample(name, origin_image,
data_sample, output,
self.draw_gt, self.draw_pred)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple, Union
import cv2
import numpy as np
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.dl_utils import tensor2imgs
DATA_BATCH = Optional[Union[dict, tuple, list]]
# TODO: Due to interface changes, the current class
# functions incorrectly
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Defaults to 1.
draw_gt (bool): Whether to draw the ground truth. Default to True.
draw_pred (bool): Whether to draw the predicted result.
Default to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
"""Unpad the input image.
Args:
input (np.ndarray): The image to unpad.
unpad_shape (tuple): The shape of image before padding.
Returns:
np.ndarray: The image before padding.
"""
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def before_train(self, runner) -> None:
"""Call add_graph method of visualizer.
Args:
runner (Runner): The runner of the training process.
"""
runner.visualizer.add_graph(runner.model, None)
def after_test_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Sequence] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (dict or tuple or list, optional): Data from dataloader.
outputs (Sequence, optional): Outputs from model.
"""
if self.every_n_inner_iters(batch_idx, self._interval):
for data, output in zip(data_batch, outputs): # type: ignore
input = data['inputs']
data_sample = data['data_sample']
input = tensor2imgs(input,
**data_sample.get('img_norm_cfg',
dict()))[0]
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.visualizer.add_datasample(name, origin_image,
data_sample, output,
self.draw_gt, self.draw_pred)
|
from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.answer_similarity_metric import (
AnswerSimilarityMetric,
)
from tonic_validate.services.openai_service import OpenAIService
class AnswerSimilarityEvaluator(BaseEvaluator):
"""
Tonic Validate's answer similarity metric.
The output score is a float between 0.0 and 5.0.
See https://docs.tonic.ai/validate/ for more details.
Args:
openai_service(OpenAIService): The OpenAI service to use. Specifies the chat
completion model to use as the LLM evaluator. Defaults to "gpt-4".
"""
def __init__(self, openai_service: Optional[Any] = None):
if openai_service is None:
openai_service = OpenAIService("gpt-4")
self.openai_service = openai_service
self.metric = AnswerSimilarityMetric()
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
reference_response: Optional[str] = None,
**kwargs: Any,
) -> EvaluationResult:
from tonic_validate.classes.benchmark import BenchmarkItem
from tonic_validate.classes.llm_response import LLMResponse
benchmark_item = BenchmarkItem(question=query, answer=reference_response)
llm_response = LLMResponse(
llm_answer=response,
llm_context_list=contexts,
benchmark_item=benchmark_item,
)
score = self.metric.score(llm_response, self.openai_service)
return EvaluationResult(
query=query, contexts=contexts, response=response, score=score
)
def _get_prompts(self) -> PromptDictType:
return {}
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
return
|
from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.answer_similarity_metric import (
AnswerSimilarityMetric,
)
from tonic_validate.services.openai_service import OpenAIService
class AnswerSimilarityEvaluator(BaseEvaluator):
"""
Tonic Validate's answer similarity metric.
The output score is a float between 0.0 and 5.0.
See https://docs.tonic.ai/validate/ for more details.
Args:
openai_service(OpenAIService): The OpenAI service to use. Specifies the chat
completion model to use as the LLM evaluator. Defaults to "gpt-4".
"""
def __init__(self, openai_service: Optional[Any] = None):
if openai_service is None:
openai_service = OpenAIService("gpt-4")
self.openai_service = openai_service
self.metric = AnswerSimilarityMetric()
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
reference_response: Optional[str] = None,
**kwargs: Any
) -> EvaluationResult:
from tonic_validate.classes.benchmark import BenchmarkItem
from tonic_validate.classes.llm_response import LLMResponse
benchmark_item = BenchmarkItem(question=query, answer=reference_response)
llm_response = LLMResponse(
llm_answer=response,
llm_context_list=contexts,
benchmark_item=benchmark_item,
)
score = self.metric.score(llm_response, self.openai_service)
return EvaluationResult(
query=query, contexts=contexts, response=response, score=score
)
def _get_prompts(self) -> PromptDictType:
return {}
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
return
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import List, Optional
from mmengine.dataset import BaseDataset
from mmengine.fileio import load
from mmengine.utils import is_abs
from ..registry import DATASETS
@DATASETS.register_module()
class BaseDetDataset(BaseDataset):
"""Base dataset for detection.
Args:
proposal_file (str, optional): Proposals file path. Defaults to None.
file_client_args (dict): Arguments to instantiate the
corresponding backend in mmdet <= 3.0.0rc6. Defaults to None.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
"""
def __init__(self,
*args,
seg_map_suffix: str = '.png',
proposal_file: Optional[str] = None,
file_client_args: dict = None,
backend_args: dict = None,
return_caption: Optional[bool] = False,
**kwargs) -> None:
self.seg_map_suffix = seg_map_suffix
self.proposal_file = proposal_file
self.backend_args = backend_args
self.return_caption = return_caption
if file_client_args is not None:
raise RuntimeError(
'The `file_client_args` is deprecated, '
'please use `backend_args` instead, please refer to'
'https://github.com/open-mmlab/mmdetection/blob/main/configs/_base_/datasets/coco_detection.py' # noqa: E501
)
super().__init__(*args, **kwargs)
def full_init(self) -> None:
"""Load annotation file and set ``BaseDataset._fully_initialized`` to
True.
If ``lazy_init=False``, ``full_init`` will be called during the
instantiation and ``self._fully_initialized`` will be set to True. If
``obj._fully_initialized=False``, the class method decorated by
``force_full_init`` will call ``full_init`` automatically.
Several steps to initialize annotation:
- load_data_list: Load annotations from annotation file.
- load_proposals: Load proposals from proposal file, if
`self.proposal_file` is not None.
- filter data information: Filter annotations according to
filter_cfg.
- slice_data: Slice dataset according to ``self._indices``
- serialize_data: Serialize ``self.data_list`` if
``self.serialize_data`` is True.
"""
if self._fully_initialized:
return
# load data information
self.data_list = self.load_data_list()
# get proposals from file
if self.proposal_file is not None:
self.load_proposals()
# filter illegal data, such as data that has no annotations.
self.data_list = self.filter_data()
# Get subset data according to indices.
if self._indices is not None:
self.data_list = self._get_unserialized_subset(self._indices)
# serialize data_list
if self.serialize_data:
self.data_bytes, self.data_address = self._serialize_data()
self._fully_initialized = True
def load_proposals(self) -> None:
"""Load proposals from proposals file.
The `proposals_list` should be a dict[img_path: proposals]
with the same length as `data_list`. And the `proposals` should be
a `dict` or :obj:`InstanceData` usually contains following keys.
- bboxes (np.ndarry): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- scores (np.ndarry): Classification scores, has a shape
(num_instance, ).
"""
# TODO: Add Unit Test after fully support Dump-Proposal Metric
if not is_abs(self.proposal_file):
self.proposal_file = osp.join(self.data_root, self.proposal_file)
proposals_list = load(
self.proposal_file, backend_args=self.backend_args)
assert len(self.data_list) == len(proposals_list)
for data_info in self.data_list:
img_path = data_info['img_path']
# `file_name` is the key to obtain the proposals from the
# `proposals_list`.
file_name = osp.join(
osp.split(osp.split(img_path)[0])[-1],
osp.split(img_path)[-1])
proposals = proposals_list[file_name]
data_info['proposals'] = proposals
def get_cat_ids(self, idx: int) -> List[int]:
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
List[int]: All categories in the image of specified index.
"""
instances = self.get_data_info(idx)['instances']
return [instance['bbox_label'] for instance in instances]
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import List, Optional
from mmengine.dataset import BaseDataset
from mmengine.fileio import load
from mmengine.utils import is_abs
from ..registry import DATASETS
@DATASETS.register_module()
class BaseDetDataset(BaseDataset):
"""Base dataset for detection.
Args:
proposal_file (str, optional): Proposals file path. Defaults to None.
file_client_args (dict): Arguments to instantiate the
corresponding backend in mmdet <= 3.0.0rc6. Defaults to None.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
"""
def __init__(self,
*args,
seg_map_suffix: str = '.png',
proposal_file: Optional[str] = None,
file_client_args: dict = None,
backend_args: dict = None,
**kwargs) -> None:
self.seg_map_suffix = seg_map_suffix
self.proposal_file = proposal_file
self.backend_args = backend_args
if file_client_args is not None:
raise RuntimeError(
'The `file_client_args` is deprecated, '
'please use `backend_args` instead, please refer to'
'https://github.com/open-mmlab/mmdetection/blob/main/configs/_base_/datasets/coco_detection.py' # noqa: E501
)
super().__init__(*args, **kwargs)
def full_init(self) -> None:
"""Load annotation file and set ``BaseDataset._fully_initialized`` to
True.
If ``lazy_init=False``, ``full_init`` will be called during the
instantiation and ``self._fully_initialized`` will be set to True. If
``obj._fully_initialized=False``, the class method decorated by
``force_full_init`` will call ``full_init`` automatically.
Several steps to initialize annotation:
- load_data_list: Load annotations from annotation file.
- load_proposals: Load proposals from proposal file, if
`self.proposal_file` is not None.
- filter data information: Filter annotations according to
filter_cfg.
- slice_data: Slice dataset according to ``self._indices``
- serialize_data: Serialize ``self.data_list`` if
``self.serialize_data`` is True.
"""
if self._fully_initialized:
return
# load data information
self.data_list = self.load_data_list()
# get proposals from file
if self.proposal_file is not None:
self.load_proposals()
# filter illegal data, such as data that has no annotations.
self.data_list = self.filter_data()
# Get subset data according to indices.
if self._indices is not None:
self.data_list = self._get_unserialized_subset(self._indices)
# serialize data_list
if self.serialize_data:
self.data_bytes, self.data_address = self._serialize_data()
self._fully_initialized = True
def load_proposals(self) -> None:
"""Load proposals from proposals file.
The `proposals_list` should be a dict[img_path: proposals]
with the same length as `data_list`. And the `proposals` should be
a `dict` or :obj:`InstanceData` usually contains following keys.
- bboxes (np.ndarry): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- scores (np.ndarry): Classification scores, has a shape
(num_instance, ).
"""
# TODO: Add Unit Test after fully support Dump-Proposal Metric
if not is_abs(self.proposal_file):
self.proposal_file = osp.join(self.data_root, self.proposal_file)
proposals_list = load(
self.proposal_file, backend_args=self.backend_args)
assert len(self.data_list) == len(proposals_list)
for data_info in self.data_list:
img_path = data_info['img_path']
# `file_name` is the key to obtain the proposals from the
# `proposals_list`.
file_name = osp.join(
osp.split(osp.split(img_path)[0])[-1],
osp.split(img_path)[-1])
proposals = proposals_list[file_name]
data_info['proposals'] = proposals
def get_cat_ids(self, idx: int) -> List[int]:
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
List[int]: All categories in the image of specified index.
"""
instances = self.get_data_info(idx)['instances']
return [instance['bbox_label'] for instance in instances]
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import mmcv
import numpy as np
try:
import imageio
except ImportError:
imageio = None
def parse_args():
parser = argparse.ArgumentParser(description='Create GIF for demo')
parser.add_argument(
'image_dir',
help='directory where result '
'images save path generated by ‘analyze_results.py’')
parser.add_argument(
'--out',
type=str,
default='result.gif',
help='gif path where will be saved')
args = parser.parse_args()
return args
def _generate_batch_data(sampler, batch_size):
batch = []
for idx in sampler:
batch.append(idx)
if len(batch) == batch_size:
yield batch
batch = []
if len(batch) > 0:
yield batch
def create_gif(frames, gif_name, duration=2):
"""Create gif through imageio.
Args:
frames (list[ndarray]): Image frames
gif_name (str): Saved gif name
duration (int): Display interval (s),
Default: 2
"""
if imageio is None:
raise RuntimeError('imageio is not installed,'
'Please use “pip install imageio” to install')
imageio.mimsave(gif_name, frames, 'GIF', duration=duration)
def create_frame_by_matplotlib(image_dir,
nrows=1,
fig_size=(300, 300),
font_size=15):
"""Create gif frame image through matplotlib.
Args:
image_dir (str): Root directory of result images
nrows (int): Number of rows displayed, Default: 1
fig_size (tuple): Figure size of the pyplot figure.
Default: (300, 300)
font_size (int): Font size of texts. Default: 15
Returns:
list[ndarray]: image frames
"""
result_dir_names = os.listdir(image_dir)
assert len(result_dir_names) == 2
# Longer length has higher priority
result_dir_names.reverse()
images_list = []
for dir_names in result_dir_names:
images_list.append(mmcv.scandir(osp.join(image_dir, dir_names)))
frames = []
for paths in _generate_batch_data(zip(*images_list), nrows):
fig, axes = plt.subplots(nrows=nrows, ncols=2)
fig.suptitle('Good/bad case selected according '
'to the COCO mAP of the single image')
det_patch = mpatches.Patch(color='salmon', label='prediction')
gt_patch = mpatches.Patch(color='royalblue', label='ground truth')
# bbox_to_anchor may need to be finetuned
plt.legend(
handles=[det_patch, gt_patch],
bbox_to_anchor=(1, -0.18),
loc='lower right',
borderaxespad=0.)
if nrows == 1:
axes = [axes]
dpi = fig.get_dpi()
# set fig size and margin
fig.set_size_inches(
(fig_size[0] * 2 + fig_size[0] // 20) / dpi,
(fig_size[1] * nrows + fig_size[1] // 3) / dpi,
)
fig.tight_layout()
# set subplot margin
plt.subplots_adjust(
hspace=.05,
wspace=0.05,
left=0.02,
right=0.98,
bottom=0.02,
top=0.98)
for i, (path_tuple, ax_tuple) in enumerate(zip(paths, axes)):
image_path_left = osp.join(
osp.join(image_dir, result_dir_names[0], path_tuple[0]))
image_path_right = osp.join(
osp.join(image_dir, result_dir_names[1], path_tuple[1]))
image_left = mmcv.imread(image_path_left)
image_left = mmcv.rgb2bgr(image_left)
image_right = mmcv.imread(image_path_right)
image_right = mmcv.rgb2bgr(image_right)
if i == 0:
ax_tuple[0].set_title(
result_dir_names[0], fontdict={'size': font_size})
ax_tuple[1].set_title(
result_dir_names[1], fontdict={'size': font_size})
ax_tuple[0].imshow(
image_left, extent=(0, *fig_size, 0), interpolation='bilinear')
ax_tuple[0].axis('off')
ax_tuple[1].imshow(
image_right,
extent=(0, *fig_size, 0),
interpolation='bilinear')
ax_tuple[1].axis('off')
canvas = fig.canvas
s, (width, height) = canvas.print_to_buffer()
buffer = np.frombuffer(s, dtype='uint8')
img_rgba = buffer.reshape(height, width, 4)
rgb, alpha = np.split(img_rgba, [3], axis=2)
img = rgb.astype('uint8')
frames.append(img)
return frames
def main():
args = parse_args()
frames = create_frame_by_matplotlib(args.image_dir)
create_gif(frames, args.out)
if __name__ == '__main__':
main()
|
import argparse
import os
import os.path as osp
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import mmcv
import numpy as np
try:
import imageio
except ImportError:
imageio = None
def parse_args():
parser = argparse.ArgumentParser(description='Create GIF for demo')
parser.add_argument(
'image_dir',
help='directory where result '
'images save path generated by ‘analyze_results.py’')
parser.add_argument(
'--out',
type=str,
default='result.gif',
help='gif path where will be saved')
args = parser.parse_args()
return args
def _generate_batch_data(sampler, batch_size):
batch = []
for idx in sampler:
batch.append(idx)
if len(batch) == batch_size:
yield batch
batch = []
if len(batch) > 0:
yield batch
def create_gif(frames, gif_name, duration=2):
"""Create gif through imageio.
Args:
frames (list[ndarray]): Image frames
gif_name (str): Saved gif name
duration (int): Display interval (s),
Default: 2
"""
if imageio is None:
raise RuntimeError('imageio is not installed,'
'Please use “pip install imageio” to install')
imageio.mimsave(gif_name, frames, 'GIF', duration=duration)
def create_frame_by_matplotlib(image_dir,
nrows=1,
fig_size=(300, 300),
font_size=15):
"""Create gif frame image through matplotlib.
Args:
image_dir (str): Root directory of result images
nrows (int): Number of rows displayed, Default: 1
fig_size (tuple): Figure size of the pyplot figure.
Default: (300, 300)
font_size (int): Font size of texts. Default: 15
Returns:
list[ndarray]: image frames
"""
result_dir_names = os.listdir(image_dir)
assert len(result_dir_names) == 2
# Longer length has higher priority
result_dir_names.reverse()
images_list = []
for dir_names in result_dir_names:
images_list.append(mmcv.scandir(osp.join(image_dir, dir_names)))
frames = []
for paths in _generate_batch_data(zip(*images_list), nrows):
fig, axes = plt.subplots(nrows=nrows, ncols=2)
fig.suptitle('Good/bad case selected according '
'to the COCO mAP of the single image')
det_patch = mpatches.Patch(color='salmon', label='prediction')
gt_patch = mpatches.Patch(color='royalblue', label='ground truth')
# bbox_to_anchor may need to be finetuned
plt.legend(
handles=[det_patch, gt_patch],
bbox_to_anchor=(1, -0.18),
loc='lower right',
borderaxespad=0.)
if nrows == 1:
axes = [axes]
dpi = fig.get_dpi()
# set fig size and margin
fig.set_size_inches(
(fig_size[0] * 2 + fig_size[0] // 20) / dpi,
(fig_size[1] * nrows + fig_size[1] // 3) / dpi,
)
fig.tight_layout()
# set subplot margin
plt.subplots_adjust(
hspace=.05,
wspace=0.05,
left=0.02,
right=0.98,
bottom=0.02,
top=0.98)
for i, (path_tuple, ax_tuple) in enumerate(zip(paths, axes)):
image_path_left = osp.join(
osp.join(image_dir, result_dir_names[0], path_tuple[0]))
image_path_right = osp.join(
osp.join(image_dir, result_dir_names[1], path_tuple[1]))
image_left = mmcv.imread(image_path_left)
image_left = mmcv.rgb2bgr(image_left)
image_right = mmcv.imread(image_path_right)
image_right = mmcv.rgb2bgr(image_right)
if i == 0:
ax_tuple[0].set_title(
result_dir_names[0], fontdict={'size': font_size})
ax_tuple[1].set_title(
result_dir_names[1], fontdict={'size': font_size})
ax_tuple[0].imshow(
image_left, extent=(0, *fig_size, 0), interpolation='bilinear')
ax_tuple[0].axis('off')
ax_tuple[1].imshow(
image_right,
extent=(0, *fig_size, 0),
interpolation='bilinear')
ax_tuple[1].axis('off')
canvas = fig.canvas
s, (width, height) = canvas.print_to_buffer()
buffer = np.frombuffer(s, dtype='uint8')
img_rgba = buffer.reshape(height, width, 4)
rgb, alpha = np.split(img_rgba, [3], axis=2)
img = rgb.astype('uint8')
frames.append(img)
return frames
def main():
args = parse_args()
frames = create_frame_by_matplotlib(args.image_dir)
create_gif(frames, args.out)
if __name__ == '__main__':
main()
|
from typing import Any, Union
from ..utils import add_end_docstrings
from .base import GenericTensor, Pipeline, build_pipeline_init_args
@add_end_docstrings(
build_pipeline_init_args(has_tokenizer=True, supports_binary_output=False),
r"""
tokenize_kwargs (`dict`, *optional*):
Additional dictionary of keyword arguments passed along to the tokenizer.
return_tensors (`bool`, *optional*):
If `True`, returns a tensor according to the specified framework, otherwise returns a list.""",
)
class FeatureExtractionPipeline(Pipeline):
"""
Feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base
transformer, which can be used as features in downstream tasks.
Example:
```python
>>> from transformers import pipeline
>>> extractor = pipeline(model="google-bert/bert-base-uncased", task="feature-extraction")
>>> result = extractor("This is a simple test.", return_tensors=True)
>>> result.shape # This is a tensor of shape [1, sequence_length, hidden_dimension] representing the input string.
torch.Size([1, 8, 768])
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier:
`"feature-extraction"`.
All models may be used for this pipeline. See a list of all models, including community-contributed models on
[huggingface.co/models](https://huggingface.co/models).
"""
def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
if tokenize_kwargs is None:
tokenize_kwargs = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
"truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
)
tokenize_kwargs["truncation"] = truncation
preprocess_params = tokenize_kwargs
postprocess_params = {}
if return_tensors is not None:
postprocess_params["return_tensors"] = return_tensors
return preprocess_params, {}, postprocess_params
def preprocess(self, inputs, **tokenize_kwargs) -> dict[str, GenericTensor]:
model_inputs = self.tokenizer(inputs, return_tensors=self.framework, **tokenize_kwargs)
return model_inputs
def _forward(self, model_inputs):
model_outputs = self.model(**model_inputs)
return model_outputs
def postprocess(self, model_outputs, return_tensors=False):
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__(self, *args: Union[str, list[str]], **kwargs: Any) -> Union[Any, list[Any]]:
"""
Extract the features of the input(s) text.
Args:
args (`str` or `list[str]`): One or several texts (or one list of texts) to get the features of.
Return:
A nested list of `float`: The features computed by the model.
"""
return super().__call__(*args, **kwargs)
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings
from .base import GenericTensor, Pipeline, build_pipeline_init_args
@add_end_docstrings(
build_pipeline_init_args(has_tokenizer=True, supports_binary_output=False),
r"""
tokenize_kwargs (`dict`, *optional*):
Additional dictionary of keyword arguments passed along to the tokenizer.
return_tensors (`bool`, *optional*):
If `True`, returns a tensor according to the specified framework, otherwise returns a list.""",
)
class FeatureExtractionPipeline(Pipeline):
"""
Feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base
transformer, which can be used as features in downstream tasks.
Example:
```python
>>> from transformers import pipeline
>>> extractor = pipeline(model="google-bert/bert-base-uncased", task="feature-extraction")
>>> result = extractor("This is a simple test.", return_tensors=True)
>>> result.shape # This is a tensor of shape [1, sequence_length, hidden_dimension] representing the input string.
torch.Size([1, 8, 768])
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier:
`"feature-extraction"`.
All models may be used for this pipeline. See a list of all models, including community-contributed models on
[huggingface.co/models](https://huggingface.co/models).
"""
def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
if tokenize_kwargs is None:
tokenize_kwargs = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
"truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
)
tokenize_kwargs["truncation"] = truncation
preprocess_params = tokenize_kwargs
postprocess_params = {}
if return_tensors is not None:
postprocess_params["return_tensors"] = return_tensors
return preprocess_params, {}, postprocess_params
def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
model_inputs = self.tokenizer(inputs, return_tensors=self.framework, **tokenize_kwargs)
return model_inputs
def _forward(self, model_inputs):
model_outputs = self.model(**model_inputs)
return model_outputs
def postprocess(self, model_outputs, return_tensors=False):
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__(self, *args: Union[str, List[str]], **kwargs: Any) -> Union[Any, List[Any]]:
"""
Extract the features of the input(s) text.
Args:
args (`str` or `List[str]`): One or several texts (or one list of texts) to get the features of.
Return:
A nested list of `float`: The features computed by the model.
"""
return super().__call__(*args, **kwargs)
|
import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
def check_raw_file_full(raw, lang="python", keyword_ignore=[]):
if lang not in _executors:
raise LookupError(
f"{lang} is not a supported language to check\n"
"\tHint: you can add support for any language by using register_executor"
)
executor = _executors[lang]
all_code = ""
add_code_block = True
for b in grab_code_blocks(raw, lang=lang):
add_code_block = True
for keyword in keyword_ignore:
if keyword in b:
add_code_block = False
break
if add_code_block:
all_code = f"{all_code}\n{b}"
executor(all_code)
def check_md_file(fpath, memory=False, lang="python", keyword_ignore=[]):
"""
NOTE: copy paste from mktestdocs.__main__ and add the keyword ignore
Given a markdown file, parse the contents for python code blocks
and check that each independent block does not cause an error.
Arguments:
fpath: path to markdown file
memory: whether or not previous code-blocks should be remembered
"""
text = pathlib.Path(fpath).read_text()
if not memory:
check_raw_string(text, lang=lang)
else:
check_raw_file_full(text, lang=lang, keyword_ignore=keyword_ignore)
@pytest.mark.parametrize(
'fpath', pathlib.Path('docs/user_guide').glob('**/*.md'), ids=str
)
def test_files_good(fpath):
check_md_file(fpath=fpath, memory=True)
def test_readme():
check_md_file(
fpath='README.md', memory=True, keyword_ignore=['tensorflow', 'fastapi', 'push']
)
|
import pathlib
import pytest
from mktestdocs import check_md_file
# @pytest.mark.parametrize('fpath', pathlib.Path("docs").glob("**/*.md"), ids=str)
# to use later
@pytest.mark.parametrize(
'fpath', pathlib.Path('docs/user_guide').glob('**/*.md'), ids=str
)
def test_files_good(fpath):
check_md_file(fpath=fpath, memory=True)
|
import json
from jina.orchestrate.flow.base import Flow
from jina.orchestrate.deployments import Deployment
from jina.jaml import JAML
from jina.logging.predefined import default_logger
from jina.schemas import get_full_schema
from jina_cli.export import api_to_dict
def export_kubernetes(args):
"""Export to k8s yaml files
:param args: args from CLI
"""
from jina.jaml import JAMLCompatible
obj = JAMLCompatible.load_config(args.config_path)
if isinstance(obj, (Flow, Deployment)):
obj.to_kubernetes_yaml(
output_base_path=args.outpath, k8s_namespace=args.k8s_namespace
)
else:
raise NotImplementedError(f'Object of class {obj.__class__.__name__} cannot be exported to Kubernetes')
def export_docker_compose(args):
"""Export to Docker compose yaml files
:param args: args from CLI
"""
Flow.load_config(args.config_path).to_docker_compose_yaml(
output_path=args.outpath, network_name=args.network_name
)
def export_flowchart(args):
"""Export to flowchart file
:param args: args from CLI
"""
Flow.load_config(args.config_path).plot(
args.outpath, vertical_layout=args.vertical_layout
)
def export_schema(args):
"""Export to JSON Schemas
:param args: args from CLI
"""
from jina import __version__
if args.yaml_path:
dump_api = api_to_dict()
for yp in args.yaml_path:
f_name = (yp % __version__) if '%s' in yp else yp
with open(f_name, 'w', encoding='utf8') as fp:
JAML.dump(dump_api, fp)
default_logger.info(f'API is exported to {f_name}')
if args.json_path:
dump_api = api_to_dict()
for jp in args.json_path:
f_name = (jp % __version__) if '%s' in jp else jp
with open(f_name, 'w', encoding='utf8') as fp:
json.dump(dump_api, fp, sort_keys=True)
default_logger.info(f'API is exported to {f_name}')
if args.schema_path:
dump_api = get_full_schema()
for jp in args.schema_path:
f_name = (jp % __version__) if '%s' in jp else jp
with open(f_name, 'w', encoding='utf8') as fp:
json.dump(dump_api, fp, sort_keys=True)
default_logger.info(f'API is exported to {f_name}')
|
import json
from jina.orchestrate.flow.base import Flow
from jina.jaml import JAML
from jina.logging.predefined import default_logger
from jina.schemas import get_full_schema
from jina_cli.export import api_to_dict
def export_kubernetes(args):
"""Export to k8s yaml files
:param args: args from CLI
"""
Flow.load_config(args.flowpath).to_kubernetes_yaml(
output_base_path=args.outpath, k8s_namespace=args.k8s_namespace
)
def export_docker_compose(args):
"""Export to Docker compose yaml files
:param args: args from CLI
"""
Flow.load_config(args.flowpath).to_docker_compose_yaml(
output_path=args.outpath, network_name=args.network_name
)
def export_flowchart(args):
"""Export to flowchart file
:param args: args from CLI
"""
Flow.load_config(args.flowpath).plot(
args.outpath, vertical_layout=args.vertical_layout
)
def export_schema(args):
"""Export to JSON Schemas
:param args: args from CLI
"""
from jina import __version__
if args.yaml_path:
dump_api = api_to_dict()
for yp in args.yaml_path:
f_name = (yp % __version__) if '%s' in yp else yp
with open(f_name, 'w', encoding='utf8') as fp:
JAML.dump(dump_api, fp)
default_logger.info(f'API is exported to {f_name}')
if args.json_path:
dump_api = api_to_dict()
for jp in args.json_path:
f_name = (jp % __version__) if '%s' in jp else jp
with open(f_name, 'w', encoding='utf8') as fp:
json.dump(dump_api, fp, sort_keys=True)
default_logger.info(f'API is exported to {f_name}')
if args.schema_path:
dump_api = get_full_schema()
for jp in args.schema_path:
f_name = (jp % __version__) if '%s' in jp else jp
with open(f_name, 'w', encoding='utf8') as fp:
json.dump(dump_api, fp, sort_keys=True)
default_logger.info(f'API is exported to {f_name}')
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl
from docarray.typing.url.url_3d.point_cloud_url import PointCloud3DUrl
__all__ = ['Mesh3DUrl', 'PointCloud3DUrl']
|
from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl
from docarray.typing.url.url_3d.point_cloud_url import PointCloud3DUrl
__all__ = ['Mesh3DUrl', 'PointCloud3DUrl']
|
# model settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
model = dict(
type='FasterRCNN',
img_norm_cfg=img_norm_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
))
|
# model settings
model = dict(
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
))
|
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.retrievers import BaseRetriever, RetrieverLike
from pydantic import ConfigDict
class ContextualCompressionRetriever(BaseRetriever):
"""Retriever that wraps a base retriever and compresses the results."""
base_compressor: BaseDocumentCompressor
"""Compressor for compressing retrieved documents."""
base_retriever: RetrieverLike
"""Base Retriever to use for getting relevant documents."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> list[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
Sequence of relevant documents
"""
docs = self.base_retriever.invoke(
query, config={"callbacks": run_manager.get_child()}, **kwargs
)
if docs:
compressed_docs = self.base_compressor.compress_documents(
docs, query, callbacks=run_manager.get_child()
)
return list(compressed_docs)
return []
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
**kwargs: Any,
) -> list[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
"""
docs = await self.base_retriever.ainvoke(
query, config={"callbacks": run_manager.get_child()}, **kwargs
)
if docs:
compressed_docs = await self.base_compressor.acompress_documents(
docs, query, callbacks=run_manager.get_child()
)
return list(compressed_docs)
return []
|
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.retrievers import BaseRetriever, RetrieverLike
from pydantic import ConfigDict
class ContextualCompressionRetriever(BaseRetriever):
"""Retriever that wraps a base retriever and compresses the results."""
base_compressor: BaseDocumentCompressor
"""Compressor for compressing retrieved documents."""
base_retriever: RetrieverLike
"""Base Retriever to use for getting relevant documents."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> list[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
Sequence of relevant documents
"""
docs = self.base_retriever.invoke(
query, config={"callbacks": run_manager.get_child()}, **kwargs
)
if docs:
compressed_docs = self.base_compressor.compress_documents(
docs, query, callbacks=run_manager.get_child()
)
return list(compressed_docs)
else:
return []
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
**kwargs: Any,
) -> list[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
"""
docs = await self.base_retriever.ainvoke(
query, config={"callbacks": run_manager.get_child()}, **kwargs
)
if docs:
compressed_docs = await self.base_compressor.acompress_documents(
docs, query, callbacks=run_manager.get_child()
)
return list(compressed_docs)
else:
return []
|
"""This module is deprecated and will be removed in a future release.
Please use LangChainTracer instead.
"""
from typing import Any
def get_headers(*args: Any, **kwargs: Any) -> Any:
"""Throw an error because this has been replaced by get_headers."""
msg = (
"get_headers for LangChainTracerV1 is no longer supported. "
"Please use LangChainTracer instead."
)
raise RuntimeError(msg)
def LangChainTracerV1(*args: Any, **kwargs: Any) -> Any: # noqa: N802
"""Throw an error because this has been replaced by LangChainTracer."""
msg = (
"LangChainTracerV1 is no longer supported. Please use LangChainTracer instead."
)
raise RuntimeError(msg)
|
from typing import Any
def get_headers(*args: Any, **kwargs: Any) -> Any:
"""Throw an error because this has been replaced by get_headers."""
msg = (
"get_headers for LangChainTracerV1 is no longer supported. "
"Please use LangChainTracer instead."
)
raise RuntimeError(msg)
def LangChainTracerV1(*args: Any, **kwargs: Any) -> Any: # noqa: N802
"""Throw an error because this has been replaced by LangChainTracer."""
msg = (
"LangChainTracerV1 is no longer supported. Please use LangChainTracer instead."
)
raise RuntimeError(msg)
|
import torch
from torchvision.prototype import features
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int, temporal_dim: int = -4) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[temporal_dim] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, temporal_dim, indices)
def uniform_temporal_subsample(
inpt: features.VideoTypeJIT, num_samples: int, temporal_dim: int = -4
) -> features.VideoTypeJIT:
if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, features.Video)):
return uniform_temporal_subsample_video(inpt, num_samples, temporal_dim=temporal_dim)
elif isinstance(inpt, features.Video):
if temporal_dim != -4 and inpt.ndim - 4 != temporal_dim:
raise ValueError("Video inputs must have temporal_dim equivalent to -4")
output = uniform_temporal_subsample_video(
inpt.as_subclass(torch.Tensor), num_samples, temporal_dim=temporal_dim
)
return features.Video.wrap_like(inpt, output)
else:
raise TypeError(
f"Input can either be a plain tensor or a `Video` tensor subclass, but got {type(inpt)} instead."
)
|
import torch
from torchvision.prototype import features
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int, temporal_dim: int = -4) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[temporal_dim] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, temporal_dim, indices)
def uniform_temporal_subsample(
inpt: features.VideoTypeJIT, num_samples: int, temporal_dim: int = -4
) -> features.VideoTypeJIT:
if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, features.Video)):
return uniform_temporal_subsample_video(inpt, num_samples, temporal_dim=temporal_dim)
else: # isinstance(inpt, features.Video)
if temporal_dim != -4 and inpt.ndim - 4 != temporal_dim:
raise ValueError("Video inputs must have temporal_dim equivalent to -4")
output = uniform_temporal_subsample_video(
inpt.as_subclass(torch.Tensor), num_samples, temporal_dim=temporal_dim
)
return features.Video.wrap_like(inpt, output)
|
import io
import warnings
from abc import ABC
import numpy as np
from typing_extensions import TYPE_CHECKING
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library, is_notebook
if TYPE_CHECKING:
from docarray.typing.bytes.image_bytes import ImageBytes
class AbstractImageTensor(AbstractTensor, ABC):
def to_bytes(self, format: str = 'PNG') -> 'ImageBytes':
"""
Convert image tensor to [`ImageBytes`][docarray.typing.ImageBytes].
:param format: the image format use to store the image, can be 'PNG' , 'JPG' ...
:return: an ImageBytes object
"""
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
if format == 'jpg':
format = 'jpeg' # unify it to ISO standard
tensor = self.get_comp_backend().to_numpy(self)
mode = 'RGB' if tensor.ndim == 3 else 'L'
pil_image = PILImage.fromarray(tensor, mode=mode)
with io.BytesIO() as buffer:
pil_image.save(buffer, format=format)
img_byte_arr = buffer.getvalue()
from docarray.typing.bytes.image_bytes import ImageBytes
return ImageBytes(img_byte_arr)
def save(self, file_path: str) -> None:
"""
Save image tensor to an image file.
:param file_path: path to an image file. If file is a string, open the file by
that name, otherwise treat it as a file-like object.
"""
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
comp_backend = self.get_comp_backend()
np_img = comp_backend.to_numpy(self).astype(np.uint8)
pil_img = PILImage.fromarray(np_img)
pil_img.save(file_path)
def display(self) -> None:
"""
Display image data from tensor in notebook.
"""
if is_notebook():
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
np_array = self.get_comp_backend().to_numpy(self)
img = PILImage.fromarray(np_array)
from IPython.display import display
display(img)
else:
warnings.warn('Display of image is only possible in a notebook.')
|
import io
import warnings
from abc import ABC
import numpy as np
from typing_extensions import TYPE_CHECKING
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library, is_notebook
if TYPE_CHECKING:
from docarray.typing.bytes.image_bytes import ImageBytes
class AbstractImageTensor(AbstractTensor, ABC):
def to_bytes(self, format: str = 'PNG') -> 'ImageBytes':
"""
Convert image tensor to ImageBytes.
:param format: the image format use to store the image, can be 'PNG' , 'JPG' ...
:return: an ImageBytes object
"""
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
if format == 'jpg':
format = 'jpeg' # unify it to ISO standard
tensor = self.get_comp_backend().to_numpy(self)
mode = 'RGB' if tensor.ndim == 3 else 'L'
pil_image = PILImage.fromarray(tensor, mode=mode)
with io.BytesIO() as buffer:
pil_image.save(buffer, format=format)
img_byte_arr = buffer.getvalue()
from docarray.typing.bytes.image_bytes import ImageBytes
return ImageBytes(img_byte_arr)
def save(self, file_path: str) -> None:
"""
Save image tensor to an image file.
:param file_path: path to an image file. If file is a string, open the file by
that name, otherwise treat it as a file-like object.
"""
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
comp_backend = self.get_comp_backend()
np_img = comp_backend.to_numpy(self).astype(np.uint8)
pil_img = PILImage.fromarray(np_img)
pil_img.save(file_path)
def display(self) -> None:
"""
Display image data from tensor in notebook.
"""
if is_notebook():
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
np_array = self.get_comp_backend().to_numpy(self)
img = PILImage.fromarray(np_array)
from IPython.display import display
display(img)
else:
warnings.warn('Display of image is only possible in a notebook.')
|
"""
Utility Tools for the Portkey Class.
This file module contains a collection of utility functions designed to enhance
the functionality and usability of the Portkey class
"""
from typing import TYPE_CHECKING, List
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.llms.anthropic import Anthropic
from llama_index.llms.anthropic.utils import CLAUDE_MODELS
from llama_index.llms.openai import OpenAI
from llama_index.llms.openai.utils import (
AZURE_TURBO_MODELS,
GPT3_5_MODELS,
GPT3_MODELS,
GPT4_MODELS,
TURBO_MODELS,
)
if TYPE_CHECKING:
from portkey import (
LLMOptions,
PortkeyResponse,
)
IMPORT_ERROR_MESSAGE = (
"Portkey is not installed.Please install it with `pip install portkey-ai`."
)
DISCONTINUED_MODELS = {
"code-davinci-002": 8001,
"code-davinci-001": 8001,
"code-cushman-002": 2048,
"code-cushman-001": 2048,
}
DEFAULT_MODEL = "gpt-3.5-turbo"
AVAILABLE_INTEGRATIONS = (OpenAI, Anthropic)
CLUADE_MODEL_FULLVERSION_MAP = {
"claude-instant-1": "claude-instant-1.2",
"claude-2": "claude-2.0",
}
ALL_AVAILABLE_MODELS = {
**GPT4_MODELS,
**TURBO_MODELS,
**GPT3_5_MODELS,
**GPT3_MODELS,
**AZURE_TURBO_MODELS,
**CLAUDE_MODELS,
}
CHAT_MODELS = {
**GPT4_MODELS,
**TURBO_MODELS,
**AZURE_TURBO_MODELS,
}
def is_chat_model(model: str) -> bool:
"""
Check if a given model is a chat-based language model.
This function takes a model name or identifier as input and determines whether
the model is designed for chat-based language generation, conversation, or
interaction.
Args:
model (str): The name or identifier of the model to be checked.
Returns:
bool: True if the provided model is a chat-based language model,
False otherwise.
"""
return model in CHAT_MODELS
def modelname_to_contextsize(modelname: str) -> int:
"""
Calculate the maximum number of tokens possible to generate for a model.
Args:
modelname: The modelname we want to know the context size for.
Returns:
The maximum context size
Example:
.. code-block:: python
max_tokens = modelname_to_contextsize("text-davinci-003")
"""
# handling finetuned models
if "ft-" in modelname: # legacy fine-tuning
modelname = modelname.split(":")[0]
elif modelname.startswith("ft:"):
modelname = modelname.split(":")[1]
if modelname in DISCONTINUED_MODELS:
raise ValueError(
f"Model {modelname} has been discontinued. Please choose another model."
)
context_size = ALL_AVAILABLE_MODELS.get(modelname)
if context_size is None:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid model name."
"Known models are: " + ", ".join(ALL_AVAILABLE_MODELS.keys())
)
return context_size
def generate_llm_metadata(llm: "LLMOptions") -> LLMMetadata:
"""
Generate metadata for a Language Model (LLM) instance.
This function takes an instance of a Language Model (LLM) and generates
metadata based on the provided instance. The metadata includes information
such as the context window, number of output tokens, chat model status,
and model name.
Parameters
----------
llm (LLM): An instance of a Language Model (LLM) from which metadata
will be generated.
Returns
-------
LLMMetadata: A data structure containing metadata attributes such as
context window, number of output tokens, chat model status, and
model name.
Raises
------
ValueError: If the provided 'llm' is not an instance of
llama_index.core.llms.LLM.
"""
try:
from portkey import LLMOptions
except ImportError as exc:
raise ImportError(IMPORT_ERROR_MESSAGE) from exc
if not isinstance(llm, LLMOptions):
raise ValueError("llm must be an instance of portkey.LLMOptions")
return LLMMetadata(
_context_window=modelname_to_contextsize(llm.model or ""),
is_chat_model=is_chat_model(llm.model or ""),
model_name=llm.model,
)
def get_llm(response: "PortkeyResponse", llms: List["LLMOptions"]) -> "LLMOptions":
# TODO: Update this logic over here.
try:
from portkey import LLMOptions
except ImportError as exc:
raise ImportError(IMPORT_ERROR_MESSAGE) from exc
fallback_llm = LLMOptions.construct()
for llm in llms:
model = llm.model
if model == response.model:
fallback_llm = llm
break
if fallback_llm is None:
raise ValueError("Failed to get the fallback LLM")
return fallback_llm
|
"""
Utility Tools for the Portkey Class.
This file module contains a collection of utility functions designed to enhance
the functionality and usability of the Portkey class
"""
from typing import TYPE_CHECKING, List
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.llms.anthropic import Anthropic
from llama_index.llms.anthropic.utils import CLAUDE_MODELS
from llama_index.llms.openai import OpenAI
from llama_index.llms.openai.utils import (
AZURE_TURBO_MODELS,
GPT3_5_MODELS,
GPT3_MODELS,
GPT4_MODELS,
TURBO_MODELS,
)
if TYPE_CHECKING:
from portkey import (
LLMOptions,
PortkeyResponse,
)
IMPORT_ERROR_MESSAGE = (
"Portkey is not installed.Please install it with `pip install portkey-ai`."
)
DISCONTINUED_MODELS = {
"code-davinci-002": 8001,
"code-davinci-001": 8001,
"code-cushman-002": 2048,
"code-cushman-001": 2048,
}
DEFAULT_MODEL = "gpt-3.5-turbo"
AVAILABLE_INTEGRATIONS = (OpenAI, Anthropic)
CLUADE_MODEL_FULLVERSION_MAP = {
"claude-instant-1": "claude-instant-1.2",
"claude-2": "claude-2.0",
}
ALL_AVAILABLE_MODELS = {
**GPT4_MODELS,
**TURBO_MODELS,
**GPT3_5_MODELS,
**GPT3_MODELS,
**AZURE_TURBO_MODELS,
**CLAUDE_MODELS,
}
CHAT_MODELS = {
**GPT4_MODELS,
**TURBO_MODELS,
**AZURE_TURBO_MODELS,
}
def is_chat_model(model: str) -> bool:
"""
Check if a given model is a chat-based language model.
This function takes a model name or identifier as input and determines whether
the model is designed for chat-based language generation, conversation, or
interaction.
Args:
model (str): The name or identifier of the model to be checked.
Returns:
bool: True if the provided model is a chat-based language model,
False otherwise.
"""
return model in CHAT_MODELS
def modelname_to_contextsize(modelname: str) -> int:
"""
Calculate the maximum number of tokens possible to generate for a model.
Args:
modelname: The modelname we want to know the context size for.
Returns:
The maximum context size
Example:
.. code-block:: python
max_tokens = modelname_to_contextsize("text-davinci-003")
"""
# handling finetuned models
if "ft-" in modelname: # legacy fine-tuning
modelname = modelname.split(":")[0]
elif modelname.startswith("ft:"):
modelname = modelname.split(":")[1]
if modelname in DISCONTINUED_MODELS:
raise ValueError(
f"Model {modelname} has been discontinued. " "Please choose another model."
)
context_size = ALL_AVAILABLE_MODELS.get(modelname)
if context_size is None:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid model name."
"Known models are: " + ", ".join(ALL_AVAILABLE_MODELS.keys())
)
return context_size
def generate_llm_metadata(llm: "LLMOptions") -> LLMMetadata:
"""
Generate metadata for a Language Model (LLM) instance.
This function takes an instance of a Language Model (LLM) and generates
metadata based on the provided instance. The metadata includes information
such as the context window, number of output tokens, chat model status,
and model name.
Parameters
----------
llm (LLM): An instance of a Language Model (LLM) from which metadata
will be generated.
Returns
-------
LLMMetadata: A data structure containing metadata attributes such as
context window, number of output tokens, chat model status, and
model name.
Raises
------
ValueError: If the provided 'llm' is not an instance of
llama_index.core.llms.LLM.
"""
try:
from portkey import LLMOptions
except ImportError as exc:
raise ImportError(IMPORT_ERROR_MESSAGE) from exc
if not isinstance(llm, LLMOptions):
raise ValueError("llm must be an instance of portkey.LLMOptions")
return LLMMetadata(
_context_window=modelname_to_contextsize(llm.model or ""),
is_chat_model=is_chat_model(llm.model or ""),
model_name=llm.model,
)
def get_llm(response: "PortkeyResponse", llms: List["LLMOptions"]) -> "LLMOptions":
# TODO: Update this logic over here.
try:
from portkey import LLMOptions
except ImportError as exc:
raise ImportError(IMPORT_ERROR_MESSAGE) from exc
fallback_llm = LLMOptions.construct()
for llm in llms:
model = llm.model
if model == response.model:
fallback_llm = llm
break
if fallback_llm is None:
raise ValueError("Failed to get the fallback LLM")
return fallback_llm
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.preprocessing.sequence import (
TimeseriesGenerator as TimeseriesGenerator,
)
from keras.src.legacy.preprocessing.sequence import (
make_sampling_table as make_sampling_table,
)
from keras.src.legacy.preprocessing.sequence import skipgrams as skipgrams
from keras.src.utils.sequence_utils import pad_sequences as pad_sequences
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.preprocessing.sequence import TimeseriesGenerator
from keras.src.legacy.preprocessing.sequence import make_sampling_table
from keras.src.legacy.preprocessing.sequence import skipgrams
from keras.src.utils.sequence_utils import pad_sequences
|
from llama_index.observability.otel.base import LlamaIndexOpenTelemetry
__all__ = [
"LlamaIndexOpenTelemetry",
]
|
from llama_index.observability.otel.base import (
LlamaIndexOpenTelemetry
)
__all__ = [
"LlamaIndexOpenTelemetry",
]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
|
"""Global Gemini Utilities (shared between Gemini LLM and Vertex)."""
from __future__ import annotations
from collections.abc import Sequence
from llama_index.core.base.llms.types import ChatMessage, MessageRole
ROLES_TO_GEMINI: dict[MessageRole, MessageRole] = {
MessageRole.USER: MessageRole.USER,
MessageRole.ASSISTANT: MessageRole.MODEL,
## Gemini chat mode only has user and model roles. Put the rest in user role.
MessageRole.SYSTEM: MessageRole.USER,
MessageRole.MODEL: MessageRole.MODEL,
## Gemini has function role, but chat mode only accepts user and model roles.
## https://medium.com/@smallufo/openai-vs-gemini-function-calling-a664f7f2b29f
## Agent response's 'tool/function' role is converted to 'user' role.
MessageRole.TOOL: MessageRole.USER,
MessageRole.FUNCTION: MessageRole.USER,
}
ROLES_FROM_GEMINI: dict[str, MessageRole] = {
## Gemini has user, model and function roles.
"user": MessageRole.USER,
"model": MessageRole.ASSISTANT,
"function": MessageRole.TOOL,
}
def merge_neighboring_same_role_messages(
messages: Sequence[ChatMessage],
) -> Sequence[ChatMessage]:
if len(messages) < 2:
# Nothing to merge
return messages
# Gemini does not support multiple messages of the same role in a row, so we merge them
merged_messages = []
i = 0
while i < len(messages):
current_message = messages[i]
# Initialize merged content with current message content
merged_content = current_message.blocks
# Check if the next message exists and has the same role
while (
i + 1 < len(messages)
and ROLES_TO_GEMINI[messages[i + 1].role]
== ROLES_TO_GEMINI[current_message.role]
):
i += 1
next_message = messages[i]
merged_content.extend(next_message.blocks)
# Create a new ChatMessage or similar object with merged content
merged_message = ChatMessage(
role=ROLES_TO_GEMINI[current_message.role],
blocks=merged_content,
additional_kwargs=current_message.additional_kwargs,
)
merged_messages.append(merged_message)
i += 1
return merged_messages
|
"""Global Gemini Utilities (shared between Gemini LLM and Vertex)."""
from __future__ import annotations
from collections.abc import Sequence
from llama_index.core.base.llms.types import ChatMessage, MessageRole
ROLES_TO_GEMINI: dict[MessageRole, MessageRole] = {
MessageRole.USER: MessageRole.USER,
MessageRole.ASSISTANT: MessageRole.MODEL,
## Gemini chat mode only has user and model roles. Put the rest in user role.
MessageRole.SYSTEM: MessageRole.USER,
MessageRole.MODEL: MessageRole.MODEL,
## Gemini has function role, but chat mode only accepts user and model roles.
## https://medium.com/@smallufo/openai-vs-gemini-function-calling-a664f7f2b29f
## Agent response's 'tool/function' role is converted to 'user' role.
MessageRole.TOOL: MessageRole.USER,
MessageRole.FUNCTION: MessageRole.USER,
}
ROLES_FROM_GEMINI: dict[str, MessageRole] = {
## Gemini has user, model and function roles.
"user": MessageRole.USER,
"model": MessageRole.ASSISTANT,
"function": MessageRole.TOOL,
}
def merge_neighboring_same_role_messages(
messages: Sequence[ChatMessage],
) -> Sequence[ChatMessage]:
if len(messages) < 2:
# Nothing to merge
return messages
# Gemini does not support multiple messages of the same role in a row, so we merge them
merged_messages = []
i = 0
while i < len(messages):
current_message = messages[i]
# Initialize merged content with current message content
merged_content = [current_message.content]
# Check if the next message exists and has the same role
while (
i + 1 < len(messages)
and ROLES_TO_GEMINI[messages[i + 1].role]
== ROLES_TO_GEMINI[current_message.role]
):
i += 1
next_message = messages[i]
merged_content.extend([next_message.content])
# Create a new ChatMessage or similar object with merged content
merged_message = ChatMessage(
role=ROLES_TO_GEMINI[current_message.role],
content="\n".join([str(msg_content) for msg_content in merged_content]),
additional_kwargs=current_message.additional_kwargs,
)
merged_messages.append(merged_message)
i += 1
return merged_messages
|
# Copyright (c) OpenMMLab. All rights reserved.
from .history_buffer import HistoryBuffer
from .logger import MMLogger, print_log
from .message_hub import MessageHub
__all__ = ['HistoryBuffer', 'MessageHub', 'MMLogger', 'print_log']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .log_buffer import LogBuffer
from .logger import MMLogger, print_log
from .message_hub import MessageHub
__all__ = ['LogBuffer', 'MessageHub', 'MMLogger', 'print_log']
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from mmdet.datasets import CocoDataset
class TestCocoDataset(unittest.TestCase):
def test_coco_dataset(self):
# test CocoDataset
metainfo = dict(classes=('bus', 'car'), task_name='new_task')
dataset = CocoDataset(
data_prefix=dict(img='imgs'),
ann_file='tests/data/coco_sample.json',
metainfo=metainfo,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=[],
serialize_data=False,
lazy_init=False)
self.assertEqual(dataset.metainfo['classes'], ('bus', 'car'))
self.assertEqual(dataset.metainfo['task_name'], 'new_task')
self.assertListEqual(dataset.get_cat_ids(0), [0, 1])
def test_coco_dataset_without_filter_cfg(self):
# test CocoDataset without filter_cfg
dataset = CocoDataset(
data_prefix=dict(img='imgs'),
ann_file='tests/data/coco_sample.json',
pipeline=[])
self.assertEqual(len(dataset), 4)
# test with test_mode = True
dataset = CocoDataset(
data_prefix=dict(img='imgs'),
ann_file='tests/data/coco_sample.json',
test_mode=True,
pipeline=[])
self.assertEqual(len(dataset), 4)
def test_coco_annotation_ids_unique(self):
# test annotation ids not unique error
metainfo = dict(classes=('car', ), task_name='new_task')
with self.assertRaisesRegex(AssertionError, 'are not unique!'):
CocoDataset(
data_prefix=dict(img='imgs'),
ann_file='tests/data/coco_wrong_format_sample.json',
metainfo=metainfo,
pipeline=[])
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from mmdet.datasets import CocoDataset
class TestCocoDataset(unittest.TestCase):
def test_coco_dataset(self):
# test CocoDataset
metainfo = dict(CLASSES=('bus', 'car'), task_name='new_task')
dataset = CocoDataset(
data_prefix=dict(img='imgs'),
ann_file='tests/data/coco_sample.json',
metainfo=metainfo,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=[],
serialize_data=False,
lazy_init=False)
self.assertEqual(dataset.metainfo['CLASSES'], ('bus', 'car'))
self.assertEqual(dataset.metainfo['task_name'], 'new_task')
self.assertListEqual(dataset.get_cat_ids(0), [0, 1])
def test_coco_dataset_without_filter_cfg(self):
# test CocoDataset without filter_cfg
dataset = CocoDataset(
data_prefix=dict(img='imgs'),
ann_file='tests/data/coco_sample.json',
pipeline=[])
self.assertEqual(len(dataset), 4)
# test with test_mode = True
dataset = CocoDataset(
data_prefix=dict(img='imgs'),
ann_file='tests/data/coco_sample.json',
test_mode=True,
pipeline=[])
self.assertEqual(len(dataset), 4)
def test_coco_annotation_ids_unique(self):
# test annotation ids not unique error
metainfo = dict(CLASSES=('car', ), task_name='new_task')
with self.assertRaisesRegex(AssertionError, 'are not unique!'):
CocoDataset(
data_prefix=dict(img='imgs'),
ann_file='tests/data/coco_wrong_format_sample.json',
metainfo=metainfo,
pipeline=[])
|
__version__ = '0.12.3'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.12.2'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
from typing import List
import datasets
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""BuilderConfig for ImageFolder."""
drop_labels: bool = None
drop_metadata: bool = None
def __post_init__(self):
super().__post_init__()
class ImageFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Image
BASE_COLUMN_NAME = "image"
BUILDER_CONFIG_CLASS = ImageFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script
# Obtained with:
# ```
# import PIL.Image
# IMAGE_EXTENSIONS = []
# PIL.Image.init()
# for ext, format in PIL.Image.EXTENSION.items():
# if format in PIL.Image.OPEN:
# IMAGE_EXTENSIONS.append(ext[1:])
# ```
# We intentionally do not run this code on launch because:
# (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
IMAGE_EXTENSIONS = [
".blp",
".bmp",
".dib",
".bufr",
".cur",
".pcx",
".dcx",
".dds",
".ps",
".eps",
".fit",
".fits",
".fli",
".flc",
".ftc",
".ftu",
".gbr",
".gif",
".grib",
".h5",
".hdf",
".png",
".apng",
".jp2",
".j2k",
".jpc",
".jpf",
".jpx",
".j2c",
".icns",
".ico",
".im",
".iim",
".tif",
".tiff",
".jfif",
".jpe",
".jpg",
".jpeg",
".mpg",
".mpeg",
".msp",
".pcd",
".pxr",
".pbm",
".pgm",
".ppm",
".pnm",
".psd",
".bw",
".rgb",
".rgba",
".sgi",
".ras",
".tga",
".icb",
".vda",
".vst",
".webp",
".wmf",
".emf",
".xbm",
".xpm",
]
ImageFolder.EXTENSIONS = IMAGE_EXTENSIONS
|
from typing import List
import datasets
from datasets.tasks import ImageClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""BuilderConfig for ImageFolder."""
drop_labels: bool = None
drop_metadata: bool = None
def __post_init__(self):
super().__post_init__()
class ImageFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Image
BASE_COLUMN_NAME = "image"
BUILDER_CONFIG_CLASS = ImageFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script
CLASSIFICATION_TASK = ImageClassification(image_column="image", label_column="label")
# Obtained with:
# ```
# import PIL.Image
# IMAGE_EXTENSIONS = []
# PIL.Image.init()
# for ext, format in PIL.Image.EXTENSION.items():
# if format in PIL.Image.OPEN:
# IMAGE_EXTENSIONS.append(ext[1:])
# ```
# We intentionally do not run this code on launch because:
# (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
IMAGE_EXTENSIONS = [
".blp",
".bmp",
".dib",
".bufr",
".cur",
".pcx",
".dcx",
".dds",
".ps",
".eps",
".fit",
".fits",
".fli",
".flc",
".ftc",
".ftu",
".gbr",
".gif",
".grib",
".h5",
".hdf",
".png",
".apng",
".jp2",
".j2k",
".jpc",
".jpf",
".jpx",
".j2c",
".icns",
".ico",
".im",
".iim",
".tif",
".tiff",
".jfif",
".jpe",
".jpg",
".jpeg",
".mpg",
".mpeg",
".msp",
".pcd",
".pxr",
".pbm",
".pgm",
".ppm",
".pnm",
".psd",
".bw",
".rgb",
".rgba",
".sgi",
".ras",
".tga",
".icb",
".vda",
".vst",
".webp",
".wmf",
".emf",
".xbm",
".xpm",
]
ImageFolder.EXTENSIONS = IMAGE_EXTENSIONS
|
import asyncio
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class MergerRetriever(BaseRetriever):
"""Retriever that merges the results of multiple retrievers."""
retrievers: list[BaseRetriever]
"""A list of retrievers to merge."""
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> list[Document]:
"""
Get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
# Merge the results of the retrievers.
merged_documents = self.merge_documents(query, run_manager)
return merged_documents
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> list[Document]:
"""
Asynchronously get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
# Merge the results of the retrievers.
merged_documents = await self.amerge_documents(query, run_manager)
return merged_documents
def merge_documents(
self, query: str, run_manager: CallbackManagerForRetrieverRun
) -> list[Document]:
"""
Merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
"""
# Get the results of all retrievers.
retriever_docs = [
retriever.invoke(
query,
config={"callbacks": run_manager.get_child(f"retriever_{i + 1}")},
)
for i, retriever in enumerate(self.retrievers)
]
# Merge the results of the retrievers.
merged_documents = []
max_docs = max(map(len, retriever_docs), default=0)
for i in range(max_docs):
for retriever, doc in zip(self.retrievers, retriever_docs):
if i < len(doc):
merged_documents.append(doc[i])
return merged_documents
async def amerge_documents(
self, query: str, run_manager: AsyncCallbackManagerForRetrieverRun
) -> list[Document]:
"""
Asynchronously merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
"""
# Get the results of all retrievers.
retriever_docs = await asyncio.gather(
*(
retriever.ainvoke(
query,
config={"callbacks": run_manager.get_child(f"retriever_{i + 1}")},
)
for i, retriever in enumerate(self.retrievers)
)
)
# Merge the results of the retrievers.
merged_documents = []
max_docs = max(map(len, retriever_docs), default=0)
for i in range(max_docs):
for retriever, doc in zip(self.retrievers, retriever_docs):
if i < len(doc):
merged_documents.append(doc[i])
return merged_documents
|
import asyncio
from typing import List
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class MergerRetriever(BaseRetriever):
"""Retriever that merges the results of multiple retrievers."""
retrievers: List[BaseRetriever]
"""A list of retrievers to merge."""
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> List[Document]:
"""
Get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
# Merge the results of the retrievers.
merged_documents = self.merge_documents(query, run_manager)
return merged_documents
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> List[Document]:
"""
Asynchronously get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
# Merge the results of the retrievers.
merged_documents = await self.amerge_documents(query, run_manager)
return merged_documents
def merge_documents(
self, query: str, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""
Merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
"""
# Get the results of all retrievers.
retriever_docs = [
retriever.invoke(
query,
config={
"callbacks": run_manager.get_child("retriever_{}".format(i + 1))
},
)
for i, retriever in enumerate(self.retrievers)
]
# Merge the results of the retrievers.
merged_documents = []
max_docs = max(map(len, retriever_docs), default=0)
for i in range(max_docs):
for retriever, doc in zip(self.retrievers, retriever_docs):
if i < len(doc):
merged_documents.append(doc[i])
return merged_documents
async def amerge_documents(
self, query: str, run_manager: AsyncCallbackManagerForRetrieverRun
) -> List[Document]:
"""
Asynchronously merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
"""
# Get the results of all retrievers.
retriever_docs = await asyncio.gather(
*(
retriever.ainvoke(
query,
config={
"callbacks": run_manager.get_child("retriever_{}".format(i + 1))
},
)
for i, retriever in enumerate(self.retrievers)
)
)
# Merge the results of the retrievers.
merged_documents = []
max_docs = max(map(len, retriever_docs), default=0)
for i in range(max_docs):
for retriever, doc in zip(self.retrievers, retriever_docs):
if i < len(doc):
merged_documents.append(doc[i])
return merged_documents
|
from __future__ import annotations
from typing import Any, Union
from langchain_core.retrievers import (
BaseRetriever,
RetrieverOutput,
)
from langchain_core.runnables import Runnable, RunnablePassthrough
def create_retrieval_chain(
retriever: Union[BaseRetriever, Runnable[dict, RetrieverOutput]],
combine_docs_chain: Runnable[dict[str, Any], str],
) -> Runnable:
"""Create retrieval chain that retrieves documents and then passes them on.
Args:
retriever: Retriever-like object that returns list of documents. Should
either be a subclass of BaseRetriever or a Runnable that returns
a list of documents. If a subclass of BaseRetriever, then it
is expected that an `input` key be passed in - this is what
is will be used to pass into the retriever. If this is NOT a
subclass of BaseRetriever, then all the inputs will be passed
into this runnable, meaning that runnable should take a dictionary
as input.
combine_docs_chain: Runnable that takes inputs and produces a string output.
The inputs to this will be any original inputs to this chain, a new
context key with the retrieved documents, and chat_history (if not present
in the inputs) with a value of `[]` (to easily enable conversational
retrieval.
Returns:
An LCEL Runnable. The Runnable return is a dictionary containing at the very
least a `context` and `answer` key.
Example:
.. code-block:: python
# pip install -U langchain langchain-community
from langchain_community.chat_models import ChatOpenAI
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains import create_retrieval_chain
from langchain import hub
retrieval_qa_chat_prompt = hub.pull("langchain-ai/retrieval-qa-chat")
llm = ChatOpenAI()
retriever = ...
combine_docs_chain = create_stuff_documents_chain(
llm, retrieval_qa_chat_prompt
)
retrieval_chain = create_retrieval_chain(retriever, combine_docs_chain)
retrieval_chain.invoke({"input": "..."})
"""
if not isinstance(retriever, BaseRetriever):
retrieval_docs: Runnable[dict, RetrieverOutput] = retriever
else:
retrieval_docs = (lambda x: x["input"]) | retriever
retrieval_chain = (
RunnablePassthrough.assign(
context=retrieval_docs.with_config(run_name="retrieve_documents"),
).assign(answer=combine_docs_chain)
).with_config(run_name="retrieval_chain")
return retrieval_chain
|
from __future__ import annotations
from typing import Any, Dict, Union
from langchain_core.retrievers import (
BaseRetriever,
RetrieverOutput,
)
from langchain_core.runnables import Runnable, RunnablePassthrough
def create_retrieval_chain(
retriever: Union[BaseRetriever, Runnable[dict, RetrieverOutput]],
combine_docs_chain: Runnable[Dict[str, Any], str],
) -> Runnable:
"""Create retrieval chain that retrieves documents and then passes them on.
Args:
retriever: Retriever-like object that returns list of documents. Should
either be a subclass of BaseRetriever or a Runnable that returns
a list of documents. If a subclass of BaseRetriever, then it
is expected that an `input` key be passed in - this is what
is will be used to pass into the retriever. If this is NOT a
subclass of BaseRetriever, then all the inputs will be passed
into this runnable, meaning that runnable should take a dictionary
as input.
combine_docs_chain: Runnable that takes inputs and produces a string output.
The inputs to this will be any original inputs to this chain, a new
context key with the retrieved documents, and chat_history (if not present
in the inputs) with a value of `[]` (to easily enable conversational
retrieval.
Returns:
An LCEL Runnable. The Runnable return is a dictionary containing at the very
least a `context` and `answer` key.
Example:
.. code-block:: python
# pip install -U langchain langchain-community
from langchain_community.chat_models import ChatOpenAI
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains import create_retrieval_chain
from langchain import hub
retrieval_qa_chat_prompt = hub.pull("langchain-ai/retrieval-qa-chat")
llm = ChatOpenAI()
retriever = ...
combine_docs_chain = create_stuff_documents_chain(
llm, retrieval_qa_chat_prompt
)
retrieval_chain = create_retrieval_chain(retriever, combine_docs_chain)
retrieval_chain.invoke({"input": "..."})
"""
if not isinstance(retriever, BaseRetriever):
retrieval_docs: Runnable[dict, RetrieverOutput] = retriever
else:
retrieval_docs = (lambda x: x["input"]) | retriever
retrieval_chain = (
RunnablePassthrough.assign(
context=retrieval_docs.with_config(run_name="retrieve_documents"),
).assign(answer=combine_docs_chain)
).with_config(run_name="retrieval_chain")
return retrieval_chain
|
from typing import Union
import PIL.Image
import torch
from torchvision.prototype import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import is_simple_tensor
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@torch.jit.unused
def erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image_tensor(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image_tensor(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
def erase(
inpt: Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT],
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT]:
if not torch.jit.is_scripting():
_log_api_usage_once(erase)
if torch.jit.is_scripting() or is_simple_tensor(inpt):
return erase_image_tensor(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
elif isinstance(inpt, datapoints.Image):
output = erase_image_tensor(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return datapoints.Image.wrap_like(inpt, output)
elif isinstance(inpt, datapoints.Video):
output = erase_video(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return datapoints.Video.wrap_like(inpt, output)
elif isinstance(inpt, PIL.Image.Image):
return erase_image_pil(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
else:
raise TypeError(
f"Input can either be a plain tensor, an `Image` or `Video` datapoint, or a PIL image, "
f"but got {type(inpt)} instead."
)
|
from typing import Union
import PIL.Image
import torch
from torchvision.prototype import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@torch.jit.unused
def erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image_tensor(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image_tensor(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
def erase(
inpt: Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT],
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT]:
if not torch.jit.is_scripting():
_log_api_usage_once(erase)
if isinstance(inpt, torch.Tensor) and (
torch.jit.is_scripting() or not isinstance(inpt, (datapoints.Image, datapoints.Video))
):
return erase_image_tensor(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
elif isinstance(inpt, datapoints.Image):
output = erase_image_tensor(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return datapoints.Image.wrap_like(inpt, output)
elif isinstance(inpt, datapoints.Video):
output = erase_video(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return datapoints.Video.wrap_like(inpt, output)
elif isinstance(inpt, PIL.Image.Image):
return erase_image_pil(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
else:
raise TypeError(
f"Input can either be a plain tensor, an `Image` or `Video` datapoint, or a PIL image, "
f"but got {type(inpt)} instead."
)
|
import copy as cp
import dataclasses
from dataclasses import fields
from functools import lru_cache
from typing import TYPE_CHECKING, Optional, Tuple, Dict
from docarray.dataclasses import is_multimodal
from docarray.helper import typename
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
@lru_cache()
def _get_fields(dc):
return [f.name for f in fields(dc)]
class BaseDCType:
_data_class = None
def __init__(
self: 'T',
_obj: Optional['T'] = None,
copy: bool = False,
field_resolver: Optional[Dict[str, str]] = None,
unknown_fields_handler: str = 'catch',
**kwargs,
):
self._data = None
if isinstance(_obj, type(self)):
if copy:
self.copy_from(_obj)
else:
self._data = _obj._data
elif isinstance(_obj, dict):
kwargs.update(_obj)
elif is_multimodal(_obj):
self._data = type(self)._from_dataclass(_obj)._data
if kwargs:
try:
if self._data is not None:
if self._unresolved_fields_dest in kwargs.keys():
getattr(self, self._unresolved_fields_dest).update(
kwargs[self._unresolved_fields_dest]
)
kwargs.pop(self._unresolved_fields_dest)
self._data = dataclasses.replace(self._data, **kwargs)
else:
self._data = self._data_class(self, **kwargs)
except TypeError as ex:
if unknown_fields_handler == 'raise':
raise AttributeError(f'unknown attributes') from ex
else:
if field_resolver:
kwargs = {
field_resolver.get(k, k): v for k, v in kwargs.items()
}
_fields = _get_fields(self._data_class)
_unknown_kwargs = None
_unresolved = set(kwargs.keys()).difference(_fields)
if _unresolved:
_unknown_kwargs = {k: kwargs[k] for k in _unresolved}
for k in _unresolved:
kwargs.pop(k)
if self._data is not None:
self._data = dataclasses.replace(self._data, **kwargs)
else:
self._data = self._data_class(self, **kwargs)
if _unknown_kwargs and unknown_fields_handler == 'catch':
getattr(self, self._unresolved_fields_dest).update(
_unknown_kwargs
)
for k in self._post_init_fields:
if k in kwargs:
setattr(self, k, kwargs[k])
if not _obj and not kwargs and self._data is None:
self._data = self._data_class(self)
if self._data is None:
raise ValueError(
f'Failed to initialize {typename(self)} from obj={_obj}, kwargs={kwargs}'
)
def copy_from(self: 'T', other: 'T') -> None:
"""Overwrite self by copying from another :class:`Document`.
:param other: the other Document to copy from
"""
self._data = cp.deepcopy(other._data)
def clear(self) -> None:
"""Clear all fields from this :class:`Document` to their default values."""
for f in self.non_empty_fields:
setattr(self._data, f, None)
def pop(self, *fields) -> None:
"""Clear some fields from this :class:`Document` to their default values.
:param fields: field names to clear.
"""
for f in fields:
if hasattr(self, f):
setattr(self._data, f, None)
@property
def non_empty_fields(self) -> Tuple[str]:
"""Get all non-emtpy fields of this :class:`Document`.
Non-empty fields are the fields with not-`None` and not-default values.
:return: field names in a tuple.
"""
return self._data._non_empty_fields
@property
def nbytes(self) -> int:
"""Return total bytes consumed by protobuf.
:return: number of bytes
"""
return len(bytes(self))
def __hash__(self):
return hash(self._data)
def __repr__(self):
content = str(self.non_empty_fields)
content += f' at {getattr(self, "id", id(self))}'
return f'<{self.__class__.__name__} {content.strip()}>'
def __bytes__(self):
return self.to_bytes()
def __eq__(self, other):
if type(self) is type(other):
return self._data == other._data
return False
|
import copy as cp
from dataclasses import fields
from functools import lru_cache
from typing import TYPE_CHECKING, Optional, Tuple, Dict
from docarray.dataclasses import is_multimodal
from docarray.helper import typename
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
@lru_cache()
def _get_fields(dc):
return [f.name for f in fields(dc)]
class BaseDCType:
_data_class = None
def __init__(
self: 'T',
_obj: Optional['T'] = None,
copy: bool = False,
field_resolver: Optional[Dict[str, str]] = None,
unknown_fields_handler: str = 'catch',
**kwargs,
):
self._data = None
if isinstance(_obj, type(self)):
if copy:
self.copy_from(_obj)
else:
self._data = _obj._data
elif isinstance(_obj, dict):
kwargs.update(_obj)
elif is_multimodal(_obj):
self._data = type(self)._from_dataclass(_obj)._data
if kwargs:
try:
self._data = self._data_class(self, **kwargs)
except TypeError as ex:
if unknown_fields_handler == 'raise':
raise AttributeError(f'unknown attributes') from ex
else:
if field_resolver:
kwargs = {
field_resolver.get(k, k): v for k, v in kwargs.items()
}
_fields = _get_fields(self._data_class)
_unknown_kwargs = None
_unresolved = set(kwargs.keys()).difference(_fields)
if _unresolved:
_unknown_kwargs = {k: kwargs[k] for k in _unresolved}
for k in _unresolved:
kwargs.pop(k)
self._data = self._data_class(self, **kwargs)
if _unknown_kwargs and unknown_fields_handler == 'catch':
getattr(self, self._unresolved_fields_dest).update(
_unknown_kwargs
)
for k in self._post_init_fields:
if k in kwargs:
setattr(self, k, kwargs[k])
if not _obj and not kwargs and self._data is None:
self._data = self._data_class(self)
if self._data is None:
raise ValueError(
f'Failed to initialize {typename(self)} from obj={_obj}, kwargs={kwargs}'
)
def copy_from(self: 'T', other: 'T') -> None:
"""Overwrite self by copying from another :class:`Document`.
:param other: the other Document to copy from
"""
self._data = cp.deepcopy(other._data)
def clear(self) -> None:
"""Clear all fields from this :class:`Document` to their default values."""
for f in self.non_empty_fields:
setattr(self._data, f, None)
def pop(self, *fields) -> None:
"""Clear some fields from this :class:`Document` to their default values.
:param fields: field names to clear.
"""
for f in fields:
if hasattr(self, f):
setattr(self._data, f, None)
@property
def non_empty_fields(self) -> Tuple[str]:
"""Get all non-emtpy fields of this :class:`Document`.
Non-empty fields are the fields with not-`None` and not-default values.
:return: field names in a tuple.
"""
return self._data._non_empty_fields
@property
def nbytes(self) -> int:
"""Return total bytes consumed by protobuf.
:return: number of bytes
"""
return len(bytes(self))
def __hash__(self):
return hash(self._data)
def __repr__(self):
content = str(self.non_empty_fields)
content += f' at {getattr(self, "id", id(self))}'
return f'<{self.__class__.__name__} {content.strip()}>'
def __bytes__(self):
return self.to_bytes()
def __eq__(self, other):
if type(self) is type(other):
return self._data == other._data
return False
|
import torch
from docarray import BaseDoc
from docarray.typing import TorchTensor
def test_tensor_ops():
class A(BaseDoc):
tensor: TorchTensor[3, 224, 224]
class B(BaseDoc):
tensor: TorchTensor[3, 112, 224]
tensor = A(tensor=torch.ones(3, 224, 224)).tensor
tensord = A(tensor=torch.ones(3, 224, 224)).tensor
tensorn = torch.zeros(3, 224, 224)
tensorhalf = B(tensor=torch.ones(3, 112, 224)).tensor
tensorfull = torch.cat([tensorhalf, tensorhalf], dim=1)
assert type(tensor) == TorchTensor
assert type(tensor + tensord) == TorchTensor
assert type(tensor + tensorn) == TorchTensor
assert type(tensor + tensorfull) == TorchTensor
|
import torch
from docarray import BaseDocument
from docarray.typing import TorchTensor
def test_tensor_ops():
class A(BaseDocument):
tensor: TorchTensor[3, 224, 224]
class B(BaseDocument):
tensor: TorchTensor[3, 112, 224]
tensor = A(tensor=torch.ones(3, 224, 224)).tensor
tensord = A(tensor=torch.ones(3, 224, 224)).tensor
tensorn = torch.zeros(3, 224, 224)
tensorhalf = B(tensor=torch.ones(3, 112, 224)).tensor
tensorfull = torch.cat([tensorhalf, tensorhalf], dim=1)
assert type(tensor) == TorchTensor
assert type(tensor + tensord) == TorchTensor
assert type(tensor + tensorn) == TorchTensor
assert type(tensor + tensorfull) == TorchTensor
|
import numpy as np
import numpy.typing as npt
def oscillator_bank(
frequencies,
amplitudes,
sample_rate: float,
time_axis: int = -2,
):
"""Reference implementation of oscillator_bank"""
invalid = np.abs(frequencies) >= sample_rate / 2
if np.any(invalid):
amplitudes = np.where(invalid, 0.0, amplitudes)
pi2 = 2.0 * np.pi
freqs = frequencies * pi2 / sample_rate % pi2
phases = np.cumsum(freqs, axis=time_axis, dtype=freqs.dtype)
waveform = amplitudes * np.sin(phases)
return waveform
def sinc_ir(cutoff, window_size: int = 513, high_pass: bool = False):
if window_size % 2 == 0:
raise ValueError(f"`window_size` must be odd. Given: {window_size}")
half = window_size // 2
dtype = cutoff.dtype
idx = np.linspace(-half, half, window_size, dtype=dtype)
filt = np.sinc(cutoff[..., None] * idx[None, ...])
filt *= np.hamming(window_size).astype(dtype)[None, ...]
filt /= np.abs(filt.sum(axis=-1, keepdims=True))
if high_pass:
filt *= -1
filt[..., half] = 1.0 + filt[..., half]
return filt
def freq_ir(magnitudes):
ir = np.fft.fftshift(np.fft.irfft(magnitudes), axes=-1)
window = np.hanning(ir.shape[-1])
return (ir * window).astype(magnitudes.dtype)
def exp_sigmoid(
input: npt.NDArray, exponent: float = 10.0, max_value: float = 2.0, threshold: float = 1e-7
) -> npt.NDArray:
"""Exponential Sigmoid pointwise nonlinearity (Numpy version).
Implements the equation:
``max_value`` * sigmoid(``input``) ** (log(``exponent``)) + ``threshold``
The output has a range of [``threshold``, ``max_value``].
``exponent`` controls the slope of the output.
Args:
input (np.ndarray): Input array
exponent (float, optional): Exponent. Controls the slope of the output
max_value (float, optional): Maximum value of the output
threshold (float, optional): Minimum value of the output
Returns:
np.ndarray: Exponential Sigmoid output. Shape: same as input
"""
return max_value * (1 / (1 + np.exp(-input, dtype=input.dtype))) ** np.log(exponent, dtype=input.dtype) + threshold
|
import numpy as np
def oscillator_bank(
frequencies,
amplitudes,
sample_rate: float,
time_axis: int = -2,
):
"""Reference implementation of oscillator_bank"""
invalid = np.abs(frequencies) >= sample_rate / 2
if np.any(invalid):
amplitudes = np.where(invalid, 0.0, amplitudes)
pi2 = 2.0 * np.pi
freqs = frequencies * pi2 / sample_rate % pi2
phases = np.cumsum(freqs, axis=time_axis, dtype=freqs.dtype)
waveform = amplitudes * np.sin(phases)
return waveform
def sinc_ir(cutoff, window_size: int = 513, high_pass: bool = False):
if window_size % 2 == 0:
raise ValueError(f"`window_size` must be odd. Given: {window_size}")
half = window_size // 2
dtype = cutoff.dtype
idx = np.linspace(-half, half, window_size, dtype=dtype)
filt = np.sinc(cutoff[..., None] * idx[None, ...])
filt *= np.hamming(window_size).astype(dtype)[None, ...]
filt /= np.abs(filt.sum(axis=-1, keepdims=True))
if high_pass:
filt *= -1
filt[..., half] = 1.0 + filt[..., half]
return filt
def freq_ir(magnitudes):
ir = np.fft.fftshift(np.fft.irfft(magnitudes), axes=-1)
window = np.hanning(ir.shape[-1])
return (ir * window).astype(magnitudes.dtype)
def exp_sigmoid(
input: np.ndarray, exponent: float = 10.0, max_value: float = 2.0, threshold: float = 1e-7
) -> np.ndarray:
"""Exponential Sigmoid pointwise nonlinearity (Numpy version).
Implements the equation:
``max_value`` * sigmoid(``input``) ** (log(``exponent``)) + ``threshold``
The output has a range of [``threshold``, ``max_value``].
``exponent`` controls the slope of the output.
Args:
input (np.ndarray): Input array
exponent (float, optional): Exponent. Controls the slope of the output
max_value (float, optional): Maximum value of the output
threshold (float, optional): Minimum value of the output
Returns:
np.ndarray: Exponential Sigmoid output. Shape: same as input
"""
return max_value * (1 / (1 + np.exp(-input, dtype=input.dtype))) ** np.log(exponent, dtype=input.dtype) + threshold
|
__version__ = '0.30.0'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
|
__version__ = '0.30.0a3'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners import ApproxMaxIoUAssigner
class TestApproxIoUAssigner(TestCase):
def test_approx_iou_assigner(self):
assigner = ApproxMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([2, 3])
pred_instances = InstanceData()
pred_instances.priors = bboxes
pred_instances.approxs = bboxes[:, None, :]
gt_instances = InstanceData()
gt_instances.bboxes = gt_bboxes
gt_instances.labels = gt_labels
assign_result = assigner.assign(pred_instances, gt_instances)
expected_gt_inds = torch.LongTensor([1, 0, 2, 0])
self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))
def test_approx_iou_assigner_with_empty_gt(self):
"""Test corner case where an image might have no true detections."""
assigner = ApproxMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([])
gt_labels = torch.LongTensor([])
pred_instances = InstanceData()
pred_instances.priors = bboxes
pred_instances.approxs = bboxes[:, None, :]
gt_instances = InstanceData()
gt_instances.bboxes = gt_bboxes
gt_instances.labels = gt_labels
assign_result = assigner.assign(pred_instances, gt_instances)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))
def test_approx_iou_assigner_with_empty_boxes(self):
"""Test corner case where an network might predict no boxes."""
assigner = ApproxMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([2, 3])
pred_instances = InstanceData()
pred_instances.priors = bboxes
pred_instances.approxs = bboxes[:, None, :]
gt_instances = InstanceData()
gt_instances.bboxes = gt_bboxes
gt_instances.labels = gt_labels
assign_result = assigner.assign(pred_instances, gt_instances)
self.assertEqual(len(assign_result.gt_inds), 0)
def test_approx_iou_assigner_with_empty_boxes_and_gt(self):
"""Test corner case where an network might predict no boxes and no
gt."""
assigner = ApproxMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.empty((0, 4))
gt_labels = torch.LongTensor([])
pred_instances = InstanceData()
pred_instances.priors = bboxes
pred_instances.approxs = bboxes[:, None, :]
gt_instances = InstanceData()
gt_instances.bboxes = gt_bboxes
gt_instances.labels = gt_labels
assign_result = assigner.assign(pred_instances, gt_instances)
self.assertEqual(len(assign_result.gt_inds), 0)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.data import InstanceData
from mmdet.models.task_modules.assigners import ApproxMaxIoUAssigner
class TestApproxIoUAssigner(TestCase):
def test_approx_iou_assigner(self):
assigner = ApproxMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([2, 3])
pred_instances = InstanceData()
pred_instances.priors = bboxes
pred_instances.approxs = bboxes[:, None, :]
gt_instances = InstanceData()
gt_instances.bboxes = gt_bboxes
gt_instances.labels = gt_labels
assign_result = assigner.assign(pred_instances, gt_instances)
expected_gt_inds = torch.LongTensor([1, 0, 2, 0])
self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))
def test_approx_iou_assigner_with_empty_gt(self):
"""Test corner case where an image might have no true detections."""
assigner = ApproxMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([])
gt_labels = torch.LongTensor([])
pred_instances = InstanceData()
pred_instances.priors = bboxes
pred_instances.approxs = bboxes[:, None, :]
gt_instances = InstanceData()
gt_instances.bboxes = gt_bboxes
gt_instances.labels = gt_labels
assign_result = assigner.assign(pred_instances, gt_instances)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))
def test_approx_iou_assigner_with_empty_boxes(self):
"""Test corner case where an network might predict no boxes."""
assigner = ApproxMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([2, 3])
pred_instances = InstanceData()
pred_instances.priors = bboxes
pred_instances.approxs = bboxes[:, None, :]
gt_instances = InstanceData()
gt_instances.bboxes = gt_bboxes
gt_instances.labels = gt_labels
assign_result = assigner.assign(pred_instances, gt_instances)
self.assertEqual(len(assign_result.gt_inds), 0)
def test_approx_iou_assigner_with_empty_boxes_and_gt(self):
"""Test corner case where an network might predict no boxes and no
gt."""
assigner = ApproxMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.empty((0, 4))
gt_labels = torch.LongTensor([])
pred_instances = InstanceData()
pred_instances.priors = bboxes
pred_instances.approxs = bboxes[:, None, :]
gt_instances = InstanceData()
gt_instances.bboxes = gt_bboxes
gt_instances.labels = gt_labels
assign_result = assigner.assign(pred_instances, gt_instances)
self.assertEqual(len(assign_result.gt_inds), 0)
|
from .backend_utils import set_audio_backend
from .case_utils import (
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfCudaSmallMemory,
skipIfNoAudioDevice,
skipIfNoCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoKaldi,
skipIfNoMacOS,
skipIfNoModule,
skipIfNoQengine,
skipIfNoSox,
skipIfPy310,
skipIfRocm,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
zip_equal,
)
from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
from .func_utils import torch_script
from .image_utils import get_image, rgb_to_gray, rgb_to_yuv_ccir, save_image
from .parameterized_utils import load_params, nested_params
from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"set_audio_backend",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"is_ffmpeg_available",
"skipIfNoAudioDevice",
"skipIfNoCtcDecoder",
"skipIfNoCuda",
"skipIfCudaSmallMemory",
"skipIfNoExec",
"skipIfNoMacOS",
"skipIfNoModule",
"skipIfNoKaldi",
"skipIfNoSox",
"skipIfNoSoxBackend",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfPy310",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
"rgb_to_gray",
"rgb_to_yuv_ccir",
"zip_equal",
]
|
from .backend_utils import set_audio_backend
from .case_utils import (
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfCudaSmallMemory,
skipIfNoCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoKaldi,
skipIfNoModule,
skipIfNoQengine,
skipIfNoSox,
skipIfPy310,
skipIfRocm,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
zip_equal,
)
from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
from .func_utils import torch_script
from .image_utils import get_image, rgb_to_gray, rgb_to_yuv_ccir, save_image
from .parameterized_utils import load_params, nested_params
from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"set_audio_backend",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"is_ffmpeg_available",
"skipIfNoCtcDecoder",
"skipIfNoCuda",
"skipIfCudaSmallMemory",
"skipIfNoExec",
"skipIfNoModule",
"skipIfNoKaldi",
"skipIfNoSox",
"skipIfNoSoxBackend",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfPy310",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
"rgb_to_gray",
"rgb_to_yuv_ccir",
"zip_equal",
]
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor
class PILToTensor(Transform):
"""[BETA] Convert a ``PIL Image`` to a tensor of the same type.
.. betastatus:: PILToTensor transform
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
_transformed_types = (PIL.Image.Image,)
def _transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImageTensor(Transform):
"""[BETA] Convert a tensor or an ndarray or PIL Image to :class:`~torchvision.datapoints.Image`.
.. betastatus:: ToImageTensor transform
This transform does not support torchscript.
"""
_transformed_types = (is_simple_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> datapoints.Image:
return F.to_image_tensor(inpt)
class ToImagePIL(Transform):
"""[BETA] Convert a tensor or an ndarray to PIL Image.
.. betastatus:: ToImagePIL transform
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while preserving the value range.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
- If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
_transformed_types = (is_simple_tensor, datapoints.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_image_pil(inpt, mode=self.mode)
# We changed the name to align them with the new naming scheme. Still, `ToPILImage` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ToPILImage = ToImagePIL
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor
class PILToTensor(Transform):
"""[BETA] Convert a ``PIL Image`` to a tensor of the same type.
.. betastatus:: PILToTensor transform
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
_transformed_types = (PIL.Image.Image,)
def _transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImageTensor(Transform):
_transformed_types = (is_simple_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> datapoints.Image:
return F.to_image_tensor(inpt)
class ToImagePIL(Transform):
"""[BETA] Convert a tensor or an ndarray to PIL Image.
.. betastatus:: ToImagePIL transform
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while preserving the value range.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
- If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
_transformed_types = (is_simple_tensor, datapoints.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_image_pil(inpt, mode=self.mode)
# We changed the name to align them with the new naming scheme. Still, `ToPILImage` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ToPILImage = ToImagePIL
|
from llama_index.core.graph_stores.types import GraphStore
from llama_index.graph_stores.neptune import (
NeptuneAnalyticsGraphStore,
NeptuneDatabaseGraphStore,
)
from llama_index.graph_stores.neptune.base import NeptuneBaseGraphStore
def test_neptune_analytics_graph_store():
names_of_bases = [b.__name__ for b in NeptuneAnalyticsGraphStore.__bases__]
assert NeptuneBaseGraphStore.__name__ in names_of_bases
def test_neptune_database_graph_store():
names_of_bases = [b.__name__ for b in NeptuneDatabaseGraphStore.__bases__]
assert NeptuneBaseGraphStore.__name__ in names_of_bases
def test_neptune_base_graph_store():
names_of_bases = [b.__name__ for b in NeptuneBaseGraphStore.__bases__]
assert GraphStore.__name__ in names_of_bases
|
from unittest.mock import MagicMock, patch
from llama_index.core.graph_stores.types import GraphStore
from llama_index.graph_stores.neptune import (
NeptuneAnalyticsGraphStore,
NeptuneDatabaseGraphStore,
)
@patch("llama_index.graph_stores.neptune.NeptuneAnalyticsGraphStore")
def test_neptune_analytics_graph_store(MockNeptuneAnalyticsGraphStore: MagicMock):
instance: NeptuneAnalyticsGraphStore = MockNeptuneAnalyticsGraphStore.return_value()
assert isinstance(instance, GraphStore)
@patch("llama_index.graph_stores.neptune.NeptuneDatabaseGraphStore")
def test_neptune_analytics_graph_store(MockNeptuneDatabaseGraphStore: MagicMock):
instance: NeptuneDatabaseGraphStore = MockNeptuneDatabaseGraphStore.return_value()
assert isinstance(instance, GraphStore)
|
"""Test PandasDataframeParser"""
from typing import Any
import pandas as pd
from langchain_core.exceptions import OutputParserException
from langchain.output_parsers.pandas_dataframe import PandasDataFrameOutputParser
df = pd.DataFrame(
{
"chicken": [1, 2, 3, 4],
"veggies": [5, 4, 3, 2],
"steak": [9, 8, 7, 6],
}
)
parser = PandasDataFrameOutputParser(dataframe=df)
# Test Invalid Column
def test_pandas_output_parser_col_no_array() -> None:
try:
parser.parse("column:num_legs")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Column with invalid array (above DataFrame max index)
def test_pandas_output_parser_col_oob() -> None:
try:
parser.parse("row:10")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Column with array [x]
def test_pandas_output_parser_col_first_elem() -> None:
expected_output = {"chicken": 1}
actual_output = parser.parse("column:chicken[0]")
assert actual_output == expected_output
# Test Column with array [x,y,z]
def test_pandas_output_parser_col_multi_elem() -> None:
expected_output = {"chicken": pd.Series([1, 2], name="chicken", dtype="int64")}
actual_output = parser.parse("column:chicken[0, 1]")
for key in actual_output.keys():
assert expected_output["chicken"].equals(actual_output[key])
# Test Row with invalid row entry
def test_pandas_output_parser_row_no_array() -> None:
try:
parser.parse("row:5")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Row with valid row entry
def test_pandas_output_parser_row_first() -> None:
expected_output = {"1": pd.Series({"chicken": 2, "veggies": 4, "steak": 8})}
actual_output = parser.parse("row:1")
assert actual_output["1"].equals(expected_output["1"])
# Test Row with invalid col entry
def test_pandas_output_parser_row_no_column() -> None:
try:
parser.parse("row:1[num_legs]")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Row with valid col entry
def test_pandas_output_parser_row_col_1() -> None:
expected_output = {"1": 2}
actual_output = parser.parse("row:1[chicken]")
assert actual_output == expected_output
def test_pandas_output_parser_special_ops() -> None:
actual_output = [
{"mean": 3.0},
{"median": 3.0},
{"min": 2},
{"max": 4},
{"var": 1.0},
{"std": 1.0},
{"count": 3},
{"quantile": 3.0},
]
expected_output = [
parser.parse("mean:chicken[1..3]"),
parser.parse("median:chicken[1..3]"),
parser.parse("min:chicken[1..3]"),
parser.parse("max:chicken[1..3]"),
parser.parse("var:chicken[1..3]"),
parser.parse("std:chicken[1..3]"),
parser.parse("count:chicken[1..3]"),
parser.parse("quantile:chicken[1..3]"),
]
assert actual_output == expected_output
def test_pandas_output_parser_invalid_special_op() -> None:
try:
parser.parse("riemann_sum:chicken")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
def test_pandas_output_parser_output_type() -> None:
"""Test the output type of the pandas dataframe output parser is a pandas dataframe.""" # noqa: E501
assert parser.OutputType == dict[str, Any]
|
"""Test PandasDataframeParser"""
from typing import Any, Dict
import pandas as pd
from langchain_core.exceptions import OutputParserException
from langchain.output_parsers.pandas_dataframe import PandasDataFrameOutputParser
df = pd.DataFrame(
{
"chicken": [1, 2, 3, 4],
"veggies": [5, 4, 3, 2],
"steak": [9, 8, 7, 6],
}
)
parser = PandasDataFrameOutputParser(dataframe=df)
# Test Invalid Column
def test_pandas_output_parser_col_no_array() -> None:
try:
parser.parse("column:num_legs")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Column with invalid array (above DataFrame max index)
def test_pandas_output_parser_col_oob() -> None:
try:
parser.parse("row:10")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Column with array [x]
def test_pandas_output_parser_col_first_elem() -> None:
expected_output = {"chicken": 1}
actual_output = parser.parse("column:chicken[0]")
assert actual_output == expected_output
# Test Column with array [x,y,z]
def test_pandas_output_parser_col_multi_elem() -> None:
expected_output = {"chicken": pd.Series([1, 2], name="chicken", dtype="int64")}
actual_output = parser.parse("column:chicken[0, 1]")
for key in actual_output.keys():
assert expected_output["chicken"].equals(actual_output[key])
# Test Row with invalid row entry
def test_pandas_output_parser_row_no_array() -> None:
try:
parser.parse("row:5")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Row with valid row entry
def test_pandas_output_parser_row_first() -> None:
expected_output = {"1": pd.Series({"chicken": 2, "veggies": 4, "steak": 8})}
actual_output = parser.parse("row:1")
assert actual_output["1"].equals(expected_output["1"])
# Test Row with invalid col entry
def test_pandas_output_parser_row_no_column() -> None:
try:
parser.parse("row:1[num_legs]")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Row with valid col entry
def test_pandas_output_parser_row_col_1() -> None:
expected_output = {"1": 2}
actual_output = parser.parse("row:1[chicken]")
assert actual_output == expected_output
def test_pandas_output_parser_special_ops() -> None:
actual_output = [
{"mean": 3.0},
{"median": 3.0},
{"min": 2},
{"max": 4},
{"var": 1.0},
{"std": 1.0},
{"count": 3},
{"quantile": 3.0},
]
expected_output = [
parser.parse("mean:chicken[1..3]"),
parser.parse("median:chicken[1..3]"),
parser.parse("min:chicken[1..3]"),
parser.parse("max:chicken[1..3]"),
parser.parse("var:chicken[1..3]"),
parser.parse("std:chicken[1..3]"),
parser.parse("count:chicken[1..3]"),
parser.parse("quantile:chicken[1..3]"),
]
assert actual_output == expected_output
def test_pandas_output_parser_invalid_special_op() -> None:
try:
parser.parse("riemann_sum:chicken")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
def test_pandas_output_parser_output_type() -> None:
"""Test the output type of the pandas dataframe output parser is a pandas dataframe.""" # noqa: E501
assert parser.OutputType is Dict[str, Any]
|
"""
Experimental support for external memory
========================================
This is similar to the one in `quantile_data_iterator.py`, but for external memory
instead of Quantile DMatrix. The feature is not ready for production use yet.
.. versionadded:: 1.5.0
See :doc:`the tutorial </tutorials/external_memory>` for more details.
"""
import os
import tempfile
from typing import Callable, List, Tuple
import numpy as np
from sklearn.datasets import make_regression
import xgboost
def make_batches(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
tmpdir: str,
) -> List[Tuple[str, str]]:
files: List[Tuple[str, str]] = []
rng = np.random.RandomState(1994)
for i in range(n_batches):
X, y = make_regression(n_samples_per_batch, n_features, random_state=rng)
X_path = os.path.join(tmpdir, "X-" + str(i) + ".npy")
y_path = os.path.join(tmpdir, "y-" + str(i) + ".npy")
np.save(X_path, X)
np.save(y_path, y)
files.append((X_path, y_path))
return files
class Iterator(xgboost.DataIter):
"""A custom iterator for loading files in batches."""
def __init__(self, file_paths: List[Tuple[str, str]]):
self._file_paths = file_paths
self._it = 0
# XGBoost will generate some cache files under current directory with the prefix
# "cache"
super().__init__(cache_prefix=os.path.join(".", "cache"))
def load_file(self) -> Tuple[np.ndarray, np.ndarray]:
X_path, y_path = self._file_paths[self._it]
X = np.load(X_path)
y = np.load(y_path)
assert X.shape[0] == y.shape[0]
return X, y
def next(self, input_data: Callable) -> int:
"""Advance the iterator by 1 step and pass the data to XGBoost. This function is
called by XGBoost during the construction of ``DMatrix``
"""
if self._it == len(self._file_paths):
# return 0 to let XGBoost know this is the end of iteration
return 0
# input_data is a function passed in by XGBoost who has the similar signature to
# the ``DMatrix`` constructor.
X, y = self.load_file()
input_data(data=X, label=y)
self._it += 1
return 1
def reset(self) -> None:
"""Reset the iterator to its beginning"""
self._it = 0
def main(tmpdir: str) -> xgboost.Booster:
# generate some random data for demo
files = make_batches(1024, 17, 31, tmpdir)
it = Iterator(files)
# For non-data arguments, specify it here once instead of passing them by the `next`
# method.
missing = np.NaN
Xy = xgboost.DMatrix(it, missing=missing, enable_categorical=False)
# ``approx`` is also supported, but less efficient due to sketching. GPU behaves
# differently than CPU tree methods as it uses a hybrid approach. See tutorial in
# doc for details.
booster = xgboost.train(
{"tree_method": "hist", "max_depth": 4},
Xy,
evals=[(Xy, "Train")],
num_boost_round=10,
)
return booster
if __name__ == "__main__":
with tempfile.TemporaryDirectory() as tmpdir:
main(tmpdir)
|
"""
Experimental support for external memory
========================================
This is similar to the one in `quantile_data_iterator.py`, but for external memory
instead of Quantile DMatrix. The feature is not ready for production use yet.
.. versionadded:: 1.5.0
See :doc:`the tutorial </tutorials/external_memory>` for more details.
"""
import os
import tempfile
from typing import Callable, List, Tuple
import numpy as np
from sklearn.datasets import make_regression
import xgboost
def make_batches(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
tmpdir: str,
) -> List[Tuple[str, str]]:
files: List[Tuple[str, str]] = []
rng = np.random.RandomState(1994)
for i in range(n_batches):
X, y = make_regression(n_samples_per_batch, n_features, random_state=rng)
X_path = os.path.join(tmpdir, "X-" + str(i) + ".npy")
y_path = os.path.join(tmpdir, "y-" + str(i) + ".npy")
np.save(X_path, X)
np.save(y_path, y)
files.append((X_path, y_path))
return files
class Iterator(xgboost.DataIter):
"""A custom iterator for loading files in batches."""
def __init__(self, file_paths: List[Tuple[str, str]]):
self._file_paths = file_paths
self._it = 0
# XGBoost will generate some cache files under current directory with the prefix
# "cache"
super().__init__(cache_prefix=os.path.join(".", "cache"))
def load_file(self) -> Tuple[np.ndarray, np.ndarray]:
X_path, y_path = self._file_paths[self._it]
X = np.load(X_path)
y = np.load(y_path)
assert X.shape[0] == y.shape[0]
return X, y
def next(self, input_data: Callable) -> int:
"""Advance the iterator by 1 step and pass the data to XGBoost. This function is
called by XGBoost during the construction of ``DMatrix``
"""
if self._it == len(self._file_paths):
# return 0 to let XGBoost know this is the end of iteration
return 0
# input_data is a function passed in by XGBoost who has the similar signature to
# the ``DMatrix`` constructor.
X, y = self.load_file()
input_data(data=X, label=y)
self._it += 1
return 1
def reset(self) -> None:
"""Reset the iterator to its beginning"""
self._it = 0
def main(tmpdir: str) -> xgboost.Booster:
# generate some random data for demo
files = make_batches(1024, 17, 31, tmpdir)
it = Iterator(files)
# For non-data arguments, specify it here once instead of passing them by the `next`
# method.
missing = np.NaN
Xy = xgboost.DMatrix(it, missing=missing, enable_categorical=False)
# ``approx`` is also supported, but less efficient due to sketching. GPU behaves
# differently than CPU tree methods as it uses a hybrid approach. See tutorial in
# doc for details.
booster = xgboost.train(
{"tree_method": "hist", "max_depth": 4},
Xy,
evals=[(Xy, "Train")],
num_boost_round=10,
)
return booster
if __name__ == "__main__":
with tempfile.TemporaryDirectory() as tmpdir:
main(tmpdir)
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
"""
Preprocesses `dataset_path` and removes remote filesystem (e.g. removing `s3://`).
Args:
dataset_path (`str`):
Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory.
"""
if "://" in dataset_path:
dataset_path = dataset_path.split("://")[1]
return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
Validates if filesystem has remote protocol.
Args:
fs (`fsspec.spec.AbstractFileSystem`):
An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`].
"""
if fs is not None:
protocols = (p,) if isinstance(p := fs.protocol, str) else p
if "file" not in protocols:
return True
return False
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
"""
Renames the file `src` in `fs` to `dst`.
"""
is_local = not is_remote_filesystem(fs)
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
else:
fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
"""
Clear reference to the loop and thread.
This is necessary otherwise HTTPFileSystem hangs in the ML training loop.
Only required for fsspec >= 0.9.0
See https://github.com/fsspec/gcsfs/issues/379
"""
if hasattr(fsspec.asyn, "reset_lock"):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
fsspec.asyn.iothread[0] = None
fsspec.asyn.loop[0] = None
fsspec.asyn.lock = threading.Lock()
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
"""
Preprocesses `dataset_path` and removes remote filesystem (e.g. removing `s3://`).
Args:
dataset_path (`str`):
Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory.
"""
if "://" in dataset_path:
dataset_path = dataset_path.split("://")[1]
return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
Validates if filesystem has remote protocol.
Args:
fs (`fsspec.spec.AbstractFileSystem`):
An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`].
"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
"""
Renames the file `src` in `fs` to `dst`.
"""
is_local = not is_remote_filesystem(fs)
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
else:
fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
"""
Clear reference to the loop and thread.
This is necessary otherwise HTTPFileSystem hangs in the ML training loop.
Only required for fsspec >= 0.9.0
See https://github.com/fsspec/gcsfs/issues/379
"""
if hasattr(fsspec.asyn, "reset_lock"):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
fsspec.asyn.iothread[0] = None
fsspec.asyn.loop[0] = None
fsspec.asyn.lock = threading.Lock()
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='DocArray team',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.13.1'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'grpcio>=1.46.0,<1.48.1',
'grpcio-reflection>=1.46.0,<1.48.1',
'grpcio-health-checking>=1.46.0,<1.48.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client~=0.10.3',
],
'annlite': [
'annlite',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'seaborn',
],
'test': [
'protobuf>=3.13.0,<=3.20.0', # pip dependency resolution does not respect this restriction from paddle
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov==3.0.0',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.13.1'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'grpcio>=1.46.0,<1.48.1',
'grpcio-reflection>=1.46.0,<1.48.1',
'grpcio-health-checking>=1.46.0,<1.48.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client~=0.10.3',
],
'annlite': [
'annlite',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'seaborn',
],
'test': [
'protobuf>=3.13.0,<=3.20.0', # pip dependency resolution does not respect this restriction from paddle
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov==3.0.0',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
from typing import Literal
from langchain_core.documents import Document
from langchain_core.indexing.api import _get_document_with_hash
def test_hashed_document_hashing() -> None:
document = Document(
uid="123", page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
hashed_document = _get_document_with_hash(document, key_encoder="sha1")
assert isinstance(hashed_document.id, str)
def test_to_document() -> None:
"""Test to_document method."""
original_doc = Document(
page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
hashed_doc = _get_document_with_hash(original_doc, key_encoder="sha1")
assert isinstance(hashed_doc, Document)
assert hashed_doc is not original_doc
assert hashed_doc.page_content == "Lorem ipsum dolor sit amet"
assert hashed_doc.metadata["key"] == "value"
def test_hashing() -> None:
"""Test from document class method."""
document = Document(
page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
hashed_document = _get_document_with_hash(document, key_encoder="sha1")
# hash should be deterministic
assert hashed_document.id == "fd1dc827-051b-537d-a1fe-1fa043e8b276"
# Verify that hashing with sha1 is determinstic
another_hashed_document = _get_document_with_hash(document, key_encoder="sha1")
assert another_hashed_document.id == hashed_document.id
# Verify that the result is different from SHA256, SHA512, blake2b
values: list[Literal["sha256", "sha512", "blake2b"]] = [
"sha256",
"sha512",
"blake2b",
]
for key_encoder in values:
different_hashed_document = _get_document_with_hash(
document, key_encoder=key_encoder
)
assert different_hashed_document.id != hashed_document.id
def test_hashing_custom_key_encoder() -> None:
"""Test hashing with a custom key encoder."""
def custom_key_encoder(doc: Document) -> str:
return f"quack-{doc.metadata['key']}"
document = Document(
page_content="Lorem ipsum dolor sit amet", metadata={"key": "like a duck"}
)
hashed_document = _get_document_with_hash(document, key_encoder=custom_key_encoder)
assert hashed_document.id == "quack-like a duck"
assert isinstance(hashed_document.id, str)
|
import pytest
from langchain_core.documents import Document
from langchain_core.indexing.api import _HashedDocument
def test_hashed_document_hashing() -> None:
hashed_document = _HashedDocument( # type: ignore[call-arg]
uid="123", page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
assert isinstance(hashed_document.hash_, str)
def test_hashing_with_missing_content() -> None:
"""Check that ValueError is raised if page_content is missing."""
with pytest.raises(TypeError):
_HashedDocument(
metadata={"key": "value"},
) # type: ignore[call-arg]
def test_uid_auto_assigned_to_hash() -> None:
"""Test uid is auto-assigned to the hashed_document hash."""
hashed_document = _HashedDocument( # type: ignore[call-arg]
page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
assert hashed_document.uid == hashed_document.hash_
def test_to_document() -> None:
"""Test to_document method."""
hashed_document = _HashedDocument( # type: ignore[call-arg]
page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
doc = hashed_document.to_document()
assert isinstance(doc, Document)
assert doc.page_content == "Lorem ipsum dolor sit amet"
assert doc.metadata == {"key": "value"}
def test_from_document() -> None:
"""Test from document class method."""
document = Document(
page_content="Lorem ipsum dolor sit amet", metadata={"key": "value"}
)
hashed_document = _HashedDocument.from_document(document)
# hash should be deterministic
assert hashed_document.hash_ == "fd1dc827-051b-537d-a1fe-1fa043e8b276"
assert hashed_document.uid == hashed_document.hash_
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.