# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
class BaseImageProcessorFast(metaclass=DummyObject):
_backends = ["torchvision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torchvision"])
class BaseVideoProcessor(metaclass=DummyObject):
_backends = ["torchvision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torchvision"])
|
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
class BaseImageProcessorFast(metaclass=DummyObject):
_backends = ["torchvision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torchvision"])
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
type='ATSS',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='ATSSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
type='ATSS',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='ATSSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
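A minimal sketch of loading a config like the one above with MMEngine; the file path is hypothetical and assumes MMEngine is installed alongside an MMDetection-style configs directory.
# Hedged sketch: load and inspect an MMDetection-style config with MMEngine.
from mmengine.config import Config  # assumes MMEngine is installed

cfg = Config.fromfile('configs/atss/atss_r50_fpn_1x_coco.py')  # hypothetical path
print(cfg.model.type)                   # 'ATSS'
print(cfg.model.bbox_head.num_classes)  # 80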
|
"""Hybrid Fusion Retriever Pack."""
import os
from typing import Any, Dict, List
from llama_index.core import Settings
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import QueryFusionRetriever
from llama_index.core.schema import Document, TextNode
from llama_index.retrievers.bm25 import BM25Retriever
class HybridFusionRetrieverPack(BaseLlamaPack):
"""
Hybrid fusion retriever pack.
Ensembles vector and bm25 retrievers using fusion.
"""
def __init__(
self,
nodes: List[TextNode] = None,
chunk_size: int = 256,
mode: str = "reciprocal_rerank",
vector_similarity_top_k: int = 2,
bm25_similarity_top_k: int = 2,
fusion_similarity_top_k: int = 2,
num_queries: int = 4,
documents: List[Document] = None,
cache_dir: str = None,
**kwargs: Any,
) -> None:
"""Init params."""
Settings.chunk_size = chunk_size
if cache_dir is not None and os.path.exists(cache_dir):
# Load from cache
from llama_index import StorageContext, load_index_from_storage
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir=cache_dir)
# load index
index = load_index_from_storage(storage_context)
elif documents is not None:
index = VectorStoreIndex.from_documents(documents=documents)
else:
index = VectorStoreIndex(nodes)
if cache_dir is not None and not os.path.exists(cache_dir):
index.storage_context.persist(persist_dir=cache_dir)
self.vector_retriever = index.as_retriever(
similarity_top_k=vector_similarity_top_k
)
self.bm25_retriever = BM25Retriever.from_defaults(
docstore=index.docstore, similarity_top_k=bm25_similarity_top_k
)
self.fusion_retriever = QueryFusionRetriever(
[self.vector_retriever, self.bm25_retriever],
similarity_top_k=fusion_similarity_top_k,
num_queries=num_queries, # set this to 1 to disable query generation
mode=mode,
use_async=True,
verbose=True,
# query_gen_prompt="...", # we could override the query generation prompt here
)
self.query_engine = RetrieverQueryEngine.from_args(self.fusion_retriever)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"vector_retriever": self.vector_retriever,
"bm25_retriever": self.bm25_retriever,
"fusion_retriever": self.fusion_retriever,
"query_engine": self.query_engine,
}
def retrieve(self, query_str: str) -> Any:
"""Retrieve."""
return self.fusion_retriever.retrieve(query_str)
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
|
"""Hybrid Fusion Retriever Pack."""
import os
from typing import Any, Dict, List
from llama_index.core import Settings
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import QueryFusionRetriever
from llama_index.core.schema import Document, TextNode
from llama_index.retrievers.bm25 import BM25Retriever
class HybridFusionRetrieverPack(BaseLlamaPack):
"""Hybrid fusion retriever pack.
Ensembles vector and bm25 retrievers using fusion.
"""
def __init__(
self,
nodes: List[TextNode] = None,
chunk_size: int = 256,
mode: str = "reciprocal_rerank",
vector_similarity_top_k: int = 2,
bm25_similarity_top_k: int = 2,
fusion_similarity_top_k: int = 2,
num_queries: int = 4,
documents: List[Document] = None,
cache_dir: str = None,
**kwargs: Any,
) -> None:
"""Init params."""
Settings.chunk_size = chunk_size
if cache_dir is not None and os.path.exists(cache_dir):
# Load from cache
from llama_index import StorageContext, load_index_from_storage
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir=cache_dir)
# load index
index = load_index_from_storage(storage_context)
elif documents is not None:
index = VectorStoreIndex.from_documents(documents=documents)
else:
index = VectorStoreIndex(nodes)
if cache_dir is not None and not os.path.exists(cache_dir):
index.storage_context.persist(persist_dir=cache_dir)
self.vector_retriever = index.as_retriever(
similarity_top_k=vector_similarity_top_k
)
self.bm25_retriever = BM25Retriever.from_defaults(
docstore=index.docstore, similarity_top_k=bm25_similarity_top_k
)
self.fusion_retriever = QueryFusionRetriever(
[self.vector_retriever, self.bm25_retriever],
similarity_top_k=fusion_similarity_top_k,
num_queries=num_queries, # set this to 1 to disable query generation
mode=mode,
use_async=True,
verbose=True,
# query_gen_prompt="...", # we could override the query generation prompt here
)
self.query_engine = RetrieverQueryEngine.from_args(self.fusion_retriever)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"vector_retriever": self.vector_retriever,
"bm25_retriever": self.bm25_retriever,
"fusion_retriever": self.fusion_retriever,
"query_engine": self.query_engine,
}
def retrieve(self, query_str: str) -> Any:
"""Retrieve."""
return self.fusion_retriever.retrieve(query_str)
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
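A short usage sketch for the pack defined above, assuming an LLM and embedding model are already configured via Settings; "data" is a hypothetical directory of input files.
# Hedged usage sketch for HybridFusionRetrieverPack.
from llama_index.core import SimpleDirectoryReader

docs = SimpleDirectoryReader("data").load_data()  # hypothetical data directory
pack = HybridFusionRetrieverPack(documents=docs, fusion_similarity_top_k=4)
nodes = pack.retrieve("What is discussed in the documents?")   # fused retrieval only
response = pack.run("What is discussed in the documents?")     # retrieval + synthesis
print(response)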
|
import unittest
import torch
import torchaudio.functional as F
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
PytorchTestCase,
skipIfNoSox,
TorchaudioTestCase,
)
from .functional_impl import Functional, FunctionalCPUOnly
class TestFunctionalFloat32(Functional, FunctionalCPUOnly, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
@unittest.expectedFailure
def test_lfilter_9th_order_filter_stability(self):
super().test_lfilter_9th_order_filter_stability()
class TestFunctionalFloat64(Functional, PytorchTestCase):
dtype = torch.float64
device = torch.device("cpu")
@skipIfNoSox
class TestApplyCodec(TorchaudioTestCase):
backend = "sox_io"
def _smoke_test(self, format, compression, check_num_frames):
"""
        Smoke test: verify that apply_codec runs for the given format without
        exhibiting abnormal behavior.
"""
torch.random.manual_seed(42)
sample_rate = 8000
num_frames = 3 * sample_rate
num_channels = 2
waveform = torch.rand(num_channels, num_frames)
augmented = F.apply_codec(waveform, sample_rate, format, True, compression)
assert augmented.dtype == waveform.dtype
assert augmented.shape[0] == num_channels
if check_num_frames:
assert augmented.shape[1] == num_frames
def test_wave(self):
self._smoke_test("wav", compression=None, check_num_frames=True)
@parameterized.expand([(96,), (128,), (160,), (192,), (224,), (256,), (320,)])
def test_mp3(self, compression):
self._smoke_test("mp3", compression, check_num_frames=False)
@parameterized.expand([(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,)])
def test_flac(self, compression):
self._smoke_test("flac", compression, check_num_frames=False)
@parameterized.expand([(-1,), (0,), (1,), (2,), (3,), (3.6,), (5,), (10,)])
def test_vorbis(self, compression):
self._smoke_test("vorbis", compression, check_num_frames=False)
|
import unittest
import torch
import torchaudio.functional as F
from parameterized import parameterized
from torchaudio_unittest.common_utils import PytorchTestCase, TorchaudioTestCase, skipIfNoSox
from .functional_impl import Functional, FunctionalCPUOnly
class TestFunctionalFloat32(Functional, FunctionalCPUOnly, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
@unittest.expectedFailure
def test_lfilter_9th_order_filter_stability(self):
super().test_lfilter_9th_order_filter_stability()
class TestFunctionalFloat64(Functional, PytorchTestCase):
dtype = torch.float64
device = torch.device("cpu")
@skipIfNoSox
class TestApplyCodec(TorchaudioTestCase):
backend = "sox_io"
def _smoke_test(self, format, compression, check_num_frames):
"""
        Smoke test: verify that apply_codec runs for the given format without
        exhibiting abnormal behavior.
"""
torch.random.manual_seed(42)
sample_rate = 8000
num_frames = 3 * sample_rate
num_channels = 2
waveform = torch.rand(num_channels, num_frames)
augmented = F.apply_codec(waveform, sample_rate, format, True, compression)
assert augmented.dtype == waveform.dtype
assert augmented.shape[0] == num_channels
if check_num_frames:
assert augmented.shape[1] == num_frames
def test_wave(self):
self._smoke_test("wav", compression=None, check_num_frames=True)
@parameterized.expand([(96,), (128,), (160,), (192,), (224,), (256,), (320,)])
def test_mp3(self, compression):
self._smoke_test("mp3", compression, check_num_frames=False)
@parameterized.expand([(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,)])
def test_flac(self, compression):
self._smoke_test("flac", compression, check_num_frames=False)
@parameterized.expand([(-1,), (0,), (1,), (2,), (3,), (3.6,), (5,), (10,)])
def test_vorbis(self, compression):
self._smoke_test("vorbis", compression, check_num_frames=False)
|
import numpy as np
import torch
from docarray import BaseDocument, DocumentArray, Image, Text
from docarray.typing import (
AnyEmbedding,
AnyTensor,
AnyUrl,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import NdArrayEmbedding
def test_multi_modal_doc_proto():
class MyMultiModalDoc(BaseDocument):
image: Image
text: Text
    class MySuperDoc(BaseDocument):
doc: MyMultiModalDoc
description: str
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
MyMultiModalDoc.from_protobuf(doc.to_protobuf())
def test_all_types():
class NestedDoc(BaseDocument):
tensor: NdArray
class MyDoc(BaseDocument):
img_url: ImageUrl
txt_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
any_url: AnyUrl
torch_tensor: TorchTensor
torch_tensor_param: TorchTensor[224, 224, 3]
np_array: NdArray
np_array_param: NdArray[224, 224, 3]
generic_nd_array: AnyTensor
generic_torch_tensor: AnyTensor
embedding: AnyEmbedding
torch_embedding: TorchEmbedding[128]
np_embedding: NdArrayEmbedding[128]
nested_docs: DocumentArray[NestedDoc]
doc = MyDoc(
img_url='test.png',
txt_url='test.txt',
mesh_url='test.obj',
point_cloud_url='test.obj',
any_url='www.jina.ai',
torch_tensor=torch.zeros((3, 224, 224)),
torch_tensor_param=torch.zeros((3, 224, 224)),
np_array=np.zeros((3, 224, 224)),
np_array_param=np.zeros((3, 224, 224)),
generic_nd_array=np.zeros((3, 224, 224)),
generic_torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((3, 224, 224)),
torch_embedding=torch.zeros((128,)),
np_embedding=np.zeros((128,)),
nested_docs=DocumentArray[NestedDoc]([NestedDoc(tensor=np.zeros((128,)))]),
)
doc = MyDoc.from_protobuf(doc.to_protobuf())
assert doc.img_url == 'test.png'
assert doc.txt_url == 'test.txt'
assert doc.mesh_url == 'test.obj'
assert doc.point_cloud_url == 'test.obj'
assert doc.any_url == 'www.jina.ai'
assert (doc.torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.torch_tensor, torch.Tensor)
assert (doc.torch_tensor_param == torch.zeros((224, 224, 3))).all()
assert isinstance(doc.torch_tensor_param, torch.Tensor)
assert (doc.np_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.np_array, np.ndarray)
assert doc.np_array.flags.writeable
assert (doc.np_array_param == np.zeros((224, 224, 3))).all()
assert isinstance(doc.np_array_param, np.ndarray)
assert (doc.generic_nd_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_nd_array, np.ndarray)
assert (doc.generic_torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_torch_tensor, torch.Tensor)
assert (doc.torch_embedding == torch.zeros((128,))).all()
assert isinstance(doc.torch_embedding, torch.Tensor)
assert (doc.np_embedding == np.zeros((128,))).all()
assert isinstance(doc.np_embedding, np.ndarray)
assert (doc.embedding == np.zeros((3, 224, 224))).all()
|
import numpy as np
import torch
from docarray import BaseDocument, DocumentArray, Image, Text
from docarray.typing import (
AnyTensor,
AnyUrl,
Embedding,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import NdArrayEmbedding
def test_multi_modal_doc_proto():
class MyMultiModalDoc(BaseDocument):
image: Image
text: Text
    class MySuperDoc(BaseDocument):
doc: MyMultiModalDoc
description: str
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
MyMultiModalDoc.from_protobuf(doc.to_protobuf())
def test_all_types():
class NestedDoc(BaseDocument):
tensor: NdArray
class MyDoc(BaseDocument):
img_url: ImageUrl
txt_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
any_url: AnyUrl
torch_tensor: TorchTensor
torch_tensor_param: TorchTensor[224, 224, 3]
np_array: NdArray
np_array_param: NdArray[224, 224, 3]
generic_nd_array: AnyTensor
generic_torch_tensor: AnyTensor
embedding: Embedding
torch_embedding: TorchEmbedding[128]
np_embedding: NdArrayEmbedding[128]
nested_docs: DocumentArray[NestedDoc]
doc = MyDoc(
img_url='test.png',
txt_url='test.txt',
mesh_url='test.obj',
point_cloud_url='test.obj',
any_url='www.jina.ai',
torch_tensor=torch.zeros((3, 224, 224)),
torch_tensor_param=torch.zeros((3, 224, 224)),
np_array=np.zeros((3, 224, 224)),
np_array_param=np.zeros((3, 224, 224)),
generic_nd_array=np.zeros((3, 224, 224)),
generic_torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((3, 224, 224)),
torch_embedding=torch.zeros((128,)),
np_embedding=np.zeros((128,)),
nested_docs=DocumentArray[NestedDoc]([NestedDoc(tensor=np.zeros((128,)))]),
)
doc = MyDoc.from_protobuf(doc.to_protobuf())
assert doc.img_url == 'test.png'
assert doc.txt_url == 'test.txt'
assert doc.mesh_url == 'test.obj'
assert doc.point_cloud_url == 'test.obj'
assert doc.any_url == 'www.jina.ai'
assert (doc.torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.torch_tensor, torch.Tensor)
assert (doc.torch_tensor_param == torch.zeros((224, 224, 3))).all()
assert isinstance(doc.torch_tensor_param, torch.Tensor)
assert (doc.np_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.np_array, np.ndarray)
assert doc.np_array.flags.writeable
assert (doc.np_array_param == np.zeros((224, 224, 3))).all()
assert isinstance(doc.np_array_param, np.ndarray)
assert (doc.generic_nd_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_nd_array, np.ndarray)
assert (doc.generic_torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_torch_tensor, torch.Tensor)
assert (doc.torch_embedding == torch.zeros((128,))).all()
assert isinstance(doc.torch_embedding, torch.Tensor)
assert (doc.np_embedding == np.zeros((128,))).all()
assert isinstance(doc.np_embedding, np.ndarray)
assert (doc.embedding == np.zeros((3, 224, 224))).all()
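A minimal protobuf round-trip sketch using only the docarray API exercised in the tests above; PointDoc is a hypothetical document type.
# Hedged sketch: serialize a custom document to protobuf and back.
import numpy as np
from docarray import BaseDocument
from docarray.typing import NdArray

class PointDoc(BaseDocument):  # hypothetical document type
    coords: NdArray

doc = PointDoc(coords=np.zeros((3,)))
restored = PointDoc.from_protobuf(doc.to_protobuf())
assert (restored.coords == doc.coords).all()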
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.convnext import ConvNeXtBase as ConvNeXtBase
from keras.src.applications.convnext import ConvNeXtLarge as ConvNeXtLarge
from keras.src.applications.convnext import ConvNeXtSmall as ConvNeXtSmall
from keras.src.applications.convnext import ConvNeXtTiny as ConvNeXtTiny
from keras.src.applications.convnext import ConvNeXtXLarge as ConvNeXtXLarge
from keras.src.applications.convnext import (
decode_predictions as decode_predictions,
)
from keras.src.applications.convnext import preprocess_input as preprocess_input
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.convnext import ConvNeXtBase
from keras.src.applications.convnext import ConvNeXtLarge
from keras.src.applications.convnext import ConvNeXtSmall
from keras.src.applications.convnext import ConvNeXtTiny
from keras.src.applications.convnext import ConvNeXtXLarge
from keras.src.applications.convnext import decode_predictions
from keras.src.applications.convnext import preprocess_input
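A hedged sketch of how these re-exported symbols are typically used for inference; "elephant.jpg" is a hypothetical local image, and ImageNet weights are downloaded on first use.
# Hedged sketch: classify one image with the re-exported ConvNeXt symbols.
import numpy as np
from keras.applications.convnext import ConvNeXtTiny, decode_predictions, preprocess_input
from keras.utils import img_to_array, load_img

model = ConvNeXtTiny(weights="imagenet")
img = img_to_array(load_img("elephant.jpg", target_size=(224, 224)))  # hypothetical file
preds = model.predict(preprocess_input(np.expand_dims(img, axis=0)))
print(decode_predictions(preds, top=3)[0])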
|
"""
Tests that SentenceTransformer.encode computes embeddings as expected.
"""
import numpy as np
from sentence_transformers import SentenceTransformer
from sentence_transformers.util import get_device_name
def test_encode_token_embeddings(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""
Test that encode(output_value='token_embeddings') works
:return:
"""
model = paraphrase_distilroberta_base_v1_model
sent = [
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
"Sentences",
"Sentence five five five five five five five",
]
emb = model.encode(sent, output_value="token_embeddings", batch_size=2)
assert len(emb) == len(sent)
device = get_device_name()
if device == "hpu":
for s, e in zip(sent, emb):
assert len(model.tokenize([s])["input_ids"][0]) == model.get_max_seq_length()
else:
for s, e in zip(sent, emb):
assert len(model.tokenize([s])["input_ids"][0]) == e.shape[0]
def test_encode_single_sentences(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
# Single sentence
emb = model.encode("Hello Word, a test sentence")
assert emb.shape == (768,)
assert abs(np.sum(emb) - 7.9811716) < 0.002
# Single sentence as list
emb = model.encode(["Hello Word, a test sentence"])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 7.9811716) < 0.002
# Sentence list
emb = model.encode(
[
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
]
)
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 22.968266) < 0.007
def test_encode_normalize(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
emb = model.encode(
[
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
],
normalize_embeddings=True,
)
assert emb.shape == (3, 768)
for norm in np.linalg.norm(emb, axis=1):
assert abs(norm - 1) < 0.001
def test_encode_tuple_sentences(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
# Input a sentence tuple
emb = model.encode([("Hello Word, a test sentence", "Second input for model")])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 9.503508) < 0.002
# List of sentence tuples
emb = model.encode(
[
("Hello Word, a test sentence", "Second input for model"),
("My second tuple", "With two inputs"),
("Final tuple", "final test"),
]
)
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 32.14627) < 0.002
|
"""
Tests that SentenceTransformer.encode computes embeddings as expected.
"""
import numpy as np
from sentence_transformers import SentenceTransformer
def test_encode_token_embeddings(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""
Test that encode(output_value='token_embeddings') works
:return:
"""
model = paraphrase_distilroberta_base_v1_model
sent = [
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
"Sentences",
"Sentence five five five five five five five",
]
emb = model.encode(sent, output_value="token_embeddings", batch_size=2)
assert len(emb) == len(sent)
for s, e in zip(sent, emb):
assert len(model.tokenize([s])["input_ids"][0]) == e.shape[0]
def test_encode_single_sentences(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
# Single sentence
emb = model.encode("Hello Word, a test sentence")
assert emb.shape == (768,)
assert abs(np.sum(emb) - 7.9811716) < 0.001
# Single sentence as list
emb = model.encode(["Hello Word, a test sentence"])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 7.9811716) < 0.001
# Sentence list
emb = model.encode(
[
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
]
)
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 22.968266) < 0.001
def test_encode_normalize(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
emb = model.encode(
[
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
],
normalize_embeddings=True,
)
assert emb.shape == (3, 768)
for norm in np.linalg.norm(emb, axis=1):
assert abs(norm - 1) < 0.001
def test_encode_tuple_sentences(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
# Input a sentence tuple
emb = model.encode([("Hello Word, a test sentence", "Second input for model")])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 9.503508) < 0.001
# List of sentence tuples
emb = model.encode(
[
("Hello Word, a test sentence", "Second input for model"),
("My second tuple", "With two inputs"),
("Final tuple", "final test"),
]
)
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 32.14627) < 0.001
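A hedged sketch of the encode() behaviour the tests above exercise, assuming the paraphrase-distilroberta-base-v1 checkpoint can be downloaded.
# Hedged sketch: normalized sentence embeddings have unit L2 norm.
import numpy as np
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("paraphrase-distilroberta-base-v1")
emb = model.encode(["Hello Word, a test sentence"], normalize_embeddings=True)
print(emb.shape)                    # (1, 768)
print(np.linalg.norm(emb, axis=1))  # approximately [1.0]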
|
"""
Feature agglomeration. Base classes and functions for performing feature
agglomeration.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from scipy.sparse import issparse
from ..base import TransformerMixin
from ..utils import metadata_routing
from ..utils.deprecation import _deprecate_Xt_in_inverse_transform
from ..utils.validation import check_is_fitted, validate_data
###############################################################################
# Mixin class for feature agglomeration.
class AgglomerationTransform(TransformerMixin):
"""
A class for feature agglomeration via the transform interface.
"""
# This prevents ``set_split_inverse_transform`` to be generated for the
# non-standard ``Xt`` arg on ``inverse_transform``.
# TODO(1.7): remove when Xt is removed for inverse_transform.
__metadata_request__inverse_transform = {"Xt": metadata_routing.UNUSED}
def transform(self, X):
"""
Transform a new matrix using the built clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features) or \
(n_samples, n_samples)
            An M by N array of M observations in N dimensions, or a length-M
            array of M one-dimensional observations.
Returns
-------
Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,)
The pooled values for each feature cluster.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
if self.pooling_func == np.mean and not issparse(X):
size = np.bincount(self.labels_)
n_samples = X.shape[0]
# a fast way to compute the mean of grouped features
nX = np.array(
[np.bincount(self.labels_, X[i, :]) / size for i in range(n_samples)]
)
else:
nX = [
self.pooling_func(X[:, self.labels_ == l], axis=1)
for l in np.unique(self.labels_)
]
nX = np.array(nX).T
return nX
def inverse_transform(self, X=None, *, Xt=None):
"""
Inverse the transformation and return a vector of size `n_features`.
Parameters
----------
X : array-like of shape (n_samples, n_clusters) or (n_clusters,)
The values to be assigned to each cluster of samples.
Xt : array-like of shape (n_samples, n_clusters) or (n_clusters,)
The values to be assigned to each cluster of samples.
.. deprecated:: 1.5
`Xt` was deprecated in 1.5 and will be removed in 1.7. Use `X` instead.
Returns
-------
X : ndarray of shape (n_samples, n_features) or (n_features,)
            A vector of size `n_samples` with the values of `X` assigned to
            each cluster of samples.
"""
X = _deprecate_Xt_in_inverse_transform(X, Xt)
check_is_fitted(self)
unil, inverse = np.unique(self.labels_, return_inverse=True)
return X[..., inverse]
|
"""
Feature agglomeration. Base classes and functions for performing feature
agglomeration.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from scipy.sparse import issparse
from ..base import TransformerMixin
from ..utils import metadata_routing
from ..utils.deprecation import _deprecate_Xt_in_inverse_transform
from ..utils.validation import check_is_fitted
###############################################################################
# Mixin class for feature agglomeration.
class AgglomerationTransform(TransformerMixin):
"""
A class for feature agglomeration via the transform interface.
"""
# This prevents ``set_split_inverse_transform`` to be generated for the
# non-standard ``Xt`` arg on ``inverse_transform``.
# TODO(1.7): remove when Xt is removed for inverse_transform.
__metadata_request__inverse_transform = {"Xt": metadata_routing.UNUSED}
def transform(self, X):
"""
Transform a new matrix using the built clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features) or \
(n_samples, n_samples)
            An M by N array of M observations in N dimensions, or a length-M
            array of M one-dimensional observations.
Returns
-------
Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,)
The pooled values for each feature cluster.
"""
check_is_fitted(self)
X = self._validate_data(X, reset=False)
if self.pooling_func == np.mean and not issparse(X):
size = np.bincount(self.labels_)
n_samples = X.shape[0]
# a fast way to compute the mean of grouped features
nX = np.array(
[np.bincount(self.labels_, X[i, :]) / size for i in range(n_samples)]
)
else:
nX = [
self.pooling_func(X[:, self.labels_ == l], axis=1)
for l in np.unique(self.labels_)
]
nX = np.array(nX).T
return nX
def inverse_transform(self, X=None, *, Xt=None):
"""
Inverse the transformation and return a vector of size `n_features`.
Parameters
----------
X : array-like of shape (n_samples, n_clusters) or (n_clusters,)
The values to be assigned to each cluster of samples.
Xt : array-like of shape (n_samples, n_clusters) or (n_clusters,)
The values to be assigned to each cluster of samples.
.. deprecated:: 1.5
`Xt` was deprecated in 1.5 and will be removed in 1.7. Use `X` instead.
Returns
-------
X : ndarray of shape (n_samples, n_features) or (n_features,)
            A vector of size `n_samples` with the values of `X` assigned to
            each cluster of samples.
"""
X = _deprecate_Xt_in_inverse_transform(X, Xt)
check_is_fitted(self)
unil, inverse = np.unique(self.labels_, return_inverse=True)
return X[..., inverse]
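For context, a hedged sketch of sklearn.cluster.FeatureAgglomeration, the public estimator that inherits this mixin's transform/inverse_transform.
# Hedged sketch: pool features into clusters, then broadcast them back.
import numpy as np
from sklearn.cluster import FeatureAgglomeration

X = np.random.RandomState(0).rand(10, 6)
agglo = FeatureAgglomeration(n_clusters=2).fit(X)
X_reduced = agglo.transform(X)                    # shape (10, 2): one column per feature cluster
X_restored = agglo.inverse_transform(X_reduced)   # shape (10, 6): cluster values broadcast back
print(X_reduced.shape, X_restored.shape)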
|
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
|
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
pass
|
#!/usr/bin/env python3
# Owner(s): ["oncall: distributed"]
import contextlib
import copyreg
import os
import sys
import torch
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
import torch.distributed.rpc as rpc
import torch.multiprocessing.reductions as TorchMpReductions
from torch import multiprocessing
from torch.distributed.rpc.api import _use_rpc_pickler
from torch.distributed.rpc.internal import _InternalRPCPickler
from torch.testing._internal.common_utils import run_tests, TestCase
@contextlib.contextmanager
def fs_sharing():
prev_strategy = multiprocessing.get_sharing_strategy()
multiprocessing.set_sharing_strategy("file_system")
try:
yield
finally:
multiprocessing.set_sharing_strategy(prev_strategy)
class ShareMemoryRPCPickler(_InternalRPCPickler):
def __init__(self) -> None:
super().__init__()
# pyre-fixme[4]: Attribute must be annotated.
self._dispatch_table = copyreg.dispatch_table.copy()
for t in torch._storage_classes:
self._dispatch_table[t] = TorchMpReductions.reduce_storage
for t in torch._tensor_classes:
self._dispatch_table[t] = TorchMpReductions.reduce_tensor
self._dispatch_table[torch.Tensor] = TorchMpReductions.reduce_tensor
self._dispatch_table[torch.nn.parameter.Parameter] = (
TorchMpReductions.reduce_tensor
)
def worker_loop(a):
rpc.init_rpc("worker1", rank=1, world_size=2)
rpc.shutdown()
def worker_fn(m):
pass
class TestRPCPickler(TestCase):
def test_case(self):
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "29500"
with fs_sharing():
r = multiprocessing.spawn(worker_loop, join=False)
try:
with _use_rpc_pickler(ShareMemoryRPCPickler()):
rpc.init_rpc("worker0", rank=0, world_size=2)
m = torch.nn.Linear(1, 2)
m.share_memory()
rref = rpc.remote("worker1", worker_fn, args=(m,))
rref.to_here()
finally:
rpc.shutdown()
r.join()
if __name__ == "__main__":
run_tests()
|
#!/usr/bin/env python3
# Owner(s): ["oncall: distributed"]
import contextlib
import copyreg
import os
import sys
import torch
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
import torch.distributed.rpc as rpc
import torch.multiprocessing.reductions as TorchMpReductions
from torch import multiprocessing
from torch.distributed.rpc.api import _use_rpc_pickler
from torch.distributed.rpc.internal import _InternalRPCPickler
from torch.testing._internal.common_utils import run_tests, TestCase
@contextlib.contextmanager
def fs_sharing():
prev_strategy = multiprocessing.get_sharing_strategy()
multiprocessing.set_sharing_strategy("file_system")
try:
yield
finally:
multiprocessing.set_sharing_strategy(prev_strategy)
class ShareMemoryRPCPickler(_InternalRPCPickler):
def __init__(self) -> None:
super().__init__()
# pyre-fixme[4]: Attribute must be annotated.
self._dispatch_table = copyreg.dispatch_table.copy()
for t in torch._storage_classes:
self._dispatch_table[t] = TorchMpReductions.reduce_storage
for t in torch._tensor_classes:
self._dispatch_table[t] = TorchMpReductions.reduce_tensor
self._dispatch_table[torch.Tensor] = TorchMpReductions.reduce_tensor
self._dispatch_table[
torch.nn.parameter.Parameter
] = TorchMpReductions.reduce_tensor
def worker_loop(a):
rpc.init_rpc("worker1", rank=1, world_size=2)
rpc.shutdown()
def worker_fn(m):
pass
class TestRPCPickler(TestCase):
def test_case(self):
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "29500"
with fs_sharing():
r = multiprocessing.spawn(worker_loop, join=False)
try:
with _use_rpc_pickler(ShareMemoryRPCPickler()):
rpc.init_rpc("worker0", rank=0, world_size=2)
m = torch.nn.Linear(1, 2)
m.share_memory()
rref = rpc.remote("worker1", worker_fn, args=(m,))
rref.to_here()
finally:
rpc.shutdown()
r.join()
if __name__ == "__main__":
run_tests()
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .folder import make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class RenderedSST2(VisionDataset):
"""`The Rendered SST2 Dataset <https://github.com/openai/CLIP/blob/main/data/rendered-sst2.md>`_.
    Rendered SST2 is an image classification dataset used to evaluate the model's capability on optical
    character recognition. This dataset was generated by rendering sentences in the Stanford Sentiment
    Treebank v2 dataset.
This dataset contains two classes (positive and negative) and is divided in three splits: a train
split containing 6920 images (3610 positive and 3310 negative), a validation split containing 872 images
(444 positive and 428 negative), and a test split containing 1821 images (909 positive and 912 negative).
Args:
root (string): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"`` and ``"test"``.
transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
"""
_URL = "https://openaipublic.azureedge.net/clip/data/rendered-sst2.tgz"
_MD5 = "2384d08e9dcfa4bd55b324e610496ee5"
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
self._split_to_folder = {"train": "train", "val": "valid", "test": "test"}
self._base_folder = Path(self.root) / "rendered-sst2"
self.classes = ["negative", "positive"]
self.class_to_idx = {"negative": 0, "positive": 1}
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._samples = make_dataset(str(self._base_folder / self._split_to_folder[self._split]), extensions=("png",))
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._samples[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}"
def _check_exists(self) -> bool:
for class_label in set(self.classes):
if not (self._base_folder / self._split_to_folder[self._split] / class_label).is_dir():
return False
return True
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .folder import make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class RenderedSST2(VisionDataset):
"""`The Rendered SST2 Dataset <https://github.com/openai/CLIP/blob/main/data/rendered-sst2.md>`_.
    Rendered SST2 is an image classification dataset used to evaluate the model's capability on optical
    character recognition. This dataset was generated by rendering sentences in the Stanford Sentiment
    Treebank v2 dataset.
This dataset contains two classes (positive and negative) and is divided in three splits: a train
split containing 6920 images (3610 positive and 3310 negative), a validation split containing 872 images
(444 positive and 428 negative), and a test split containing 1821 images (909 positive and 912 negative).
Args:
root (string): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"`` and ``"test"``.
transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
"""
_URL = "https://openaipublic.azureedge.net/clip/data/rendered-sst2.tgz"
_MD5 = "2384d08e9dcfa4bd55b324e610496ee5"
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
self._split_to_folder = {"train": "train", "val": "valid", "test": "test"}
self._base_folder = Path(self.root) / "rendered-sst2"
self.classes = ["negative", "positive"]
self.class_to_idx = {"negative": 0, "positive": 1}
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._samples = make_dataset(str(self._base_folder / self._split_to_folder[self._split]), extensions=("png",))
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, idx) -> Tuple[Any, Any]:
image_file, label = self._samples[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}"
def _check_exists(self) -> bool:
for class_label in set(self.classes):
if not (self._base_folder / self._split_to_folder[self._split] / class_label).is_dir():
return False
return True
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
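A hedged usage sketch for the dataset class above; "data" is a hypothetical root directory, and download=True fetches the archive on first use.
# Hedged usage sketch for RenderedSST2.
from torchvision import transforms

dataset = RenderedSST2(root="data", split="val", download=True,
                       transform=transforms.ToTensor())
image, label = dataset[0]
print(len(dataset), tuple(image.shape), dataset.classes[label])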
|
"""Test Anthropic API wrapper."""
from typing import List
from langchain_core.callbacks import (
CallbackManager,
)
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.outputs import ChatGeneration, LLMResult
from langchain_community.chat_models.litellm import ChatLiteLLM
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_litellm_call() -> None:
"""Test valid call to litellm."""
chat = ChatLiteLLM(
model="test",
)
message = HumanMessage(content="Hello")
response = chat.invoke([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_litellm_generate() -> None:
"""Test generate method of anthropic."""
chat = ChatLiteLLM(model="test")
chat_messages: List[List[BaseMessage]] = [
[HumanMessage(content="How many toes do dogs have?")]
]
messages_copy = [messages.copy() for messages in chat_messages]
result: LLMResult = chat.generate(chat_messages)
assert isinstance(result, LLMResult)
for response in result.generations[0]:
assert isinstance(response, ChatGeneration)
assert isinstance(response.text, str)
assert response.text == response.message.content
assert chat_messages == messages_copy
def test_litellm_streaming() -> None:
"""Test streaming tokens from anthropic."""
chat = ChatLiteLLM(model="test", streaming=True)
message = HumanMessage(content="Hello")
response = chat.invoke([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_litellm_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = ChatLiteLLM(
model="test",
streaming=True,
callback_manager=callback_manager,
verbose=True,
)
message = HumanMessage(content="Write me a sentence with 10 words.")
chat.invoke([message])
assert callback_handler.llm_streams > 1
|
"""Test Anthropic API wrapper."""
from typing import List
from langchain_core.callbacks import (
CallbackManager,
)
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.outputs import ChatGeneration, LLMResult
from langchain_community.chat_models.litellm import ChatLiteLLM
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_litellm_call() -> None:
"""Test valid call to litellm."""
chat = ChatLiteLLM( # type: ignore[call-arg]
model="test",
)
message = HumanMessage(content="Hello")
response = chat.invoke([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_litellm_generate() -> None:
"""Test generate method of anthropic."""
chat = ChatLiteLLM(model="test") # type: ignore[call-arg]
chat_messages: List[List[BaseMessage]] = [
[HumanMessage(content="How many toes do dogs have?")]
]
messages_copy = [messages.copy() for messages in chat_messages]
result: LLMResult = chat.generate(chat_messages)
assert isinstance(result, LLMResult)
for response in result.generations[0]:
assert isinstance(response, ChatGeneration)
assert isinstance(response.text, str)
assert response.text == response.message.content
assert chat_messages == messages_copy
def test_litellm_streaming() -> None:
"""Test streaming tokens from anthropic."""
chat = ChatLiteLLM(model="test", streaming=True) # type: ignore[call-arg]
message = HumanMessage(content="Hello")
response = chat.invoke([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_litellm_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = ChatLiteLLM( # type: ignore[call-arg]
model="test",
streaming=True,
callback_manager=callback_manager,
verbose=True,
)
message = HumanMessage(content="Write me a sentence with 10 words.")
chat.invoke([message])
assert callback_handler.llm_streams > 1
|
from dataclasses import dataclass, field
from typing import Any, Dict, Type
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index.abstract import BaseDocIndex
from docarray.typing import NdArray
pytestmark = pytest.mark.index
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dim=1000)
class FakeQueryBuilder:
...
@dataclass
class DBConfig(BaseDocIndex.DBConfig):
work_dir: str = '.'
other: int = 5
@dataclass
class RuntimeConfig(BaseDocIndex.RuntimeConfig):
default_column_config: Dict[Type, Dict[str, Any]] = field(
default_factory=lambda: {
str: {
'dim': 128,
'space': 'l2',
},
}
)
default_ef: int = 50
def _identity(*x, **y):
return x, y
class DummyDocIndex(BaseDocIndex):
DBConfig = DBConfig
RuntimeConfig = RuntimeConfig
def python_type_to_db_type(self, x):
return str
_index = _identity
num_docs = _identity
_del_items = _identity
_get_items = _identity
execute_query = _identity
_find = _identity
_find_batched = _identity
_filter = _identity
_filter_batched = _identity
_text_search = _identity
_text_search_batched = _identity
def test_defaults():
store = DummyDocIndex[SimpleDoc]()
assert store._db_config.other == 5
assert store._db_config.work_dir == '.'
assert store._runtime_config.default_column_config[str] == {
'dim': 128,
'space': 'l2',
}
def test_set_by_class():
# change all settings
store = DummyDocIndex[SimpleDoc](DBConfig(work_dir='hi', other=10))
assert store._db_config.other == 10
assert store._db_config.work_dir == 'hi'
store.configure(RuntimeConfig(default_column_config={}, default_ef=10))
assert store._runtime_config.default_column_config == {}
# change only some settings
store = DummyDocIndex[SimpleDoc](DBConfig(work_dir='hi'))
assert store._db_config.other == 5
assert store._db_config.work_dir == 'hi'
store.configure(RuntimeConfig(default_column_config={}))
assert store._runtime_config.default_column_config == {}
def test_set_by_kwargs():
# change all settings
store = DummyDocIndex[SimpleDoc](work_dir='hi', other=10)
assert store._db_config.other == 10
assert store._db_config.work_dir == 'hi'
store.configure(default_column_config={}, default_ef=10)
assert store._runtime_config.default_column_config == {}
# change only some settings
store = DummyDocIndex[SimpleDoc](work_dir='hi')
assert store._db_config.other == 5
assert store._db_config.work_dir == 'hi'
store.configure(default_column_config={})
assert store._runtime_config.default_column_config == {}
def test_default_column_config():
store = DummyDocIndex[SimpleDoc]()
assert store._runtime_config.default_column_config == {
str: {
'dim': 128,
'space': 'l2',
},
}
|
from dataclasses import dataclass, field
from typing import Any, Dict, Type
import pytest
from pydantic import Field
from docarray import BaseDocument
from docarray.index.abstract import BaseDocumentIndex
from docarray.typing import NdArray
pytestmark = pytest.mark.index
class SimpleDoc(BaseDocument):
tens: NdArray[10] = Field(dim=1000)
class FakeQueryBuilder:
...
@dataclass
class DBConfig(BaseDocumentIndex.DBConfig):
work_dir: str = '.'
other: int = 5
@dataclass
class RuntimeConfig(BaseDocumentIndex.RuntimeConfig):
default_column_config: Dict[Type, Dict[str, Any]] = field(
default_factory=lambda: {
str: {
'dim': 128,
'space': 'l2',
},
}
)
default_ef: int = 50
def _identity(*x, **y):
return x, y
class DummyDocIndex(BaseDocumentIndex):
DBConfig = DBConfig
RuntimeConfig = RuntimeConfig
def python_type_to_db_type(self, x):
return str
_index = _identity
num_docs = _identity
_del_items = _identity
_get_items = _identity
execute_query = _identity
_find = _identity
_find_batched = _identity
_filter = _identity
_filter_batched = _identity
_text_search = _identity
_text_search_batched = _identity
def test_defaults():
store = DummyDocIndex[SimpleDoc]()
assert store._db_config.other == 5
assert store._db_config.work_dir == '.'
assert store._runtime_config.default_column_config[str] == {
'dim': 128,
'space': 'l2',
}
def test_set_by_class():
# change all settings
store = DummyDocIndex[SimpleDoc](DBConfig(work_dir='hi', other=10))
assert store._db_config.other == 10
assert store._db_config.work_dir == 'hi'
store.configure(RuntimeConfig(default_column_config={}, default_ef=10))
assert store._runtime_config.default_column_config == {}
# change only some settings
store = DummyDocIndex[SimpleDoc](DBConfig(work_dir='hi'))
assert store._db_config.other == 5
assert store._db_config.work_dir == 'hi'
store.configure(RuntimeConfig(default_column_config={}))
assert store._runtime_config.default_column_config == {}
def test_set_by_kwargs():
# change all settings
store = DummyDocIndex[SimpleDoc](work_dir='hi', other=10)
assert store._db_config.other == 10
assert store._db_config.work_dir == 'hi'
store.configure(default_column_config={}, default_ef=10)
assert store._runtime_config.default_column_config == {}
# change only some settings
store = DummyDocIndex[SimpleDoc](work_dir='hi')
assert store._db_config.other == 5
assert store._db_config.work_dir == 'hi'
store.configure(default_column_config={})
assert store._runtime_config.default_column_config == {}
def test_default_column_config():
store = DummyDocIndex[SimpleDoc]()
assert store._runtime_config.default_column_config == {
str: {
'dim': 128,
'space': 'l2',
},
}
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import torch
from .sampling_result import SamplingResult
class BaseSampler(metaclass=ABCMeta):
"""Base class of samplers."""
def __init__(self,
num,
pos_fraction,
neg_pos_ub=-1,
add_gt_as_proposals=True,
**kwargs):
self.num = num
self.pos_fraction = pos_fraction
self.neg_pos_ub = neg_pos_ub
self.add_gt_as_proposals = add_gt_as_proposals
self.pos_sampler = self
self.neg_sampler = self
@abstractmethod
def _sample_pos(self, assign_result, num_expected, **kwargs):
"""Sample positive samples."""
pass
@abstractmethod
def _sample_neg(self, assign_result, num_expected, **kwargs):
"""Sample negative samples."""
pass
def sample(self,
assign_result,
bboxes,
gt_bboxes,
gt_labels=None,
**kwargs):
"""Sample positive and negative bboxes.
This is a simple implementation of bbox sampling given candidates,
assigning results and ground truth bboxes.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
bboxes (Tensor): Boxes to be sampled from.
gt_bboxes (Tensor): Ground truth bboxes.
gt_labels (Tensor, optional): Class labels of ground truth bboxes.
Returns:
:obj:`SamplingResult`: Sampling result.
Example:
>>> from mmdet.core.bbox import RandomSampler
>>> from mmdet.core.bbox import AssignResult
>>> from mmdet.core.bbox.demodata import ensure_rng, random_boxes
>>> rng = ensure_rng(None)
>>> assign_result = AssignResult.random(rng=rng)
>>> bboxes = random_boxes(assign_result.num_preds, rng=rng)
>>> gt_bboxes = random_boxes(assign_result.num_gts, rng=rng)
>>> gt_labels = None
>>> self = RandomSampler(num=32, pos_fraction=0.5, neg_pos_ub=-1,
>>> add_gt_as_proposals=False)
>>> self = self.sample(assign_result, bboxes, gt_bboxes, gt_labels)
"""
if len(bboxes.shape) < 2:
bboxes = bboxes[None, :]
bboxes = bboxes[:, :4]
gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)
if self.add_gt_as_proposals and len(gt_bboxes) > 0:
if gt_labels is None:
raise ValueError(
'gt_labels must be given when add_gt_as_proposals is True')
bboxes = torch.cat([gt_bboxes, bboxes], dim=0)
assign_result.add_gt_(gt_labels)
gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)
gt_flags = torch.cat([gt_ones, gt_flags])
num_expected_pos = int(self.num * self.pos_fraction)
pos_inds = self.pos_sampler._sample_pos(
assign_result, num_expected_pos, bboxes=bboxes, **kwargs)
# We found that sampled indices have duplicated items occasionally.
# (may be a bug of PyTorch)
pos_inds = pos_inds.unique()
num_sampled_pos = pos_inds.numel()
num_expected_neg = self.num - num_sampled_pos
if self.neg_pos_ub >= 0:
_pos = max(1, num_sampled_pos)
neg_upper_bound = int(self.neg_pos_ub * _pos)
if num_expected_neg > neg_upper_bound:
num_expected_neg = neg_upper_bound
neg_inds = self.neg_sampler._sample_neg(
assign_result, num_expected_neg, bboxes=bboxes, **kwargs)
neg_inds = neg_inds.unique()
sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
assign_result, gt_flags)
return sampling_result
|
from abc import ABCMeta, abstractmethod
import torch
from .sampling_result import SamplingResult
class BaseSampler(metaclass=ABCMeta):
"""Base class of samplers."""
def __init__(self,
num,
pos_fraction,
neg_pos_ub=-1,
add_gt_as_proposals=True,
**kwargs):
self.num = num
self.pos_fraction = pos_fraction
self.neg_pos_ub = neg_pos_ub
self.add_gt_as_proposals = add_gt_as_proposals
self.pos_sampler = self
self.neg_sampler = self
@abstractmethod
def _sample_pos(self, assign_result, num_expected, **kwargs):
"""Sample positive samples."""
pass
@abstractmethod
def _sample_neg(self, assign_result, num_expected, **kwargs):
"""Sample negative samples."""
pass
def sample(self,
assign_result,
bboxes,
gt_bboxes,
gt_labels=None,
**kwargs):
"""Sample positive and negative bboxes.
This is a simple implementation of bbox sampling given candidates,
assigning results and ground truth bboxes.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
bboxes (Tensor): Boxes to be sampled from.
gt_bboxes (Tensor): Ground truth bboxes.
gt_labels (Tensor, optional): Class labels of ground truth bboxes.
Returns:
:obj:`SamplingResult`: Sampling result.
Example:
>>> from mmdet.core.bbox import RandomSampler
>>> from mmdet.core.bbox import AssignResult
>>> from mmdet.core.bbox.demodata import ensure_rng, random_boxes
>>> rng = ensure_rng(None)
>>> assign_result = AssignResult.random(rng=rng)
>>> bboxes = random_boxes(assign_result.num_preds, rng=rng)
>>> gt_bboxes = random_boxes(assign_result.num_gts, rng=rng)
>>> gt_labels = None
>>> self = RandomSampler(num=32, pos_fraction=0.5, neg_pos_ub=-1,
>>> add_gt_as_proposals=False)
>>> self = self.sample(assign_result, bboxes, gt_bboxes, gt_labels)
"""
if len(bboxes.shape) < 2:
bboxes = bboxes[None, :]
bboxes = bboxes[:, :4]
gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)
if self.add_gt_as_proposals and len(gt_bboxes) > 0:
if gt_labels is None:
raise ValueError(
'gt_labels must be given when add_gt_as_proposals is True')
bboxes = torch.cat([gt_bboxes, bboxes], dim=0)
assign_result.add_gt_(gt_labels)
gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)
gt_flags = torch.cat([gt_ones, gt_flags])
num_expected_pos = int(self.num * self.pos_fraction)
pos_inds = self.pos_sampler._sample_pos(
assign_result, num_expected_pos, bboxes=bboxes, **kwargs)
# We found that sampled indices have duplicated items occasionally.
# (may be a bug of PyTorch)
pos_inds = pos_inds.unique()
num_sampled_pos = pos_inds.numel()
num_expected_neg = self.num - num_sampled_pos
if self.neg_pos_ub >= 0:
_pos = max(1, num_sampled_pos)
neg_upper_bound = int(self.neg_pos_ub * _pos)
if num_expected_neg > neg_upper_bound:
num_expected_neg = neg_upper_bound
neg_inds = self.neg_sampler._sample_neg(
assign_result, num_expected_neg, bboxes=bboxes, **kwargs)
neg_inds = neg_inds.unique()
sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
assign_result, gt_flags)
return sampling_result
|
"""Google Trends API Toolkit."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.google_trends.tool import GoogleTrendsQueryRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GoogleTrendsQueryRun": "langchain_community.tools.google_trends.tool",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GoogleTrendsQueryRun",
]
|
"""Google Trends API Toolkit."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.google_trends.tool import GoogleTrendsQueryRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GoogleTrendsQueryRun": "langchain_community.tools.google_trends.tool"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GoogleTrendsQueryRun",
]
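A hedged sketch of the non-deprecated import path in use; it assumes langchain-community is installed and a SerpApi key is configured in the environment.
# Hedged sketch: instantiate and run the Google Trends tool directly
# from langchain_community (assumes a SerpApi key is available).
from langchain_community.tools.google_trends.tool import GoogleTrendsQueryRun
from langchain_community.utilities.google_trends import GoogleTrendsAPIWrapper

tool = GoogleTrendsQueryRun(api_wrapper=GoogleTrendsAPIWrapper())
print(tool.run("coffee"))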
|
import logging
import time
from abc import ABC, abstractmethod
from typing import ClassVar
from backend.data.model import OAuth2Credentials
logger = logging.getLogger(__name__)
class BaseOAuthHandler(ABC):
# --8<-- [start:BaseOAuthHandler1]
PROVIDER_NAME: ClassVar[str]
DEFAULT_SCOPES: ClassVar[list[str]] = []
# --8<-- [end:BaseOAuthHandler1]
@abstractmethod
# --8<-- [start:BaseOAuthHandler2]
def __init__(self, client_id: str, client_secret: str, redirect_uri: str): ...
# --8<-- [end:BaseOAuthHandler2]
@abstractmethod
# --8<-- [start:BaseOAuthHandler3]
def get_login_url(self, scopes: list[str], state: str) -> str:
# --8<-- [end:BaseOAuthHandler3]
"""Constructs a login URL that the user can be redirected to"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler4]
def exchange_code_for_tokens(
self, code: str, scopes: list[str]
) -> OAuth2Credentials:
# --8<-- [end:BaseOAuthHandler4]
"""Exchanges the acquired authorization code from login for a set of tokens"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler5]
def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
# --8<-- [end:BaseOAuthHandler5]
"""Implements the token refresh mechanism"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler6]
def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
# --8<-- [end:BaseOAuthHandler6]
"""Revokes the given token at provider,
returns False provider does not support it"""
...
def refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
if credentials.provider != self.PROVIDER_NAME:
raise ValueError(
f"{self.__class__.__name__} can not refresh tokens "
f"for other provider '{credentials.provider}'"
)
return self._refresh_tokens(credentials)
def get_access_token(self, credentials: OAuth2Credentials) -> str:
"""Returns a valid access token, refreshing it first if needed"""
if self.needs_refresh(credentials):
credentials = self.refresh_tokens(credentials)
return credentials.access_token.get_secret_value()
def needs_refresh(self, credentials: OAuth2Credentials) -> bool:
"""Indicates whether the given tokens need to be refreshed"""
return (
credentials.access_token_expires_at is not None
and credentials.access_token_expires_at < int(time.time()) + 300
)
def handle_default_scopes(self, scopes: list[str]) -> list[str]:
"""Handles the default scopes for the provider"""
# If scopes are empty, use the default scopes for the provider
if not scopes:
logger.debug(f"Using default scopes for provider {self.PROVIDER_NAME}")
scopes = self.DEFAULT_SCOPES
return scopes
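# A minimal sketch of a concrete handler (illustrative only; "example" is a made-up provider
# and the method bodies are placeholders, not a real OAuth integration):
#
#   class ExampleOAuthHandler(BaseOAuthHandler):
#       PROVIDER_NAME = "example"
#       DEFAULT_SCOPES = ["profile"]
#
#       def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
#           self.client_id = client_id
#           self.client_secret = client_secret
#           self.redirect_uri = redirect_uri
#
#       def get_login_url(self, scopes: list[str], state: str) -> str:
#           scopes = self.handle_default_scopes(scopes)
#           return f"https://example.com/oauth/authorize?scope={'+'.join(scopes)}&state={state}"
#
#       def exchange_code_for_tokens(self, code: str, scopes: list[str]) -> OAuth2Credentials:
#           raise NotImplementedError  # would call the provider's token endpoint
#
#       def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
#           raise NotImplementedError  # would call the provider's refresh endpoint
#
#       def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
#           return False  # this hypothetical provider does not support revocation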
|
import logging
import time
from abc import ABC, abstractmethod
from typing import ClassVar
from autogpt_libs.supabase_integration_credentials_store import OAuth2Credentials
logger = logging.getLogger(__name__)
class BaseOAuthHandler(ABC):
# --8<-- [start:BaseOAuthHandler1]
PROVIDER_NAME: ClassVar[str]
DEFAULT_SCOPES: ClassVar[list[str]] = []
# --8<-- [end:BaseOAuthHandler1]
@abstractmethod
# --8<-- [start:BaseOAuthHandler2]
def __init__(self, client_id: str, client_secret: str, redirect_uri: str): ...
# --8<-- [end:BaseOAuthHandler2]
@abstractmethod
# --8<-- [start:BaseOAuthHandler3]
def get_login_url(self, scopes: list[str], state: str) -> str:
# --8<-- [end:BaseOAuthHandler3]
"""Constructs a login URL that the user can be redirected to"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler4]
def exchange_code_for_tokens(
self, code: str, scopes: list[str]
) -> OAuth2Credentials:
# --8<-- [end:BaseOAuthHandler4]
"""Exchanges the acquired authorization code from login for a set of tokens"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler5]
def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
# --8<-- [end:BaseOAuthHandler5]
"""Implements the token refresh mechanism"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler6]
def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
# --8<-- [end:BaseOAuthHandler6]
"""Revokes the given token at provider,
returns False provider does not support it"""
...
def refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
if credentials.provider != self.PROVIDER_NAME:
raise ValueError(
f"{self.__class__.__name__} can not refresh tokens "
f"for other provider '{credentials.provider}'"
)
return self._refresh_tokens(credentials)
def get_access_token(self, credentials: OAuth2Credentials) -> str:
"""Returns a valid access token, refreshing it first if needed"""
if self.needs_refresh(credentials):
credentials = self.refresh_tokens(credentials)
return credentials.access_token.get_secret_value()
def needs_refresh(self, credentials: OAuth2Credentials) -> bool:
"""Indicates whether the given tokens need to be refreshed"""
return (
credentials.access_token_expires_at is not None
and credentials.access_token_expires_at < int(time.time()) + 300
)
def handle_default_scopes(self, scopes: list[str]) -> list[str]:
"""Handles the default scopes for the provider"""
# If scopes are empty, use the default scopes for the provider
if not scopes:
logger.debug(f"Using default scopes for provider {self.PROVIDER_NAME}")
scopes = self.DEFAULT_SCOPES
return scopes
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
type='RepeatDataset',
times=4, # simply change this from 2 to 16 for 50e - 400e training.
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox',
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
max_epochs = 25
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=5)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# optimizer assumes bs=64
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
# only keep latest 2 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=2))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
file_client_args = dict(backend='disk')
# comment out the code below to use different file client
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
type='RepeatDataset',
times=4, # simply change this from 2 to 16 for 50e - 400e training.
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
max_epochs = 25
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=5)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# optimizer assumes bs=64
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
# only keep latest 2 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=2))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
from __future__ import annotations
__all__ = ["Array", "DType", "Device"]
_all_ignore = ["cp"]
from typing import TYPE_CHECKING
import cupy as cp
from cupy import ndarray as Array
from cupy.cuda.device import Device
if TYPE_CHECKING:
# NumPy 1.x on Python 3.10 fails to parse np.dtype[]
DType = cp.dtype[
cp.intp
| cp.int8
| cp.int16
| cp.int32
| cp.int64
| cp.uint8
| cp.uint16
| cp.uint32
| cp.uint64
| cp.float32
| cp.float64
| cp.complex64
| cp.complex128
| cp.bool_
]
else:
DType = cp.dtype
|
from __future__ import annotations
__all__ = [
"ndarray",
"Device",
"Dtype",
]
import sys
from typing import (
Union,
TYPE_CHECKING,
)
from cupy import (
ndarray,
dtype,
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
float32,
float64,
)
from cupy.cuda.device import Device
if TYPE_CHECKING or sys.version_info >= (3, 9):
Dtype = dtype[Union[
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
float32,
float64,
]]
else:
Dtype = dtype
|
from typing import Union
from fastapi import FastAPI, Query
app = FastAPI()
@app.get("/items/")
async def read_items(q: Union[str, None] = Query(min_length=3)):
results = {"items": [{"item_id": "Foo"}, {"item_id": "Bar"}]}
if q:
results.update({"q": q})
return results
|
from typing import Union
from fastapi import FastAPI, Query
app = FastAPI()
@app.get("/items/")
async def read_items(q: Union[str, None] = Query(default=..., min_length=3)):
results = {"items": [{"item_id": "Foo"}, {"item_id": "Bar"}]}
if q:
results.update({"q": q})
return results
|
from torch.utils.data import Dataset
from typing import List
from ..readers.InputExample import InputExample
import numpy as np
from transformers.utils.import_utils import is_nltk_available, NLTK_IMPORT_ERROR
class DenoisingAutoEncoderDataset(Dataset):
"""
The DenoisingAutoEncoderDataset returns InputExamples in the format: texts=[noise_fn(sentence), sentence]
It is used in combination with the DenoisingAutoEncoderLoss: Here, a decoder tries to re-construct the
sentence without noise.
Args:
sentences: A list of sentences
noise_fn: A noise function: Given a string, it returns a string
with noise, e.g. deleted words
"""
def __init__(self, sentences: List[str], noise_fn=lambda s: DenoisingAutoEncoderDataset.delete(s)):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.sentences = sentences
self.noise_fn = noise_fn
def __getitem__(self, item):
sent = self.sentences[item]
return InputExample(texts=[self.noise_fn(sent), sent])
def __len__(self):
return len(self.sentences)
# Deletion noise.
@staticmethod
def delete(text, del_ratio=0.6):
from nltk import word_tokenize, TreebankWordDetokenizer
words = word_tokenize(text)
n = len(words)
if n == 0:
return text
keep_or_not = np.random.rand(n) > del_ratio
if sum(keep_or_not) == 0:
keep_or_not[np.random.choice(n)] = True # guarantee that at least one word remains
words_processed = TreebankWordDetokenizer().detokenize(np.array(words)[keep_or_not])
return words_processed
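# Illustrative usage (a minimal sketch; assumes NLTK and its "punkt" tokenizer data are installed):
#
#   sentences = ["The quick brown fox jumps over the lazy dog."]
#   dataset = DenoisingAutoEncoderDataset(sentences)
#   example = dataset[0]
#   # example.texts[1] is the original sentence; example.texts[0] is the same sentence with
#   # roughly del_ratio (60%) of its words removed by the default `delete` noise function.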
|
from torch.utils.data import Dataset
from typing import List
from ..readers.InputExample import InputExample
import numpy as np
from transformers.utils.import_utils import is_nltk_available, NLTK_IMPORT_ERROR
class DenoisingAutoEncoderDataset(Dataset):
"""
The DenoisingAutoEncoderDataset returns InputExamples in the format: texts=[noise_fn(sentence), sentence]
It is used in combination with the DenoisingAutoEncoderLoss: Here, a decoder tries to re-construct the
sentence without noise.
:param sentences: A list of sentences
:param noise_fn: A noise function: Given a string, it returns a string with noise, e.g. deleted words
"""
def __init__(self, sentences: List[str], noise_fn=lambda s: DenoisingAutoEncoderDataset.delete(s)):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.sentences = sentences
self.noise_fn = noise_fn
def __getitem__(self, item):
sent = self.sentences[item]
return InputExample(texts=[self.noise_fn(sent), sent])
def __len__(self):
return len(self.sentences)
# Deletion noise.
@staticmethod
def delete(text, del_ratio=0.6):
from nltk import word_tokenize, TreebankWordDetokenizer
words = word_tokenize(text)
n = len(words)
if n == 0:
return text
keep_or_not = np.random.rand(n) > del_ratio
if sum(keep_or_not) == 0:
keep_or_not[np.random.choice(n)] = True # guarantee that at least one word remains
words_processed = TreebankWordDetokenizer().detokenize(np.array(words)[keep_or_not])
return words_processed
|
from typing import TYPE_CHECKING, Any, Dict, List, Type
if TYPE_CHECKING:
from docarray import BaseDocument
def _is_access_path_valid(doc_type: Type['BaseDocument'], access_path: str) -> bool:
"""
Check if a given access path ("__"-separated) is a valid path for a given Document class.
"""
from docarray import BaseDocument
field, _, remaining = access_path.partition('__')
if len(remaining) == 0:
return access_path in doc_type.__fields__.keys()
else:
valid_field = field in doc_type.__fields__.keys()
if not valid_field:
return False
else:
d = doc_type._get_field_type(field)
if not issubclass(d, BaseDocument):
return False
else:
return _is_access_path_valid(d, remaining)
def _all_access_paths_valid(
doc_type: Type['BaseDocument'], access_paths: List[str]
) -> List[bool]:
"""
Check if all access paths ("__"-separated) are valid for a given Document class.
"""
return [_is_access_path_valid(doc_type, path) for path in access_paths]
def _access_path_to_dict(access_path: str, value) -> Dict[str, Any]:
"""
Convert an access path ("__"-separated) and its value to a (potentially) nested dict.
EXAMPLE USAGE
.. code-block:: python
assert access_path_to_dict('image__url', 'img.png') == {'image': {'url': 'img.png'}}
"""
fields = access_path.split('__')
for field in reversed(fields):
result = {field: value}
value = result
return result
def _access_path_dict_to_nested_dict(access_path2val: Dict[str, Any]) -> Dict[Any, Any]:
"""
Convert a dict, where the keys are access paths ("__"-separated) to a nested dictionary.
EXAMPLE USAGE
.. code-block:: python
access_path2val = {'image__url': 'some.png'}
assert access_path_dict_to_nested_dict(access_path2val) == {
'image': {'url': 'some.png'}
}
:param access_path2val: dict with access_paths as keys
:return: nested dict where the access path keys are split into separate field names and nested keys
"""
nested_dict: Dict[Any, Any] = {}
for access_path, value in access_path2val.items():
field2val = _access_path_to_dict(
access_path=access_path,
value=value if value not in ['', 'None'] else None,
)
_update_nested_dicts(to_update=nested_dict, update_with=field2val)
return nested_dict
def _dict_to_access_paths(d: dict) -> Dict[str, Any]:
"""
Convert a (nested) dict to a Dict[access_path, value].
Access paths are defined as a path of field(s) separated by "__".
EXAMPLE USAGE
.. code-block:: python
assert dict_to_access_paths({'image': {'url': 'img.png'}}) == {'image__url', 'img.png'}
"""
result = {}
for k, v in d.items():
if isinstance(v, dict):
v = _dict_to_access_paths(v)
for nested_k, nested_v in v.items():
new_key = '__'.join([k, nested_k])
result[new_key] = nested_v
else:
result[k] = v
return result
def _update_nested_dicts(
to_update: Dict[Any, Any], update_with: Dict[Any, Any]
) -> None:
"""
Update a dict with another one, while considering shared nested keys.
EXAMPLE USAGE:
.. code-block:: python
d1 = {'image': {'tensor': None}, 'title': 'hello'}
d2 = {'image': {'url': 'some.png'}}
update_nested_dicts(d1, d2)
assert d1 == {'image': {'tensor': None, 'url': 'some.png'}, 'title': 'hello'}
:param to_update: dict that should be updated
:param update_with: dict to update with
:return: merged dict
"""
for k, v in update_with.items():
if k not in to_update.keys():
to_update[k] = v
else:
_update_nested_dicts(to_update[k], update_with[k])
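# Illustrative round trip (mirrors the docstring examples above):
#
#   paths = _dict_to_access_paths({'image': {'url': 'img.png'}, 'title': 'hello'})
#   # paths == {'image__url': 'img.png', 'title': 'hello'}
#   nested = _access_path_dict_to_nested_dict(paths)
#   # nested == {'image': {'url': 'img.png'}, 'title': 'hello'}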
|
from typing import TYPE_CHECKING, Any, Dict, Type
if TYPE_CHECKING:
from docarray import BaseDocument
def is_access_path_valid(doc: Type['BaseDocument'], access_path: str) -> bool:
"""
Check if a given access path ("__"-separated) is a valid path for a given Document class.
"""
from docarray import BaseDocument
field, _, remaining = access_path.partition('__')
if len(remaining) == 0:
return access_path in doc.__fields__.keys()
else:
valid_field = field in doc.__fields__.keys()
if not valid_field:
return False
else:
d = doc._get_field_type(field)
if not issubclass(d, BaseDocument):
return False
else:
return is_access_path_valid(d, remaining)
def _access_path_to_dict(access_path: str, value) -> Dict[str, Any]:
"""
Convert an access path ("__"-separated) and its value to a (potentially) nested dict.
EXAMPLE USAGE
.. code-block:: python
assert access_path_to_dict('image__url', 'img.png') == {'image': {'url': 'img.png'}}
"""
fields = access_path.split('__')
for field in reversed(fields):
result = {field: value}
value = result
return result
def _dict_to_access_paths(d: dict) -> Dict[str, Any]:
"""
Convert a (nested) dict to a Dict[access_path, value].
Access paths are defined as a path of field(s) separated by "__".
EXAMPLE USAGE
.. code-block:: python
assert dict_to_access_paths({'image': {'url': 'img.png'}}) == {'image__url', 'img.png'}
"""
result = {}
for k, v in d.items():
if isinstance(v, dict):
v = _dict_to_access_paths(v)
for nested_k, nested_v in v.items():
new_key = '__'.join([k, nested_k])
result[new_key] = nested_v
else:
result[k] = v
return result
def _update_nested_dicts(
to_update: Dict[Any, Any], update_with: Dict[Any, Any]
) -> None:
"""
Update a dict with another one, while considering shared nested keys.
EXAMPLE USAGE:
.. code-block:: python
d1 = {'image': {'tensor': None}, 'title': 'hello'}
d2 = {'image': {'url': 'some.png'}}
update_nested_dicts(d1, d2)
assert d1 == {'image': {'tensor': None, 'url': 'some.png'}, 'title': 'hello'}
:param to_update: dict that should be updated
:param update_with: dict to update with
:return: merged dict
"""
for k, v in update_with.items():
if k not in to_update.keys():
to_update[k] = v
else:
_update_nested_dicts(to_update[k], update_with[k])
|
# TODO: enable ruff qa on this file when we figure out why it thinks weaviate_client is
# redefined at each test that uses that fixture
# ruff: noqa
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index.backends.weaviate import WeaviateDocumentIndex
from tests.index.weaviate.fixture_weaviate import ( # noqa: F401
start_storage,
weaviate_client,
)
pytestmark = [pytest.mark.slow, pytest.mark.index]
def test_column_config(weaviate_client):
def get_text_field_data_type(index, index_name):
props = index._client.schema.get(index_name)["properties"]
text_field = [p for p in props if p["name"] == "text"][0]
return text_field["dataType"][0]
class TextDoc(BaseDoc):
text: str = Field()
class StringDoc(BaseDoc):
text: str = Field(col_type="string")
dbconfig = WeaviateDocumentIndex.DBConfig(index_name="TextDoc")
index = WeaviateDocumentIndex[TextDoc](db_config=dbconfig)
assert get_text_field_data_type(index, "TextDoc") == "text"
dbconfig = WeaviateDocumentIndex.DBConfig(index_name="StringDoc")
index = WeaviateDocumentIndex[StringDoc](db_config=dbconfig)
assert get_text_field_data_type(index, "StringDoc") == "string"
def test_index_name():
class TextDoc(BaseDoc):
text: str = Field()
class StringDoc(BaseDoc):
text: str = Field(col_type="string")
index = WeaviateDocumentIndex[TextDoc]()
assert index.index_name == TextDoc.__name__
index = WeaviateDocumentIndex[StringDoc]()
assert index.index_name == StringDoc.__name__
index = WeaviateDocumentIndex[StringDoc](index_name='BaseDoc')
assert index.index_name == 'BaseDoc'
index = WeaviateDocumentIndex[StringDoc](index_name='index_name')
assert index.index_name == 'Index_name'
|
# TODO: enable ruff qa on this file when we figure out why it thinks weaviate_client is
# redefined at each test that uses that fixture
# ruff: noqa
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index.backends.weaviate import WeaviateDocumentIndex
from tests.index.weaviate.fixture_weaviate import ( # noqa: F401
start_storage,
weaviate_client,
)
pytestmark = [pytest.mark.slow, pytest.mark.index]
def test_column_config(weaviate_client):
def get_text_field_data_type(index, index_name):
props = index._client.schema.get(index_name)["properties"]
text_field = [p for p in props if p["name"] == "text"][0]
return text_field["dataType"][0]
class TextDoc(BaseDoc):
text: str = Field()
class StringDoc(BaseDoc):
text: str = Field(col_type="string")
dbconfig = WeaviateDocumentIndex.DBConfig(index_name="TextDoc")
index = WeaviateDocumentIndex[TextDoc](db_config=dbconfig)
assert get_text_field_data_type(index, "TextDoc") == "text"
dbconfig = WeaviateDocumentIndex.DBConfig(index_name="StringDoc")
index = WeaviateDocumentIndex[StringDoc](db_config=dbconfig)
assert get_text_field_data_type(index, "StringDoc") == "string"
def test_index_name():
class TextDoc(BaseDoc):
text: str = Field()
class StringDoc(BaseDoc):
text: str = Field(col_type="string")
index = WeaviateDocumentIndex[TextDoc]()
assert index.index_name == TextDoc.__name__
index = WeaviateDocumentIndex[StringDoc]()
assert index.index_name == StringDoc.__name__
|
import csv
import os
from pathlib import Path
from typing import Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "wavs",
"url": "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2",
"checksum": "be1a30453f28eb8dd26af4101ae40cbf2c50413b1bb21936cbcdc6fae3de8aa5",
}
}
class LJSPEECH(Dataset):
"""Create a Dataset for *LJSpeech-1.1* [:footcite:`ljspeech17`].
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"wavs"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
basename = os.path.basename(url)
archive = root / basename
basename = Path(basename.split(".tar.bz2")[0])
folder_in_archive = basename / folder_in_archive
self._path = root / folder_in_archive
self._metadata_path = root / basename / "metadata.csv"
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
extract_archive(archive)
with open(self._metadata_path, "r", newline="") as metadata:
flist = csv.reader(metadata, delimiter="|", quoting=csv.QUOTE_NONE)
self._flist = list(flist)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, str):
``(waveform, sample_rate, transcript, normalized_transcript)``
"""
line = self._flist[n]
fileid, transcript, normalized_transcript = line
fileid_audio = self._path / (fileid + ".wav")
# Load audio
waveform, sample_rate = torchaudio.load(fileid_audio)
return (
waveform,
sample_rate,
transcript,
normalized_transcript,
)
def __len__(self) -> int:
return len(self._flist)
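# Illustrative usage (a minimal sketch; with download=True the archive is fetched and
# extracted under `root` on first use):
#
#   dataset = LJSPEECH(root="./data", download=True)
#   waveform, sample_rate, transcript, normalized_transcript = dataset[0]
#   print(sample_rate, transcript)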
|
import csv
import os
from pathlib import Path
from typing import Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "wavs",
"url": "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2",
"checksum": "be1a30453f28eb8dd26af4101ae40cbf2c50413b1bb21936cbcdc6fae3de8aa5",
}
}
class LJSPEECH(Dataset):
"""Create a Dataset for LJSpeech-1.1.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"wavs"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
basename = os.path.basename(url)
archive = root / basename
basename = Path(basename.split(".tar.bz2")[0])
folder_in_archive = basename / folder_in_archive
self._path = root / folder_in_archive
self._metadata_path = root / basename / "metadata.csv"
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
extract_archive(archive)
with open(self._metadata_path, "r", newline="") as metadata:
flist = csv.reader(metadata, delimiter="|", quoting=csv.QUOTE_NONE)
self._flist = list(flist)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, str):
``(waveform, sample_rate, transcript, normalized_transcript)``
"""
line = self._flist[n]
fileid, transcript, normalized_transcript = line
fileid_audio = self._path / (fileid + ".wav")
# Load audio
waveform, sample_rate = torchaudio.load(fileid_audio)
return (
waveform,
sample_rate,
transcript,
normalized_transcript,
)
def __len__(self) -> int:
return len(self._flist)
|
from ._hdemucs import HDemucs, hdemucs_high, hdemucs_low, hdemucs_medium
from .conformer import Conformer
from .conv_tasnet import conv_tasnet_base, ConvTasNet
from .deepspeech import DeepSpeech
from .emformer import Emformer
from .rnnt import emformer_rnnt_base, emformer_rnnt_model, RNNT
from .rnnt_decoder import Hypothesis, RNNTBeamSearch
from .tacotron2 import Tacotron2
from .wav2letter import Wav2Letter
from .wav2vec2 import (
hubert_base,
hubert_large,
hubert_pretrain_base,
hubert_pretrain_large,
hubert_pretrain_model,
hubert_pretrain_xlarge,
hubert_xlarge,
HuBERTPretrainModel,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
wav2vec2_model,
Wav2Vec2Model,
)
from .wavernn import WaveRNN
__all__ = [
"Wav2Letter",
"WaveRNN",
"ConvTasNet",
"conv_tasnet_base",
"DeepSpeech",
"Wav2Vec2Model",
"HuBERTPretrainModel",
"wav2vec2_model",
"wav2vec2_base",
"wav2vec2_large",
"wav2vec2_large_lv60k",
"hubert_base",
"hubert_large",
"hubert_xlarge",
"hubert_pretrain_model",
"hubert_pretrain_base",
"hubert_pretrain_large",
"hubert_pretrain_xlarge",
"Tacotron2",
"Conformer",
"Emformer",
"Hypothesis",
"RNNT",
"RNNTBeamSearch",
"emformer_rnnt_base",
"emformer_rnnt_model",
"HDemucs",
"hdemucs_low",
"hdemucs_medium",
"hdemucs_high",
]
|
from ._hdemucs import HDemucs, hdemucs_high, hdemucs_low, hdemucs_medium
from .conformer import Conformer
from .conv_tasnet import ConvTasNet
from .deepspeech import DeepSpeech
from .emformer import Emformer
from .rnnt import emformer_rnnt_base, emformer_rnnt_model, RNNT
from .rnnt_decoder import Hypothesis, RNNTBeamSearch
from .tacotron2 import Tacotron2
from .wav2letter import Wav2Letter
from .wav2vec2 import (
hubert_base,
hubert_large,
hubert_pretrain_base,
hubert_pretrain_large,
hubert_pretrain_model,
hubert_pretrain_xlarge,
hubert_xlarge,
HuBERTPretrainModel,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
wav2vec2_model,
Wav2Vec2Model,
)
from .wavernn import WaveRNN
__all__ = [
"Wav2Letter",
"WaveRNN",
"ConvTasNet",
"DeepSpeech",
"Wav2Vec2Model",
"HuBERTPretrainModel",
"wav2vec2_model",
"wav2vec2_base",
"wav2vec2_large",
"wav2vec2_large_lv60k",
"hubert_base",
"hubert_large",
"hubert_xlarge",
"hubert_pretrain_model",
"hubert_pretrain_base",
"hubert_pretrain_large",
"hubert_pretrain_xlarge",
"Tacotron2",
"Conformer",
"Emformer",
"Hypothesis",
"RNNT",
"RNNTBeamSearch",
"emformer_rnnt_base",
"emformer_rnnt_model",
"HDemucs",
"hdemucs_low",
"hdemucs_medium",
"hdemucs_high",
]
|
import warnings
from typing import Any, List, Union
import torch
from torchvision import datapoints
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_tensor(inpt: Any) -> torch.Tensor:
warnings.warn(
"The function `to_tensor(...)` is deprecated and will be removed in a future release. "
"Instead, please use `to_image_tensor(...)` followed by `to_dtype(..., dtype=torch.float32, scale=True)`."
)
return _F.to_tensor(inpt)
def get_image_size(inpt: Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT]) -> List[int]:
warnings.warn(
"The function `get_image_size(...)` is deprecated and will be removed in a future release. "
"Instead, please use `get_size(...)` which returns `[h, w]` instead of `[w, h]`."
)
return _F.get_image_size(inpt)
|
import warnings
from typing import Any, List, Union
import torch
from torchvision import datapoints
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_tensor(inpt: Any) -> torch.Tensor:
warnings.warn(
"The function `to_tensor(...)` is deprecated and will be removed in a future release. "
"Instead, please use `to_image_tensor(...)` followed by `to_dtype(..., dtype=torch.float32, scale=True)`."
)
return _F.to_tensor(inpt)
def get_image_size(inpt: Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT]) -> List[int]:
warnings.warn(
"The function `get_image_size(...)` is deprecated and will be removed in a future release. "
"Instead, please use `get_spatial_size(...)` which returns `[h, w]` instead of `[w, h]`."
)
return _F.get_image_size(inpt)
|
import os
import pypdf
import pytest
import tempfile
from fpdf import FPDF
from llama_index.readers.file import PDFReader
from pathlib import Path
from typing import Dict
@pytest.fixture()
def multi_page_pdf() -> FPDF:
pdf = FPDF()
pdf.add_page()
pdf.set_font("Helvetica", size=12)
pdf.cell(200, 10, text="Page 1 Content", align="C")
pdf.add_page()
pdf.set_font("Helvetica", size=12)
pdf.cell(200, 10, text="Page 2 Content", align="C")
return pdf
@pytest.fixture()
def extra_info() -> Dict[str, str]:
return {"ABC": "abc", "DEF": "def"}
def test_pdfreader_loads_data_into_full_document(multi_page_pdf: FPDF) -> None:
with tempfile.NamedTemporaryFile(
mode="w", delete=False, suffix=".pdf"
) as temp_file:
multi_page_pdf.output(temp_file.name)
temp_file_path = Path(temp_file.name)
reader = PDFReader(return_full_document=True)
docs = reader.load_data(temp_file_path)
assert len(docs) == 1
assert docs[0].text == "\n".join(
f"Page {page + 1} Content" for page in range(multi_page_pdf.pages_count)
)
os.remove(temp_file.name)
def test_pdfreader_loads_data_into_multiple_documents(multi_page_pdf: FPDF) -> None:
with tempfile.NamedTemporaryFile(
mode="w", delete=False, suffix=".pdf"
) as temp_file:
multi_page_pdf.output(temp_file.name)
temp_file_path = Path(temp_file.name)
reader = PDFReader(return_full_document=False)
docs = reader.load_data(temp_file_path)
assert len(docs) == multi_page_pdf.pages_count
for page in range(multi_page_pdf.pages_count):
assert docs[page].text == f"Page {page + 1} Content"
os.remove(temp_file.name)
def test_pdfreader_loads_metadata_into_full_document(
multi_page_pdf: FPDF, extra_info: Dict[str, str]
) -> None:
with tempfile.NamedTemporaryFile(
mode="w", delete=False, suffix=".pdf"
) as temp_file:
multi_page_pdf.output(temp_file.name)
temp_file_path = Path(temp_file.name)
expected_metadata = {"file_name": temp_file_path.name}
expected_metadata.update(extra_info)
reader = PDFReader(return_full_document=True)
docs = reader.load_data(temp_file_path, extra_info)
assert len(docs) == 1
assert docs[0].metadata == expected_metadata
os.remove(temp_file.name)
def test_pdfreader_loads_metadata_into_multiple_documents(
multi_page_pdf: FPDF, extra_info: Dict[str, str]
) -> None:
with tempfile.NamedTemporaryFile(
mode="w", delete=False, suffix=".pdf"
) as temp_file:
multi_page_pdf.output(temp_file.name)
temp_file_path = Path(temp_file.name)
expected_metadata = {"file_name": temp_file_path.name}
expected_metadata.update(extra_info)
reader = PDFReader(return_full_document=False)
docs = reader.load_data(temp_file_path, extra_info)
pypdf_pdf = pypdf.PdfReader(temp_file_path)
assert len(docs) == multi_page_pdf.pages_count
for page in range(multi_page_pdf.pages_count):
expected_metadata["page_label"] = pypdf_pdf.page_labels[page]
assert docs[page].metadata == expected_metadata
os.remove(temp_file.name)
|
import os
import pypdf
import pytest
import tempfile
from fpdf import FPDF
from llama_index.readers.file import PDFReader
from pathlib import Path
from typing import Dict
@pytest.fixture()
def multi_page_pdf() -> FPDF:
pdf = FPDF()
pdf.add_page()
pdf.set_font("Helvetica", size=12)
pdf.cell(200, 10, text="Page 1 Content", align="C")
pdf.add_page()
pdf.set_font("Helvetica", size=12)
pdf.cell(200, 10, text="Page 2 Content", align="C")
return pdf
@pytest.fixture()
def extra_info() -> Dict[str, str]:
return {"ABC": "abc", "DEF": "def"}
def test_pdfreader_loads_data_into_full_document(multi_page_pdf: FPDF) -> None:
with tempfile.NamedTemporaryFile(
mode="w", delete=False, suffix=".pdf"
) as temp_file:
multi_page_pdf.output(temp_file.name)
temp_file_path = Path(temp_file.name)
reader = PDFReader(return_full_document=True)
docs = reader.load_data(temp_file_path)
assert len(docs) == 1
assert docs[0].text == "\n".join(
f"Page {page+1} Content" for page in range(multi_page_pdf.pages_count)
)
os.remove(temp_file.name)
def test_pdfreader_loads_data_into_multiple_documents(multi_page_pdf: FPDF) -> None:
with tempfile.NamedTemporaryFile(
mode="w", delete=False, suffix=".pdf"
) as temp_file:
multi_page_pdf.output(temp_file.name)
temp_file_path = Path(temp_file.name)
reader = PDFReader(return_full_document=False)
docs = reader.load_data(temp_file_path)
assert len(docs) == multi_page_pdf.pages_count
for page in range(multi_page_pdf.pages_count):
assert docs[page].text == f"Page {page+1} Content"
os.remove(temp_file.name)
def test_pdfreader_loads_metadata_into_full_document(
multi_page_pdf: FPDF, extra_info: Dict[str, str]
) -> None:
with tempfile.NamedTemporaryFile(
mode="w", delete=False, suffix=".pdf"
) as temp_file:
multi_page_pdf.output(temp_file.name)
temp_file_path = Path(temp_file.name)
expected_metadata = {"file_name": temp_file_path.name}
expected_metadata.update(extra_info)
reader = PDFReader(return_full_document=True)
docs = reader.load_data(temp_file_path, extra_info)
assert len(docs) == 1
assert docs[0].metadata == expected_metadata
os.remove(temp_file.name)
def test_pdfreader_loads_metadata_into_multiple_documents(
multi_page_pdf: FPDF, extra_info: Dict[str, str]
) -> None:
with tempfile.NamedTemporaryFile(
mode="w", delete=False, suffix=".pdf"
) as temp_file:
multi_page_pdf.output(temp_file.name)
temp_file_path = Path(temp_file.name)
expected_metadata = {"file_name": temp_file_path.name}
expected_metadata.update(extra_info)
reader = PDFReader(return_full_document=False)
docs = reader.load_data(temp_file_path, extra_info)
pypdf_pdf = pypdf.PdfReader(temp_file_path)
assert len(docs) == multi_page_pdf.pages_count
for page in range(multi_page_pdf.pages_count):
expected_metadata["page_label"] = pypdf_pdf.page_labels[page]
assert docs[page].metadata == expected_metadata
os.remove(temp_file.name)
|
from urllib.parse import urlparse, urlunparse
import pytest
from llama_index.postprocessor.nvidia_rerank import NVIDIARerank as Interface
from llama_index.postprocessor.nvidia_rerank.utils import BASE_URL
import respx
@pytest.fixture()
def mock_v1_local_models2(respx_mock: respx.MockRouter, base_url: str) -> None:
parsed = urlparse(base_url)
normalized_path = parsed.path.rstrip("/")
if not normalized_path.endswith("/v1"):
normalized_path += "/v1"
base_url = urlunparse(
(parsed.scheme, parsed.netloc, normalized_path, None, None, None)
)
# Intercept GET call for retrieving models using httpx.
respx_mock.get(f"{base_url}/models").respond(
json={
"data": [
{
"id": "model1",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
"root": "model1",
},
]
}
)
# Updated test for non-hosted URLs that may need normalization.
@pytest.mark.parametrize(
"base_url",
[
"http://0.0.0.0:8888/rankings",
"http://0.0.0.0:8888/ranking",
"http://test_url/.../v1",
"https://test_url/.../v1",
],
)
def test_base_url_invalid_not_hosted(
base_url: str, mock_v1_local_models2: None
) -> None:
parsed = urlparse(base_url)
normalized_path = parsed.path.rstrip("/")
# Expect a warning if the URL does NOT already end with "/v1"
if not normalized_path.endswith("/v1"):
with pytest.warns(UserWarning, match="does not end in /v1"):
client = Interface(base_url=base_url)
else:
client = Interface(base_url=base_url)
# Assert that the client's base_url is normalized to end with '/v1'
assert client.base_url.endswith("/v1")
# Updated test for valid non-hosted URL.
@pytest.mark.parametrize(
"base_url",
[
"http://0.0.0.0:8888/v1",
],
)
def test_base_url_valid_not_hosted(base_url: str, mock_v1_local_models2: None) -> None:
# The default model warning is expected in non-hosted mode
with pytest.warns(UserWarning, match="Default model is set") as record:
client = Interface(base_url=base_url)
# Also verify the base_url remains normalized (unchanged in this case)
assert client.base_url.endswith("/v1")
# Updated test for hosted base URL.
@pytest.mark.parametrize(
"base_url",
[BASE_URL],
)
def test_base_url_valid_hosted(base_url: str, mock_v1_local_models2: None) -> None:
client = Interface(base_url=base_url, api_key="BOGUS")
assert client._is_hosted
# Hosted client should use the provided base_url exactly.
assert client.base_url == base_url
# Updated test for proxy base URLs.
@pytest.mark.parametrize(
"base_url",
[
"http://host/path0/path1/path2/v1",
"http://host:123/path0/path1/path2/v1",
],
)
def test_proxy_base_url(base_url: str, mock_v1_local_models2: None) -> None:
client = Interface(api_key="NO_API_KEY_PROVIDED", base_url=base_url)
assert not client._is_hosted
# Since the URL is already normalized, verify it remains unchanged.
assert client.base_url == base_url
@pytest.mark.parametrize(
"base_url",
[
"bogus",
"http:/",
"http://",
"http:/oops",
],
)
def test_param_base_url_negative(base_url: str, monkeypatch) -> None:
monkeypatch.setenv("NVIDIA_API_KEY", "valid_api_key")
with pytest.raises(ValueError) as e:
Interface(model="model1", base_url=base_url)
assert "Invalid base_url" in str(e.value)
|
from urllib.parse import urlparse, urlunparse
import pytest
from requests_mock import Mocker
from llama_index.postprocessor.nvidia_rerank import NVIDIARerank as Interface
@pytest.fixture()
def mock_v1_local_models2(requests_mock: Mocker, base_url: str) -> None:
parsed = urlparse(base_url)
normalized_path = parsed.path.rstrip("/")
if not normalized_path.endswith("/v1"):
normalized_path += "/v1"
base_url = urlunparse(
(parsed.scheme, parsed.netloc, normalized_path, None, None, None)
)
requests_mock.get(
f"{base_url}/models",
json={
"data": [
{
"id": "model1",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
"root": "model1",
},
]
},
)
# test case for invalid base_url
@pytest.mark.parametrize(
"base_url",
[
"http://0.0.0.0:8888/rankings",
"http://0.0.0.0:8888/ranking",
"http://test_url/.../v1",
"https://test_url/.../v1",
],
)
def test_base_url_invalid_not_hosted(
base_url: str, mock_v1_local_models2: None
) -> None:
Interface(base_url=base_url)
@pytest.mark.parametrize(
"base_url",
[
"http://0.0.0.0:8888/v1",
],
)
def test_base_url_valid_not_hosted(base_url: str, mock_v1_local_models2: None) -> None:
with pytest.warns(UserWarning) as record:
Interface(base_url=base_url)
assert "Default model is set" in str(record[0].message)
@pytest.mark.parametrize(
"base_url",
["https://ai.api.nvidia.com/v1"],
)
def test_base_url_valid_hosted(base_url: str, mock_v1_local_models2: None) -> None:
Interface(base_url=base_url, api_key="BOGUS")
@pytest.mark.parametrize(
"base_url",
[
"bogus",
"http:/",
"http://",
"http:/oops",
],
)
def test_param_base_url_negative(base_url: str, monkeypatch) -> None:
monkeypatch.setenv("NVIDIA_API_KEY", "valid_api_key")
with pytest.raises(ValueError) as e:
Interface(model="model1", base_url=base_url)
assert "Invalid base_url" in str(e.value)
@pytest.mark.parametrize(
"base_url",
[
"http://host/path0/path1/path2/v1",
"http://host:123/path0/path1/path2/v1",
],
)
def test_proxy_base_url(base_url: str, mock_v1_local_models2: None) -> None:
client = Interface(api_key="NO_API_KEY_PROVIDED", base_url=base_url)
assert not client._is_hosted
assert base_url.startswith(client.base_url)
|
from enum import Enum
from typing import TYPE_CHECKING, Union, overload
import numpy as np
if TYPE_CHECKING:
import torch
class Pooling(str, Enum):
"""Enum of possible pooling choices with pooling behaviors."""
CLS = "cls"
MEAN = "mean"
LAST = "last" # last token pooling
def __call__(self, array: np.ndarray) -> np.ndarray:
if self == self.CLS:
return self.cls_pooling(array)
elif self == self.LAST:
return self.last_pooling(array)
return self.mean_pooling(array)
@classmethod
@overload
def cls_pooling(cls, array: np.ndarray) -> np.ndarray: ...
@classmethod
@overload
# TODO: Remove this `type: ignore` after the false positive problem
# is addressed in mypy: https://github.com/python/mypy/issues/15683 .
def cls_pooling(cls, array: "torch.Tensor") -> "torch.Tensor": # type: ignore
...
@classmethod
def cls_pooling(
cls, array: "Union[np.ndarray, torch.Tensor]"
) -> "Union[np.ndarray, torch.Tensor]":
if len(array.shape) == 3:
return array[:, 0]
if len(array.shape) == 2:
return array[0]
raise NotImplementedError(f"Unhandled shape {array.shape}.")
@classmethod
def mean_pooling(cls, array: np.ndarray) -> np.ndarray:
if len(array.shape) == 3:
return array.mean(axis=1)
if len(array.shape) == 2:
return array.mean(axis=0)
raise NotImplementedError(f"Unhandled shape {array.shape}.")
@classmethod
@overload
def last_pooling(cls, array: np.ndarray) -> np.ndarray: ...
@classmethod
@overload
# TODO: Remove this `type: ignore` after the false positive problem
# is addressed in mypy: https://github.com/python/mypy/issues/15683 .
def last_pooling(cls, array: "torch.Tensor") -> "torch.Tensor": # type: ignore
...
@classmethod
def last_pooling(
cls, array: "Union[np.ndarray, torch.Tensor]"
) -> "Union[np.ndarray, torch.Tensor]":
if len(array.shape) == 3:
return array[:, -1]
if len(array.shape) == 2:
return array[-1]
raise NotImplementedError(f"Unhandled shape {array.shape}.")
|
from enum import Enum
from typing import TYPE_CHECKING, Union, overload
import numpy as np
if TYPE_CHECKING:
import torch
class Pooling(str, Enum):
"""Enum of possible pooling choices with pooling behaviors."""
CLS = "cls"
MEAN = "mean"
LAST = "last" # last token pooling
def __call__(self, array: np.ndarray) -> np.ndarray:
if self == self.CLS:
return self.cls_pooling(array)
elif self == self.LAST:
return self.last_pooling(array)
return self.mean_pooling(array)
@classmethod
@overload
def cls_pooling(cls, array: np.ndarray) -> np.ndarray:
...
@classmethod
@overload
# TODO: Remove this `type: ignore` after the false positive problem
# is addressed in mypy: https://github.com/python/mypy/issues/15683 .
def cls_pooling(cls, array: "torch.Tensor") -> "torch.Tensor": # type: ignore
...
@classmethod
def cls_pooling(
cls, array: "Union[np.ndarray, torch.Tensor]"
) -> "Union[np.ndarray, torch.Tensor]":
if len(array.shape) == 3:
return array[:, 0]
if len(array.shape) == 2:
return array[0]
raise NotImplementedError(f"Unhandled shape {array.shape}.")
@classmethod
def mean_pooling(cls, array: np.ndarray) -> np.ndarray:
if len(array.shape) == 3:
return array.mean(axis=1)
if len(array.shape) == 2:
return array.mean(axis=0)
raise NotImplementedError(f"Unhandled shape {array.shape}.")
@classmethod
@overload
def last_pooling(cls, array: np.ndarray) -> np.ndarray:
...
@classmethod
@overload
# TODO: Remove this `type: ignore` after the false positive problem
# is addressed in mypy: https://github.com/python/mypy/issues/15683 .
def last_pooling(cls, array: "torch.Tensor") -> "torch.Tensor": # type: ignore
...
@classmethod
def last_pooling(
cls, array: "Union[np.ndarray, torch.Tensor]"
) -> "Union[np.ndarray, torch.Tensor]":
if len(array.shape) == 3:
return array[:, -1]
if len(array.shape) == 2:
return array[-1]
raise NotImplementedError(f"Unhandled shape {array.shape}.")
|
_base_ = ['../_base_/models/retinanet_r50_fpn.py', '../common/ms_3x_coco.py']
# optimizer
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py', '../common/mstrain_3x_coco.py'
]
# optimizer
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; this seems to have no effect, so the variable may still need to be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.22.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful when running matplotlib/seaborn
    with many plot generators executing in parallel, since the Ubuntu default is ulimit -n 1024
    and the OS X El Capitan default is 256. The setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
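# Illustrative usage of the re-exported names above (a minimal sketch, not an executable demo;
# the exact Document API depends on the installed docarray version):
#
#   from jina import Document, Executor, Flow, requests
#
#   class MyExecutor(Executor):
#       @requests
#       def foo(self, docs, **kwargs):
#           ...
#
#   f = Flow().add(uses=MyExecutor)
#   with f:
#       f.post('/', inputs=[Document(text='hello')])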
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; this seems to have no effect, so the variable may still need to be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.22.1'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful when running matplotlib/seaborn
    with many plot generators executing in parallel, since the Ubuntu default is ulimit -n 1024
    and the OS X El Capitan default is 256. The setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST-CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import polar
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import rms_normalization
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import sparse_plus
from keras.src.ops.nn import sparse_sigmoid
from keras.src.ops.nn import sparsemax
from keras.src.ops.nn import squareplus
from keras.src.ops.nn import tanh_shrink
from keras.src.ops.nn import threshold
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import polar
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import rms_normalization
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import sparse_plus
from keras.src.ops.nn import sparsemax
from keras.src.ops.nn import squareplus
from keras.src.ops.nn import tanh_shrink
from keras.src.ops.nn import threshold
|
import base64
import email
from typing import Dict, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.gmail.base import GmailBaseTool
from langchain_community.tools.gmail.utils import clean_email_body
class SearchArgsSchema(BaseModel):
"""Input for GetMessageTool."""
message_id: str = Field(
...,
description="The unique ID of the email message, retrieved from a search.",
)
class GmailGetMessage(GmailBaseTool):
"""Tool that gets a message by ID from Gmail."""
name: str = "get_gmail_message"
description: str = (
"Use this tool to fetch an email by message ID."
" Returns the thread ID, snippet, body, subject, and sender."
)
args_schema: Type[SearchArgsSchema] = SearchArgsSchema
def _run(
self,
message_id: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> Dict:
"""Run the tool."""
query = (
self.api_resource.users()
.messages()
.get(userId="me", format="raw", id=message_id)
)
message_data = query.execute()
raw_message = base64.urlsafe_b64decode(message_data["raw"])
email_msg = email.message_from_bytes(raw_message)
subject = email_msg["Subject"]
sender = email_msg["From"]
message_body = ""
if email_msg.is_multipart():
for part in email_msg.walk():
ctype = part.get_content_type()
cdispo = str(part.get("Content-Disposition"))
if ctype == "text/plain" and "attachment" not in cdispo:
message_body = part.get_payload(decode=True).decode("utf-8") # type: ignore[union-attr]
break
else:
message_body = email_msg.get_payload(decode=True).decode("utf-8") # type: ignore[union-attr]
body = clean_email_body(message_body)
return {
"id": message_id,
"threadId": message_data["threadId"],
"snippet": message_data["snippet"],
"body": body,
"subject": subject,
"sender": sender,
}
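# A hedged usage sketch (assumes an authenticated Gmail API resource object built
# elsewhere; the message ID below is illustrative only):
#
#   tool = GmailGetMessage(api_resource=resource)
#   result = tool.run({"message_id": "17c0f96c2f4e5a3b"})
#   print(result["subject"], result["sender"])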
|
import base64
import email
from typing import Dict, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.gmail.base import GmailBaseTool
from langchain_community.tools.gmail.utils import clean_email_body
class SearchArgsSchema(BaseModel):
"""Input for GetMessageTool."""
message_id: str = Field(
...,
description="The unique ID of the email message, retrieved from a search.",
)
class GmailGetMessage(GmailBaseTool): # type: ignore[override, override]
"""Tool that gets a message by ID from Gmail."""
name: str = "get_gmail_message"
description: str = (
"Use this tool to fetch an email by message ID."
" Returns the thread ID, snippet, body, subject, and sender."
)
args_schema: Type[SearchArgsSchema] = SearchArgsSchema
def _run(
self,
message_id: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> Dict:
"""Run the tool."""
query = (
self.api_resource.users()
.messages()
.get(userId="me", format="raw", id=message_id)
)
message_data = query.execute()
raw_message = base64.urlsafe_b64decode(message_data["raw"])
email_msg = email.message_from_bytes(raw_message)
subject = email_msg["Subject"]
sender = email_msg["From"]
message_body = ""
if email_msg.is_multipart():
for part in email_msg.walk():
ctype = part.get_content_type()
cdispo = str(part.get("Content-Disposition"))
if ctype == "text/plain" and "attachment" not in cdispo:
message_body = part.get_payload(decode=True).decode("utf-8") # type: ignore[union-attr]
break
else:
message_body = email_msg.get_payload(decode=True).decode("utf-8") # type: ignore[union-attr]
body = clean_email_body(message_body)
return {
"id": message_id,
"threadId": message_data["threadId"],
"snippet": message_data["snippet"],
"body": body,
"subject": subject,
"sender": sender,
}
|
import numpy as np
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.proto
def test_simple_proto():
class CustomDoc(BaseDocument):
text: str
tensor: NdArray
da = DocumentArray(
[CustomDoc(text='hello', tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
new_da = DocumentArray[CustomDoc].from_protobuf(da.to_protobuf())
for doc1, doc2 in zip(da, new_da):
assert doc1.text == doc2.text
assert (doc1.tensor == doc2.tensor).all()
@pytest.mark.proto
def test_nested_proto():
class CustomDocument(BaseDocument):
text: TextDoc
image: ImageDoc
da = DocumentArray[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocumentArray[CustomDocument].from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_nested_proto_any_doc():
class CustomDocument(BaseDocument):
text: TextDoc
image: ImageDoc
da = DocumentArray[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocumentArray.from_protobuf(da.to_protobuf())
|
import numpy as np
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.array.stacked.array_stacked import DocumentArrayStacked
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.proto
def test_simple_proto():
class CustomDoc(BaseDocument):
text: str
tensor: NdArray
da = DocumentArray(
[CustomDoc(text='hello', tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
new_da = DocumentArray[CustomDoc].from_protobuf(da.to_protobuf())
for doc1, doc2 in zip(da, new_da):
assert doc1.text == doc2.text
assert (doc1.tensor == doc2.tensor).all()
@pytest.mark.proto
def test_nested_proto():
class CustomDocument(BaseDocument):
text: TextDoc
image: ImageDoc
da = DocumentArray[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocumentArray[CustomDocument].from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_nested_proto_any_doc():
class CustomDocument(BaseDocument):
text: TextDoc
image: ImageDoc
da = DocumentArray[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocumentArray.from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_stacked_proto():
class CustomDocument(BaseDocument):
image: NdArray
da = DocumentArray[CustomDocument](
[CustomDocument(image=np.zeros((3, 224, 224))) for _ in range(10)]
).stack()
da2 = DocumentArrayStacked.from_protobuf(da.to_protobuf())
assert isinstance(da2, DocumentArrayStacked)
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
from .version import __version__, short_version
def digit_version(version_str):
digit_version = []
for x in version_str.split('.'):
if x.isdigit():
digit_version.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
digit_version.append(int(patch_version[0]) - 1)
digit_version.append(int(patch_version[1]))
return digit_version
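# A quick illustrative trace (hand-checked, not from the original source): an 'rc'
# pre-release parses so that it sorts below the corresponding final release, e.g.
#   digit_version('1.3.17')   -> [1, 3, 17]
#   digit_version('1.5.0rc1') -> [1, 5, -1, 1]   # compares lower than [1, 5, 0]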
mmcv_minimum_version = '1.3.17'
mmcv_maximum_version = '1.6.0'
mmcv_version = digit_version(mmcv.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version <= digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'
__all__ = ['__version__', 'short_version']
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
from .version import __version__, short_version
def digit_version(version_str):
digit_version = []
for x in version_str.split('.'):
if x.isdigit():
digit_version.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
digit_version.append(int(patch_version[0]) - 1)
digit_version.append(int(patch_version[1]))
return digit_version
mmcv_minimum_version = '1.3.17'
mmcv_maximum_version = '1.5.0'
mmcv_version = digit_version(mmcv.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version <= digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'
__all__ = ['__version__', 'short_version']
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.cnn import is_norm
from torch.nn.modules import GroupNorm
from mmdet.models.utils import InvertedResidual, SELayer
def test_inverted_residual():
with pytest.raises(AssertionError):
# stride must be in [1, 2]
InvertedResidual(16, 16, 32, stride=3)
with pytest.raises(AssertionError):
# se_cfg must be None or dict
InvertedResidual(16, 16, 32, se_cfg=list())
with pytest.raises(AssertionError):
        # in_channels and mid_channels must be the same if
# with_expand_conv is False
InvertedResidual(16, 16, 32, with_expand_conv=False)
# Test InvertedResidual forward, stride=1
block = InvertedResidual(16, 16, 32, stride=1)
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
assert getattr(block, 'se', None) is None
assert block.with_res_shortcut
assert x_out.shape == torch.Size((1, 16, 56, 56))
# Test InvertedResidual forward, stride=2
block = InvertedResidual(16, 16, 32, stride=2)
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
assert not block.with_res_shortcut
assert x_out.shape == torch.Size((1, 16, 28, 28))
# Test InvertedResidual forward with se layer
se_cfg = dict(channels=32)
block = InvertedResidual(16, 16, 32, stride=1, se_cfg=se_cfg)
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
assert isinstance(block.se, SELayer)
assert x_out.shape == torch.Size((1, 16, 56, 56))
# Test InvertedResidual forward, with_expand_conv=False
block = InvertedResidual(32, 16, 32, with_expand_conv=False)
x = torch.randn(1, 32, 56, 56)
x_out = block(x)
assert getattr(block, 'expand_conv', None) is None
assert x_out.shape == torch.Size((1, 16, 56, 56))
# Test InvertedResidual forward with GroupNorm
block = InvertedResidual(
16, 16, 32, norm_cfg=dict(type='GN', num_groups=2))
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
for m in block.modules():
if is_norm(m):
assert isinstance(m, GroupNorm)
assert x_out.shape == torch.Size((1, 16, 56, 56))
# Test InvertedResidual forward with HSigmoid
block = InvertedResidual(16, 16, 32, act_cfg=dict(type='HSigmoid'))
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size((1, 16, 56, 56))
# Test InvertedResidual forward with checkpoint
block = InvertedResidual(16, 16, 32, with_cp=True)
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
assert block.with_cp
assert x_out.shape == torch.Size((1, 16, 56, 56))
|
import pytest
import torch
from mmcv.cnn import is_norm
from torch.nn.modules import GroupNorm
from mmdet.models.utils import InvertedResidual, SELayer
def test_inverted_residual():
with pytest.raises(AssertionError):
# stride must be in [1, 2]
InvertedResidual(16, 16, 32, stride=3)
with pytest.raises(AssertionError):
# se_cfg must be None or dict
InvertedResidual(16, 16, 32, se_cfg=list())
with pytest.raises(AssertionError):
        # in_channels and mid_channels must be the same if
# with_expand_conv is False
InvertedResidual(16, 16, 32, with_expand_conv=False)
# Test InvertedResidual forward, stride=1
block = InvertedResidual(16, 16, 32, stride=1)
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
assert getattr(block, 'se', None) is None
assert block.with_res_shortcut
assert x_out.shape == torch.Size((1, 16, 56, 56))
# Test InvertedResidual forward, stride=2
block = InvertedResidual(16, 16, 32, stride=2)
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
assert not block.with_res_shortcut
assert x_out.shape == torch.Size((1, 16, 28, 28))
# Test InvertedResidual forward with se layer
se_cfg = dict(channels=32)
block = InvertedResidual(16, 16, 32, stride=1, se_cfg=se_cfg)
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
assert isinstance(block.se, SELayer)
assert x_out.shape == torch.Size((1, 16, 56, 56))
# Test InvertedResidual forward, with_expand_conv=False
block = InvertedResidual(32, 16, 32, with_expand_conv=False)
x = torch.randn(1, 32, 56, 56)
x_out = block(x)
assert getattr(block, 'expand_conv', None) is None
assert x_out.shape == torch.Size((1, 16, 56, 56))
# Test InvertedResidual forward with GroupNorm
block = InvertedResidual(
16, 16, 32, norm_cfg=dict(type='GN', num_groups=2))
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
for m in block.modules():
if is_norm(m):
assert isinstance(m, GroupNorm)
assert x_out.shape == torch.Size((1, 16, 56, 56))
# Test InvertedResidual forward with HSigmoid
block = InvertedResidual(16, 16, 32, act_cfg=dict(type='HSigmoid'))
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size((1, 16, 56, 56))
# Test InvertedResidual forward with checkpoint
block = InvertedResidual(16, 16, 32, with_cp=True)
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
assert block.with_cp
assert x_out.shape == torch.Size((1, 16, 56, 56))
|
# training schedule for 1x
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
|
# training schedule for 1x
train_cfg = dict(by_epoch=True, max_epochs=12)
val_cfg = dict(interval=1)
test_cfg = dict()
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
|
# coding: utf-8
from pathlib import Path
import pandas as pd
import lightgbm as lgb
if lgb.compat.MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
else:
raise ImportError('You need to install matplotlib and restart your session for plot_example.py.')
print('Loading data...')
# load or create your dataset
regression_example_dir = Path(__file__).absolute().parents[1] / 'regression'
df_train = pd.read_csv(str(regression_example_dir / 'regression.train'), header=None, sep='\t')
df_test = pd.read_csv(str(regression_example_dir / 'regression.test'), header=None, sep='\t')
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_test = lgb.Dataset(X_test, y_test, reference=lgb_train)
# specify your configurations as a dict
params = {
'num_leaves': 5,
'metric': ('l1', 'l2'),
'verbose': 0
}
evals_result = {} # to record eval results for plotting
print('Starting training...')
# train
gbm = lgb.train(params,
lgb_train,
num_boost_round=100,
valid_sets=[lgb_train, lgb_test],
feature_name=[f'f{i + 1}' for i in range(X_train.shape[-1])],
categorical_feature=[21],
evals_result=evals_result,
callbacks=[lgb.log_evaluation(10)])
print('Plotting metrics recorded during training...')
ax = lgb.plot_metric(evals_result, metric='l1')
plt.show()
print('Plotting feature importances...')
ax = lgb.plot_importance(gbm, max_num_features=10)
plt.show()
print('Plotting split value histogram...')
ax = lgb.plot_split_value_histogram(gbm, feature='f26', bins='auto')
plt.show()
print('Plotting 54th tree...')  # this tree uses a categorical feature to split
ax = lgb.plot_tree(gbm, tree_index=53, figsize=(15, 15), show_info=['split_gain'])
plt.show()
print('Plotting 54th tree with graphviz...')
graph = lgb.create_tree_digraph(gbm, tree_index=53, name='Tree54')
graph.render(view=True)
|
# coding: utf-8
from pathlib import Path
import pandas as pd
import lightgbm as lgb
if lgb.compat.MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
else:
raise ImportError('You need to install matplotlib and restart your session for plot_example.py.')
print('Loading data...')
# load or create your dataset
regression_example_dir = Path(__file__).absolute().parents[1] / 'regression'
df_train = pd.read_csv(str(regression_example_dir / 'regression.train'), header=None, sep='\t')
df_test = pd.read_csv(str(regression_example_dir / 'regression.test'), header=None, sep='\t')
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_test = lgb.Dataset(X_test, y_test, reference=lgb_train)
# specify your configurations as a dict
params = {
'num_leaves': 5,
'metric': ('l1', 'l2'),
'verbose': 0
}
evals_result = {} # to record eval results for plotting
print('Starting training...')
# train
gbm = lgb.train(params,
lgb_train,
num_boost_round=100,
valid_sets=[lgb_train, lgb_test],
feature_name=[f'f{i + 1}' for i in range(X_train.shape[-1])],
categorical_feature=[21],
evals_result=evals_result,
verbose_eval=10)
print('Plotting metrics recorded during training...')
ax = lgb.plot_metric(evals_result, metric='l1')
plt.show()
print('Plotting feature importances...')
ax = lgb.plot_importance(gbm, max_num_features=10)
plt.show()
print('Plotting split value histogram...')
ax = lgb.plot_split_value_histogram(gbm, feature='f26', bins='auto')
plt.show()
print('Plotting 54th tree...')  # this tree uses a categorical feature to split
ax = lgb.plot_tree(gbm, tree_index=53, figsize=(15, 15), show_info=['split_gain'])
plt.show()
print('Plotting 54th tree with graphviz...')
graph = lgb.create_tree_digraph(gbm, tree_index=53, name='Tree54')
graph.render(view=True)
|
_base_ = './htc_without_semantic_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(pad_seg=True),
roi_head=dict(
semantic_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[8]),
semantic_head=dict(
type='FusedSemanticHead',
num_ins=5,
fusion_level=1,
seg_scale_factor=1 / 8,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2))))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(
dataset=dict(
data_prefix=dict(img='train2017/', seg='stuffthingmaps/train2017/'),
pipeline=train_pipeline))
|
_base_ = './htc_without_semantic_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
semantic_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[8]),
semantic_head=dict(
type='FusedSemanticHead',
num_ins=5,
fusion_level=1,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2))))
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 8),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(
seg_prefix=data_root + 'stuffthingmaps/train2017/',
pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import List
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from paddle_image import ImagePaddlehubEncoder
@pytest.mark.parametrize(
'arr_in',
[
(np.ones((3, 224, 224), dtype=np.float32)),
(np.ones((3, 100, 100), dtype=np.float32)),
(np.ones((3, 50, 40), dtype=np.float32)),
],
)
def test_paddle_no_batch(arr_in: np.ndarray):
flow = Flow().add(uses=ImagePaddlehubEncoder)
with flow:
results = flow.post(
on='/test',
inputs=DocumentArray([Document(blob=arr_in)]),
return_results=True,
)
assert len(results[0].docs) == 1
assert results[0].docs[0].embedding.shape == (2048,)
def test_paddle_batch():
flow = Flow().add(uses=ImagePaddlehubEncoder)
with flow:
results = flow.post(
on='/test',
inputs=(
Document(blob=np.ones((3, 224, 224), dtype=np.float32))
for _ in range(25)
),
return_results=True,
)
assert len(results[0].docs.get_attributes('embedding')) == 25
assert results[0].docs.get_attributes('embedding')[0].shape == (2048,)
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_paths'],
[
(pytest.lazy_fixture('docs_with_blobs'), [['r', 10], ['c', 0], ['cc', 0]], 'r'),
(
pytest.lazy_fixture('docs_with_chunk_blobs'),
[['r', 0], ['c', 10], ['cc', 0]],
'c',
),
(
pytest.lazy_fixture('docs_with_chunk_chunk_blobs'),
[['r', 0], ['c', 0], ['cc', 10]],
'cc',
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_paths: str
):
flow = Flow().add(uses=ImagePaddlehubEncoder)
with flow:
results = flow.post(
on='/test',
inputs=docs,
parameters={'traversal_paths': [traversal_paths]},
return_results=True,
)
for path, count in docs_per_path:
embeddings = (
DocumentArray(results[0].docs)
.traverse_flat([path])
.get_attributes('embedding')
)
assert len([em for em in embeddings if em is not None]) == count
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import List
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from ...paddle_image import ImagePaddlehubEncoder
@pytest.mark.parametrize(
'arr_in',
[
(np.ones((3, 224, 224), dtype=np.float32)),
(np.ones((3, 100, 100), dtype=np.float32)),
(np.ones((3, 50, 40), dtype=np.float32)),
],
)
def test_paddle_no_batch(arr_in: np.ndarray):
flow = Flow().add(uses=ImagePaddlehubEncoder)
with flow:
results = flow.post(
on='/test',
inputs=DocumentArray([Document(blob=arr_in)]),
return_results=True,
)
assert len(results[0].docs) == 1
assert results[0].docs[0].embedding.shape == (2048,)
def test_paddle_batch():
flow = Flow().add(uses=ImagePaddlehubEncoder)
with flow:
results = flow.post(
on='/test',
inputs=(
Document(blob=np.ones((3, 224, 224), dtype=np.float32))
for _ in range(25)
),
return_results=True,
)
assert len(results[0].docs.get_attributes('embedding')) == 25
assert results[0].docs.get_attributes('embedding')[0].shape == (2048,)
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_paths'],
[
(pytest.lazy_fixture('docs_with_blobs'), [['r', 10], ['c', 0], ['cc', 0]], 'r'),
(
pytest.lazy_fixture('docs_with_chunk_blobs'),
[['r', 0], ['c', 10], ['cc', 0]],
'c',
),
(
pytest.lazy_fixture('docs_with_chunk_chunk_blobs'),
[['r', 0], ['c', 0], ['cc', 10]],
'cc',
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_paths: str
):
flow = Flow().add(uses=ImagePaddlehubEncoder)
with flow:
results = flow.post(
on='/test',
inputs=docs,
parameters={'traversal_paths': [traversal_paths]},
return_results=True,
)
for path, count in docs_per_path:
embeddings = (
DocumentArray(results[0].docs)
.traverse_flat([path])
.get_attributes('embedding')
)
assert len([em for em in embeddings if em is not None]) == count
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .data import *
from .dataset import *
from .device import *
from .fileio import *
from .hooks import *
from .logging import *
from .registry import *
from .runner import *
from .utils import *
from .version import __version__, version_info
from .visualization import *
|
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .data import *
from .dataset import *
from .device import *
from .fileio import *
from .hooks import *
from .logging import *
from .registry import *
from .runner import *
from .utils import *
from .visualization import *
|
from typing import overload, TYPE_CHECKING, Union, Callable, Optional, Tuple
if TYPE_CHECKING:
from docarray import DocumentArray
from docarray.typing import AnyDNN, T, ArrayType
import numpy as np
class SingletonSugarMixin:
"""Provide sugary syntax for :class:`Document` by inheriting methods from :class:`DocumentArray`"""
@overload
def match(
self: 'T',
darray: 'DocumentArray',
metric: Union[
str, Callable[['ArrayType', 'ArrayType'], 'np.ndarray']
] = 'cosine',
limit: Optional[Union[int, float]] = 20,
normalization: Optional[Tuple[float, float]] = None,
metric_name: Optional[str] = None,
batch_size: Optional[int] = None,
exclude_self: bool = False,
only_id: bool = False,
use_scipy: bool = False,
num_worker: Optional[int] = 1,
) -> 'T':
"""Matching the current Document against a set of Documents.
The result will be stored in :attr:`.matches`.
.. note::
            When you want to match a set of Documents (let's call it set `A`) against another set of Documents (set `B`),
            i.e. for each element in `A` you want to find its nearest neighbours in `B`,
            then you need :meth:`DocumentArray.match`.
:param darray: the other DocumentArray to match against
:param metric: the distance metric
:param limit: the maximum number of matches, when not given defaults to 20.
:param normalization: a tuple [a, b] to be used with min-max normalization,
the min distance will be rescaled to `a`, the max distance will be rescaled to `b`
all values will be rescaled into range `[a, b]`.
:param metric_name: if provided, then match result will be marked with this string.
:param batch_size: if provided, then ``darray`` is loaded in batches, where each of them is at most ``batch_size``
elements. When `darray` is big, this can significantly speedup the computation.
:param exclude_self: if set, Documents in ``darray`` with same ``id`` as the left-hand values will not be
considered as matches.
:param only_id: if set, then returning matches will only contain ``id``
:param use_scipy: if set, use ``scipy`` as the computation backend. Note, ``scipy`` does not support distance
on sparse matrix.
:param num_worker: the number of parallel workers. If not given, then the number of CPUs in the system will be used.
.. note::
This argument is only effective when ``batch_size`` is set.
"""
...
def match(self: 'T', *args, **kwargs) -> 'T':
from docarray import DocumentArray
_tmp = DocumentArray(self)
_tmp.match(*args, **kwargs)
return self
@overload
def embed(
self: 'T',
embed_model: 'AnyDNN',
device: str = 'cpu',
batch_size: int = 256,
) -> 'T':
"""Fill the embedding of Documents inplace by using `embed_model`
:param embed_model: the embedding model written in Keras/Pytorch/Paddle
:param device: the computational device for `embed_model`, can be either
`cpu` or `cuda`.
:param batch_size: number of Documents in a batch for embedding
"""
def embed(self: 'T', *args, **kwargs) -> 'T':
from docarray import DocumentArray
_tmp = DocumentArray(self)
_tmp.embed(*args, **kwargs)
return self
def post(self: 'T', *args, **kwargs) -> 'T':
from docarray import DocumentArray
_tmp = DocumentArray(self)
return _tmp.post(*args, **kwargs)[0]
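# A hedged usage sketch (names are illustrative; assumes `doc` is a Document and
# `index` is a DocumentArray whose Documents carry embeddings): the mixin lets a
# single Document be matched directly, storing the result in `doc.matches`.
#
#   doc.match(index, metric='cosine', limit=5)
#   for m in doc.matches:
#       print(m.id, m.scores['cosine'].value)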
|
from typing import overload, TYPE_CHECKING, Union, Callable, Optional, Tuple
if TYPE_CHECKING:
from ... import DocumentArray
from ...typing import AnyDNN, T, ArrayType
import numpy as np
class SingletonSugarMixin:
"""Provide sugary syntax for :class:`Document` by inheriting methods from :class:`DocumentArray`"""
@overload
def match(
self: 'T',
darray: 'DocumentArray',
metric: Union[
str, Callable[['ArrayType', 'ArrayType'], 'np.ndarray']
] = 'cosine',
limit: Optional[Union[int, float]] = 20,
normalization: Optional[Tuple[float, float]] = None,
metric_name: Optional[str] = None,
batch_size: Optional[int] = None,
exclude_self: bool = False,
only_id: bool = False,
use_scipy: bool = False,
num_worker: Optional[int] = 1,
) -> 'T':
"""Matching the current Document against a set of Documents.
The result will be stored in :attr:`.matches`.
.. note::
            When you want to match a set of Documents (let's call it set `A`) against another set of Documents (set `B`),
            i.e. for each element in `A` you want to find its nearest neighbours in `B`,
            then you need :meth:`DocumentArray.match`.
:param darray: the other DocumentArray to match against
:param metric: the distance metric
:param limit: the maximum number of matches, when not given defaults to 20.
:param normalization: a tuple [a, b] to be used with min-max normalization,
the min distance will be rescaled to `a`, the max distance will be rescaled to `b`
all values will be rescaled into range `[a, b]`.
:param metric_name: if provided, then match result will be marked with this string.
:param batch_size: if provided, then ``darray`` is loaded in batches, where each of them is at most ``batch_size``
elements. When `darray` is big, this can significantly speedup the computation.
:param exclude_self: if set, Documents in ``darray`` with same ``id`` as the left-hand values will not be
considered as matches.
:param only_id: if set, then returning matches will only contain ``id``
:param use_scipy: if set, use ``scipy`` as the computation backend. Note, ``scipy`` does not support distance
on sparse matrix.
:param num_worker: the number of parallel workers. If not given, then the number of CPUs in the system will be used.
.. note::
This argument is only effective when ``batch_size`` is set.
"""
...
def match(self: 'T', *args, **kwargs) -> 'T':
from ... import DocumentArray
_tmp = DocumentArray(self)
_tmp.match(*args, **kwargs)
return self
@overload
def embed(
self: 'T',
embed_model: 'AnyDNN',
device: str = 'cpu',
batch_size: int = 256,
) -> 'T':
"""Fill the embedding of Documents inplace by using `embed_model`
:param embed_model: the embedding model written in Keras/Pytorch/Paddle
:param device: the computational device for `embed_model`, can be either
`cpu` or `cuda`.
:param batch_size: number of Documents in a batch for embedding
"""
def embed(self: 'T', *args, **kwargs) -> 'T':
from ... import DocumentArray
_tmp = DocumentArray(self)
_tmp.embed(*args, **kwargs)
return self
def post(self: 'T', *args, **kwargs) -> 'T':
from ... import DocumentArray
_tmp = DocumentArray(self)
return _tmp.post(*args, **kwargs)[0]
|
import json
import os
import pytest
from hubble.executor import HubExecutor
from hubble.executor.hubio import HubIO
from jina import __version__
from jina.orchestrate.deployments.config.helper import (
get_base_executor_version,
get_image_name,
to_compatible_name,
)
@pytest.mark.parametrize('is_master', (True, False))
def test_version(is_master, requests_mock):
if is_master:
count = 0
else:
# current version is published already
count = 3
requests_mock.get(
'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags',
text=json.dumps(
{
'count': count,
'next': 'abc',
'previous': 'def',
'results': [{'a': 'b', 'c': 'd'}],
}
),
)
v = get_base_executor_version()
if is_master:
assert v == 'master'
else:
assert v == __version__
def test_to_compatible_name():
assert to_compatible_name('executor/hey-ha_HO') == 'executor-hey-ha-ho'
@pytest.mark.skip('jinahub not available')
@pytest.mark.parametrize('uses', ['jinaai://jina-ai/DummyExecutor'])
def test_get_image_name(mocker, monkeypatch, uses):
mock = mocker.Mock()
def _mock_fetch(
name,
*args,
**kwargs,
):
mock(
name=name,
rebuild_image=kwargs.get(
'rebuild_image', args[2] if len(args) >= 3 else True
),
)
return (
HubExecutor(
uuid='hello',
name=name,
tag='v0',
image_name=f'jinahub/{name}',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
image_name = get_image_name(uses)
assert image_name in {'jinahub/DummyExecutor', 'jinahub/jina-ai/DummyExecutor'}
_, mock_kwargs = mock.call_args_list[0]
assert mock_kwargs['rebuild_image'] is True # default value must be True
os.environ['JINA_HUB_NO_IMAGE_REBUILD'] = '1'
get_image_name(uses)
del os.environ['JINA_HUB_NO_IMAGE_REBUILD']
_, mock_kwargs = mock.call_args_list[1]
assert mock_kwargs['rebuild_image'] is False # env var is set, so it must be False
|
import json
import os
import pytest
from hubble.executor import HubExecutor
from hubble.executor.hubio import HubIO
from jina import __version__
from jina.orchestrate.deployments.config.helper import (
get_base_executor_version,
get_image_name,
to_compatible_name,
)
@pytest.mark.parametrize('is_master', (True, False))
def test_version(is_master, requests_mock):
if is_master:
count = 0
else:
# current version is published already
count = 3
requests_mock.get(
'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags',
text=json.dumps(
{
'count': count,
'next': 'abc',
'previous': 'def',
'results': [{'a': 'b', 'c': 'd'}],
}
),
)
v = get_base_executor_version()
if is_master:
assert v == 'master'
else:
assert v == __version__
def test_to_compatible_name():
assert to_compatible_name('executor/hey-ha_HO') == 'executor-hey-ha-ho'
@pytest.mark.parametrize('uses', ['jinaai://jina-ai/DummyExecutor'])
def test_get_image_name(mocker, monkeypatch, uses):
mock = mocker.Mock()
def _mock_fetch(
name,
*args,
**kwargs,
):
mock(
name=name,
rebuild_image=kwargs.get(
'rebuild_image', args[2] if len(args) >= 3 else True
),
)
return (
HubExecutor(
uuid='hello',
name=name,
tag='v0',
image_name=f'jinahub/{name}',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
image_name = get_image_name(uses)
assert image_name in {'jinahub/DummyExecutor', 'jinahub/jina-ai/DummyExecutor'}
_, mock_kwargs = mock.call_args_list[0]
assert mock_kwargs['rebuild_image'] is True # default value must be True
os.environ['JINA_HUB_NO_IMAGE_REBUILD'] = '1'
get_image_name(uses)
del os.environ['JINA_HUB_NO_IMAGE_REBUILD']
_, mock_kwargs = mock.call_args_list[1]
assert mock_kwargs['rebuild_image'] is False # env var is set, so it must be False
|
# Copyright (c) OpenMMLab. All rights reserved.
from ._fast_stop_training_hook import FastStopTrainingHook # noqa: F401,F403
from ._utils import (demo_mm_inputs, demo_mm_proposals,
demo_mm_sampling_results, get_detector_cfg,
get_roi_head_cfg)
__all__ = [
'demo_mm_inputs', 'get_detector_cfg', 'get_roi_head_cfg',
'demo_mm_proposals', 'demo_mm_sampling_results'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from ._utils import (demo_mm_inputs, demo_mm_proposals,
demo_mm_sampling_results, get_detector_cfg,
get_roi_head_cfg)
__all__ = [
'demo_mm_inputs', 'get_detector_cfg', 'get_roi_head_cfg',
'demo_mm_proposals', 'demo_mm_sampling_results'
]
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')]
visualizer = dict(vis_backends=vis_backends)
# MMEngine supports the following two ways; users can choose
# whichever is more convenient
# default_hooks = dict(checkpoint=dict(interval=4))
_base_.default_hooks.checkpoint.interval = 4
# train_cfg = dict(val_interval=2)
_base_.train_cfg.val_interval = 2
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
vis_backends = [dict(type='LocalVisBackend'), dict(type='WandBVisBackend')]
visualizer = dict(vis_backends=vis_backends)
# MMEngine supports the following two ways; users can choose
# whichever is more convenient
# default_hooks = dict(checkpoint=dict(interval=4))
_base_.default_hooks.checkpoint.interval = 4
# train_cfg = dict(val_interval=2)
_base_.train_cfg.val_interval = 2
|
import inspect
import re
from typing import Dict, List, Tuple
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql
from .text import text
from .webdataset import webdataset
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return insecure_hashlib.sha256(full_bytes).hexdigest()
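# A small hedged example: whole-line comments are stripped and empty lines dropped
# before hashing, so both calls below yield the same digest.
#   _hash_python_lines(["# a comment", "x = 1"]) == _hash_python_lines(["x = 1"])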
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
"webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE: Dict[str, Tuple[str, dict]] = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".geoparquet": ("parquet", {}),
".gpq": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
".tar": ("webdataset", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
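# An illustrative sketch of the resulting mapping (subset only, derived from the
# tables above): every module also accepts ".zip" archives, e.g.
#   _MODULE_TO_EXTENSIONS["csv"]  -> [".csv", ".tsv", ".zip"]
#   _MODULE_TO_EXTENSIONS["json"] -> [".json", ".jsonl", ".zip"]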
|
import inspect
import re
from typing import Dict, List, Tuple
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache # noqa F401
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
from .webdataset import webdataset
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return insecure_hashlib.sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
"webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE: Dict[str, Tuple[str, dict]] = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".geoparquet": ("parquet", {}),
".gpq": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
".tar": ("webdataset", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix the fork error on macOS; setting it here seems to have no effect, so export it manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.18'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, which is useful when running matplotlib/seaborn
    with parallel plot generators, where the Ubuntu default of `ulimit -n 1024` or the
    OS X El Capitan default of 256 is too low. The setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST-CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix the fork error on macOS; setting it here seems to have no effect, so export it manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.17'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, which is useful when running matplotlib/seaborn
    with parallel plot generators, where the Ubuntu default of `ulimit -n 1024` or the
    OS X El Capitan default of 256 is too low. The setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST-CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
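# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): the re-exported
# first-class citizens above are meant to be imported straight from the
# top-level package. The port number and Executor below are illustrative.
#
#   from jina import Executor, Flow, requests
#
#   class MyExec(Executor):
#       @requests
#       def foo(self, docs, **kwargs):
#           ...
#
#   with Flow(port=12345).add(uses=MyExec) as f:
#       f.block()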
|
from langchain_core.document_loaders import BaseBlobParser, BaseLoader
__all__ = ["BaseBlobParser", "BaseLoader"]
|
from langchain_core.document_loaders import BaseBlobParser, BaseLoader
__all__ = ["BaseLoader", "BaseBlobParser"]
|
"""Utilities to route metadata within scikit-learn estimators."""
# This module is not a separate sub-folder since that would result in a circular
# import issue.
#
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._metadata_requests import ( # noqa: F401
UNCHANGED,
UNUSED,
WARN,
MetadataRequest,
MetadataRouter,
MethodMapping,
_MetadataRequester,
_raise_for_params,
_raise_for_unsupported_routing,
_routing_enabled,
_RoutingNotSupportedMixin,
get_routing_for_object,
process_routing,
)
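# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): with routing enabled, a
# consumer can declare which metadata its methods accept. The estimator and the
# `sample_weight` key below are illustrative.
#
#   import sklearn
#   from sklearn.linear_model import LogisticRegression
#
#   sklearn.set_config(enable_metadata_routing=True)
#   est = LogisticRegression().set_fit_request(sample_weight=True)
#   print(get_routing_for_object(est))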
|
"""Utilities to route metadata within scikit-learn estimators."""
# This module is not a separate sub-folder since that would result in a circular
# import issue.
#
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._metadata_requests import WARN, UNUSED, UNCHANGED # noqa
from ._metadata_requests import get_routing_for_object # noqa
from ._metadata_requests import MetadataRouter # noqa
from ._metadata_requests import MetadataRequest # noqa
from ._metadata_requests import MethodMapping # noqa
from ._metadata_requests import process_routing # noqa
from ._metadata_requests import _MetadataRequester # noqa
from ._metadata_requests import _routing_enabled # noqa
from ._metadata_requests import _raise_for_params # noqa
from ._metadata_requests import _RoutingNotSupportedMixin # noqa
from ._metadata_requests import _raise_for_unsupported_routing # noqa
|
from functools import partial
from typing import Any, Optional
import torch
import torch.nn as nn
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = ["AlexNet", "AlexNet_Weights", "alexnet"]
class AlexNet(nn.Module):
def __init__(self, num_classes: int = 1000, dropout: float = 0.5) -> None:
super().__init__()
_log_api_usage_once(self)
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.classifier = nn.Sequential(
nn.Dropout(p=dropout),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(p=dropout),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
class AlexNet_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/alexnet-owt-7be5be79.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
"num_params": 61100840,
"min_size": (63, 63),
"categories": _IMAGENET_CATEGORIES,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#alexnet-and-vgg",
"_metrics": {
"ImageNet-1K": {
"acc@1": 56.522,
"acc@5": 79.066,
}
},
"_ops": 0.714,
"_file_size": 233.087,
"_docs": """
These weights reproduce closely the results of the paper using a simplified training recipe.
""",
},
)
DEFAULT = IMAGENET1K_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", AlexNet_Weights.IMAGENET1K_V1))
def alexnet(*, weights: Optional[AlexNet_Weights] = None, progress: bool = True, **kwargs: Any) -> AlexNet:
"""AlexNet model architecture from `One weird trick for parallelizing convolutional neural networks <https://arxiv.org/abs/1404.5997>`__.
.. note::
AlexNet was originally introduced in the `ImageNet Classification with
Deep Convolutional Neural Networks
<https://papers.nips.cc/paper/2012/hash/c399862d3b9d6b76c8436e924a68c45b-Abstract.html>`__
paper. Our implementation is based instead on the "One weird trick"
paper above.
Args:
weights (:class:`~torchvision.models.AlexNet_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.AlexNet_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.AlexNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/alexnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.AlexNet_Weights
:members:
"""
weights = AlexNet_Weights.verify(weights)
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = AlexNet(**kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress))
return model
# The dictionary below is internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs
model_urls = _ModelURLs(
{
"alexnet": AlexNet_Weights.IMAGENET1K_V1.url,
}
)
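# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): the model is normally
# consumed through the public torchvision entry point; the input size below
# matches the crop_size=224 preset attached to the weights.
#
#   from torchvision.models import AlexNet_Weights, alexnet
#
#   model = alexnet(weights=AlexNet_Weights.IMAGENET1K_V1)
#   model.eval()
#   with torch.no_grad():
#       logits = model(torch.rand(1, 3, 224, 224))  # -> shape (1, 1000)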
|
from functools import partial
from typing import Any, Optional
import torch
import torch.nn as nn
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = ["AlexNet", "AlexNet_Weights", "alexnet"]
class AlexNet(nn.Module):
def __init__(self, num_classes: int = 1000, dropout: float = 0.5) -> None:
super().__init__()
_log_api_usage_once(self)
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.classifier = nn.Sequential(
nn.Dropout(p=dropout),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(p=dropout),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
class AlexNet_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/alexnet-owt-7be5be79.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
"num_params": 61100840,
"min_size": (63, 63),
"categories": _IMAGENET_CATEGORIES,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#alexnet-and-vgg",
"_metrics": {
"ImageNet-1K": {
"acc@1": 56.522,
"acc@5": 79.066,
}
},
"_ops": 0.714,
"_weight_size": 233.087,
"_docs": """
These weights reproduce closely the results of the paper using a simplified training recipe.
""",
},
)
DEFAULT = IMAGENET1K_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", AlexNet_Weights.IMAGENET1K_V1))
def alexnet(*, weights: Optional[AlexNet_Weights] = None, progress: bool = True, **kwargs: Any) -> AlexNet:
"""AlexNet model architecture from `One weird trick for parallelizing convolutional neural networks <https://arxiv.org/abs/1404.5997>`__.
.. note::
AlexNet was originally introduced in the `ImageNet Classification with
Deep Convolutional Neural Networks
<https://papers.nips.cc/paper/2012/hash/c399862d3b9d6b76c8436e924a68c45b-Abstract.html>`__
paper. Our implementation is based instead on the "One weird trick"
paper above.
Args:
weights (:class:`~torchvision.models.AlexNet_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.AlexNet_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.AlexNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/alexnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.AlexNet_Weights
:members:
"""
weights = AlexNet_Weights.verify(weights)
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = AlexNet(**kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress))
return model
# The dictionary below is internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs
model_urls = _ModelURLs(
{
"alexnet": AlexNet_Weights.IMAGENET1K_V1.url,
}
)
|
import warnings
from typing import Any, Dict, Union
import numpy as np
import PIL.Image
import torch
from torchvision.transforms import functional as _F
from torchvision.transforms.v2 import Transform
class ToTensor(Transform):
"""[BETA] Convert a PIL Image or ndarray to tensor and scale the values accordingly.
.. v2betastatus:: ToTensor transform
.. warning::
:class:`v2.ToTensor` is deprecated and will be removed in a future release.
Please use instead ``v2.Compose([transforms.ToImageTensor(), v2.ToDtype(torch.float32, scale=True)])``.
This transform does not support torchscript.
Converts a PIL Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)
    or if the numpy.ndarray has dtype = np.uint8.
    In the other cases, tensors are returned without scaling.
.. note::
Because the input image is scaled to [0.0, 1.0], this transformation should not be used when
transforming target image masks. See the `references`_ for implementing the transforms for image masks.
.. _references: https://github.com/pytorch/vision/tree/main/references/segmentation
"""
_transformed_types = (PIL.Image.Image, np.ndarray)
def __init__(self) -> None:
warnings.warn(
"The transform `ToTensor()` is deprecated and will be removed in a future release. "
"Instead, please use `v2.Compose([transforms.ToImageTensor(), v2.ToDtype(torch.float32, scale=True)])`."
)
super().__init__()
def _transform(self, inpt: Union[PIL.Image.Image, np.ndarray], params: Dict[str, Any]) -> torch.Tensor:
return _F.to_tensor(inpt)
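# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): the replacement spelled
# out in the deprecation warning above, next to the deprecated transform. The
# names follow the warning text and should be treated as illustrative.
#
#   from torchvision.transforms import v2
#
#   deprecated = ToTensor()
#   recommended = v2.Compose([v2.ToImageTensor(), v2.ToDtype(torch.float32, scale=True)])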
|
import warnings
from typing import Any, Dict, Union
import numpy as np
import PIL.Image
import torch
from torchvision.transforms import functional as _F
from torchvision.transforms.v2 import Transform
class ToTensor(Transform):
"""[BETA] Convert a PIL Image or ndarray to tensor and scale the values accordingly.
.. v2betastatus:: ToTensor transform
.. warning::
:class:`v2.ToTensor` is deprecated and will be removed in a future release.
Please use instead ``transforms.Compose([transforms.ToImageTensor(), transforms.ConvertImageDtype()])``.
This transform does not support torchscript.
Converts a PIL Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)
    or if the numpy.ndarray has dtype = np.uint8.
    In the other cases, tensors are returned without scaling.
.. note::
Because the input image is scaled to [0.0, 1.0], this transformation should not be used when
transforming target image masks. See the `references`_ for implementing the transforms for image masks.
.. _references: https://github.com/pytorch/vision/tree/main/references/segmentation
"""
_transformed_types = (PIL.Image.Image, np.ndarray)
def __init__(self) -> None:
warnings.warn(
"The transform `ToTensor()` is deprecated and will be removed in a future release. "
"Instead, please use `transforms.Compose([transforms.ToImageTensor(), transforms.ConvertImageDtype()])`."
)
super().__init__()
def _transform(self, inpt: Union[PIL.Image.Image, np.ndarray], params: Dict[str, Any]) -> torch.Tensor:
return _F.to_tensor(inpt)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import warnings
from mmcv import Config, DictAction
from mmdet.utils import replace_cfg_vals, update_data_root
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecated), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
print(f'Config:\n{cfg.pretty_text}')
if __name__ == '__main__':
main()
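# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): a typical invocation;
# the config path and override key below are illustrative.
#
#   python tools/misc/print_config.py configs/atss/atss_r50_fpn_1x_coco.py \
#       --cfg-options model.backbone.depth=101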
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import warnings
from mmcv import Config, DictAction
from mmdet.utils import update_data_root
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecated), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
print(f'Config:\n{cfg.pretty_text}')
if __name__ == '__main__':
main()
|
import argparse
import functools
import traceback
from typing import Callable, List, Optional, Tuple
from torch.utils.jit.log_extract import (
extract_ir,
load_graph_and_inputs,
run_baseline_no_fusion,
run_nnc,
run_nvfuser,
)
"""
Usage:
1. Run your script and pipe into a log file
PYTORCH_JIT_LOG_LEVEL=">>graph_fuser" python3 my_test.py &> log.txt
2. Run log_extract:
log_extract.py log.txt --nvfuser --nnc-dynamic --nnc-static
You can also extract the list of extracted IR:
log_extract.py log.txt --output
Passing in --graphs 0 2 will only run graphs 0 and 2
"""
def test_runners(
graphs: List[str],
runners: List[Tuple[str, Callable]],
graph_set: Optional[List[int]],
):
for i, ir in enumerate(graphs):
_, inputs = load_graph_and_inputs(ir)
if graph_set and i not in graph_set:
continue
print(f"Running Graph {i}")
prev_result = None
prev_runner_name = None
for runner in runners:
runner_name, runner_fn = runner
try:
result = runner_fn(ir, inputs)
if prev_result:
improvement = (prev_result / result - 1) * 100
print(
f"{runner_name} : {result:.6f} ms improvement over {prev_runner_name}: improvement: {improvement:.2f}%"
)
else:
print(f"{runner_name} : {result:.6f} ms")
prev_result = result
prev_runner_name = runner_name
except RuntimeError:
print(f" Graph {i} failed for {runner_name} :", traceback.format_exc())
def run():
parser = argparse.ArgumentParser(
description="Extracts torchscript IR from log files and, optionally, benchmarks it or outputs the IR"
)
parser.add_argument("filename", help="Filename of log file")
parser.add_argument(
"--nvfuser", dest="nvfuser", action="store_true", help="benchmark nvfuser"
)
parser.add_argument(
"--no-nvfuser",
dest="nvfuser",
action="store_false",
help="DON'T benchmark nvfuser",
)
parser.set_defaults(nvfuser=False)
parser.add_argument(
"--nnc-static",
dest="nnc_static",
action="store_true",
help="benchmark nnc static",
)
parser.add_argument(
"--no-nnc-static",
dest="nnc_static",
action="store_false",
help="DON'T benchmark nnc static",
)
parser.set_defaults(nnc_static=False)
parser.add_argument(
"--nnc-dynamic",
dest="nnc_dynamic",
action="store_true",
help="nnc with dynamic shapes",
)
parser.add_argument(
"--no-nnc-dynamic",
dest="nnc_dynamic",
action="store_false",
help="don't benchmark nnc with dynamic shapes",
)
parser.set_defaults(nnc_dynamic=False)
parser.add_argument(
"--baseline", dest="baseline", action="store_true", help="benchmark baseline"
)
parser.add_argument(
"--no-baseline",
dest="baseline",
action="store_false",
help="DON'T benchmark baseline",
)
parser.set_defaults(baseline=False)
parser.add_argument(
"--output", dest="output", action="store_true", help="Output graph IR"
)
parser.add_argument(
"--no-output", dest="output", action="store_false", help="DON'T output graph IR"
)
parser.set_defaults(output=False)
parser.add_argument(
"--graphs", nargs="+", type=int, help="Run only specified graph indices"
)
args = parser.parse_args()
graphs = extract_ir(args.filename)
graph_set = args.graphs
graph_set = graph_set if graph_set else None
options = []
if args.baseline:
options.append(("Baseline no fusion", run_baseline_no_fusion))
if args.nnc_dynamic:
options.append(("NNC Dynamic", functools.partial(run_nnc, dynamic=True)))
if args.nnc_static:
options.append(("NNC Static", functools.partial(run_nnc, dynamic=False)))
if args.nvfuser:
options.append(("NVFuser", run_nvfuser))
test_runners(graphs, options, graph_set)
if args.output:
quoted = []
for i, ir in enumerate(graphs):
if graph_set and i not in graph_set:
continue
quoted.append('"""' + ir + '"""')
print("[" + ", ".join(quoted) + "]")
if __name__ == "__main__":
run()
|
import argparse
import functools
import traceback
from typing import Callable, List, Optional, Tuple
from torch.utils.jit.log_extract import (
extract_ir,
load_graph_and_inputs,
run_baseline_no_fusion,
run_nnc,
run_nvfuser,
)
"""
Usage:
1. Run your script and pipe into a log file
PYTORCH_JIT_LOG_LEVEL=">>graph_fuser" python3 my_test.py &> log.txt
2. Run log_extract:
log_extract.py log.txt --nvfuser --nnc-dynamic --nnc-static
You can also extract the list of extracted IR:
log_extract.py log.txt --output
Passing in --graphs 0 2 will only run graphs 0 and 2
"""
def test_runners(
graphs: List[str],
runners: List[Tuple[str, Callable]],
graph_set: Optional[List[int]],
):
for i, ir in enumerate(graphs):
_, inputs = load_graph_and_inputs(ir)
if graph_set and i not in graph_set:
continue
print(f"Running Graph {i}")
prev_result = None
prev_runner_name = None
for runner in runners:
runner_name, runner_fn = runner
try:
result = runner_fn(ir, inputs)
if prev_result:
improvement = (prev_result / result - 1) * 100
print(
f"{runner_name} : {result:.6f} ms improvement over {prev_runner_name}: improvement: {improvement:.2f}%"
)
else:
print(f"{runner_name} : {result:.6f} ms")
prev_result = result
prev_runner_name = runner_name
except RuntimeError:
print(f" Graph {i} failed for {runner_name} :", traceback.format_exc())
def run():
parser = argparse.ArgumentParser(
description="Extracts torchscript IR from log files and, optionally, benchmarks it or outputs the IR"
)
parser.add_argument("filename", help="Filename of log file")
parser.add_argument(
"--nvfuser", dest="nvfuser", action="store_true", help="benchmark nvfuser"
)
parser.add_argument(
"--no-nvfuser",
dest="nvfuser",
action="store_false",
help="DON'T benchmark nvfuser",
)
parser.set_defaults(nvfuser=False)
parser.add_argument(
"--nnc-static",
dest="nnc_static",
action="store_true",
help="benchmark nnc static",
)
parser.add_argument(
"--no-nnc-static",
dest="nnc_static",
action="store_false",
help="DON'T benchmark nnc static",
)
parser.set_defaults(nnc_static=False)
parser.add_argument(
"--nnc-dynamic",
dest="nnc_dynamic",
action="store_true",
help="nnc with dynamic shapes",
)
parser.add_argument(
"--no-nnc-dynamic",
dest="nnc_dynamic",
action="store_false",
help="DONT't benchmark nnc with dynamic shapes",
)
parser.set_defaults(nnc_dynamic=False)
parser.add_argument(
"--baseline", dest="baseline", action="store_true", help="benchmark baseline"
)
parser.add_argument(
"--no-baseline",
dest="baseline",
action="store_false",
help="DON'T benchmark baseline",
)
parser.set_defaults(baseline=False)
parser.add_argument(
"--output", dest="output", action="store_true", help="Output graph IR"
)
parser.add_argument(
"--no-output", dest="output", action="store_false", help="DON'T output graph IR"
)
parser.set_defaults(output=False)
parser.add_argument(
"--graphs", nargs="+", type=int, help="Run only specified graph indices"
)
args = parser.parse_args()
graphs = extract_ir(args.filename)
graph_set = args.graphs
graph_set = graph_set if graph_set else None
options = []
if args.baseline:
options.append(("Baseline no fusion", run_baseline_no_fusion))
if args.nnc_dynamic:
options.append(("NNC Dynamic", functools.partial(run_nnc, dynamic=True)))
if args.nnc_static:
options.append(("NNC Static", functools.partial(run_nnc, dynamic=False)))
if args.nvfuser:
options.append(("NVFuser", run_nvfuser))
test_runners(graphs, options, graph_set)
if args.output:
quoted = []
for i, ir in enumerate(graphs):
if graph_set and i not in graph_set:
continue
quoted.append('"""' + ir + '"""')
print("[" + ", ".join(quoted) + "]")
if __name__ == "__main__":
run()
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class ATSS(SingleStageDetector):
"""Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_."""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
preprocess_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
preprocess_cfg=preprocess_cfg,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class ATSS(SingleStageDetector):
"""Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(ATSS, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
from __future__ import annotations
from typing import Any, Optional, Union
import torch
from ._tv_tensor import TVTensor
class Video(TVTensor):
""":class:`torch.Tensor` subclass for videos.
Args:
data (tensor-like): Any data that can be turned into a tensor with :func:`torch.as_tensor`.
dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the video is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Video:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
        if tensor.ndim < 4:
            raise ValueError(
                f"Expected at least a 4-dimensional (..., T, C, H, W) input, but got {tensor.ndim} dimensions"
            )
return tensor.as_subclass(cls)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
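# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): a Video wraps a tensor
# with at least 4 dims, conventionally (..., T, C, H, W). The shape below is
# illustrative.
#
#   clip = Video(torch.rand(8, 3, 32, 32))
#   print(type(clip), clip.shape, clip.dtype)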
|
from __future__ import annotations
from typing import Any, Optional, Union
import torch
from ._tv_tensor import TVTensor
class Video(TVTensor):
"""[BETA] :class:`torch.Tensor` subclass for videos.
Args:
data (tensor-like): Any data that can be turned into a tensor with :func:`torch.as_tensor`.
dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the video is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Video:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
        if tensor.ndim < 4:
            raise ValueError(
                f"Expected at least a 4-dimensional (..., T, C, H, W) input, but got {tensor.ndim} dimensions"
            )
return tensor.as_subclass(cls)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
|
"""
=================================================
Novelty detection with Local Outlier Factor (LOF)
=================================================
The Local Outlier Factor (LOF) algorithm is an unsupervised anomaly detection
method which computes the local density deviation of a given data point with
respect to its neighbors. It considers as outliers the samples that have a
substantially lower density than their neighbors. This example shows how to
use LOF for novelty detection. Note that when LOF is used for novelty
detection you MUST not use predict, decision_function and score_samples on the
training set as this would lead to wrong results. You must only use these
methods on new unseen data (which are not in the training set). See
:ref:`User Guide <outlier_detection>`: for details on the difference between
outlier detection and novelty detection and how to use LOF for outlier
detection.
The number of neighbors considered (parameter n_neighbors) is typically
set 1) greater than the minimum number of samples a cluster has to contain,
so that other samples can be local outliers relative to this cluster, and 2)
smaller than the maximum number of close-by samples that can potentially be
local outliers.
In practice, such information is generally not available, and taking
n_neighbors=20 appears to work well in general.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import numpy as np
from sklearn.neighbors import LocalOutlierFactor
np.random.seed(42)
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate normal (not abnormal) training observations
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate new normal (not abnormal) observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model for novelty detection (novelty=True)
clf = LocalOutlierFactor(n_neighbors=20, novelty=True, contamination=0.1)
clf.fit(X_train)
# DO NOT use predict, decision_function and score_samples on X_train, as this
# would give wrong results; use them only on new unseen data (not in X_train),
# e.g. X_test, X_outliers or the meshgrid
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the learned frontier, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection with LOF")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.PuBu)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors="darkred")
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors="palevioletred")
s = 40
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c="white", s=s, edgecolors="k")
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c="blueviolet", s=s, edgecolors="k")
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c="gold", s=s, edgecolors="k")
plt.axis("tight")
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend(
[mlines.Line2D([], [], color="darkred"), b1, b2, c],
[
"learned frontier",
"training observations",
"new regular observations",
"new abnormal observations",
],
loc=(1.05, 0.4),
prop=matplotlib.font_manager.FontProperties(size=11),
)
plt.xlabel(
"errors novel regular: %d/40 ; errors novel abnormal: %d/40"
% (n_error_test, n_error_outliers)
)
plt.tight_layout()
plt.show()
|
"""
=================================================
Novelty detection with Local Outlier Factor (LOF)
=================================================
The Local Outlier Factor (LOF) algorithm is an unsupervised anomaly detection
method which computes the local density deviation of a given data point with
respect to its neighbors. It considers as outliers the samples that have a
substantially lower density than their neighbors. This example shows how to
use LOF for novelty detection. Note that when LOF is used for novelty
detection you MUST not use predict, decision_function and score_samples on the
training set as this would lead to wrong results. You must only use these
methods on new unseen data (which are not in the training set). See
:ref:`User Guide <outlier_detection>`: for details on the difference between
outlier detection and novelty detection and how to use LOF for outlier
detection.
The number of neighbors considered (parameter n_neighbors) is typically
set 1) greater than the minimum number of samples a cluster has to contain,
so that other samples can be local outliers relative to this cluster, and 2)
smaller than the maximum number of close-by samples that can potentially be
local outliers.
In practice, such information is generally not available, and taking
n_neighbors=20 appears to work well in general.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import numpy as np
from sklearn.neighbors import LocalOutlierFactor
np.random.seed(42)
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate normal (not abnormal) training observations
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate new normal (not abnormal) observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model for novelty detection (novelty=True)
clf = LocalOutlierFactor(n_neighbors=20, novelty=True, contamination=0.1)
clf.fit(X_train)
# DO NOT use predict, decision_function and score_samples on X_train, as this
# would give wrong results; use them only on new unseen data (not in X_train),
# e.g. X_test, X_outliers or the meshgrid
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the learned frontier, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection with LOF")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.PuBu)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors="darkred")
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors="palevioletred")
s = 40
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c="white", s=s, edgecolors="k")
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c="blueviolet", s=s, edgecolors="k")
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c="gold", s=s, edgecolors="k")
plt.axis("tight")
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend(
[mlines.Line2D([], [], color="darkred"), b1, b2, c],
[
"learned frontier",
"training observations",
"new regular observations",
"new abnormal observations",
],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11),
)
plt.xlabel(
"errors novel regular: %d/40 ; errors novel abnormal: %d/40"
% (n_error_test, n_error_outliers)
)
plt.show()
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.core.utils.typing import MultiConfig, OptConfigType
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from mmdet.registry import MODELS
@MODELS.register_module()
class GlobalContextHead(BaseModule):
"""Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
        num_convs (int, optional): number of convolutional layers in GlbCtxHead.
Defaults to 4.
in_channels (int, optional): number of input channels. Defaults to 256.
conv_out_channels (int, optional): number of output channels before
classification layer. Defaults to 256.
num_classes (int, optional): number of classes. Defaults to 80.
loss_weight (float, optional): global context loss weight.
Defaults to 1.
conv_cfg (dict, optional): config to init conv layer. Defaults to None.
norm_cfg (dict, optional): config to init norm layer. Defaults to None.
conv_to_res (bool, optional): if True, 2 convs will be grouped into
1 `SimplifiedBasicBlock` using a skip connection.
Defaults to False.
init_cfg (:obj:`ConfigDict` or dict or list[dict] or
list[:obj:`ConfigDict`]): Initialization config dict. Defaults to
dict(type='Normal', std=0.01, override=dict(name='fc')).
"""
def __init__(
self,
num_convs: int = 4,
in_channels: int = 256,
conv_out_channels: int = 256,
num_classes: int = 80,
loss_weight: float = 1.0,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
conv_to_res: bool = False,
init_cfg: MultiConfig = dict(
type='Normal', std=0.01, override=dict(name='fc'))
) -> None:
super().__init__(init_cfg=init_cfg)
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.loss_weight = loss_weight
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.conv_to_res = conv_to_res
self.fp16_enabled = False
if self.conv_to_res:
num_res_blocks = num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
in_channels,
self.conv_out_channels,
num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.num_convs = num_res_blocks
else:
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(conv_out_channels, num_classes)
self.criterion = nn.BCEWithLogitsLoss()
def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor]:
"""Forward function.
Args:
feats (Tuple[Tensor]): Multi-scale feature maps.
Returns:
Tuple[Tensor]:
- mc_pred (Tensor): Multi-class prediction.
- x (Tensor): Global context feature.
"""
x = feats[-1]
for i in range(self.num_convs):
x = self.convs[i](x)
x = self.pool(x)
# multi-class prediction
mc_pred = x.reshape(x.size(0), -1)
mc_pred = self.fc(mc_pred)
return mc_pred, x
def loss(self, pred: Tensor, labels: List[Tensor]) -> Tensor:
"""Loss function.
Args:
pred (Tensor): Logits.
            labels (list[Tensor]): Ground truths.
Returns:
Tensor: Loss.
"""
labels = [lbl.unique() for lbl in labels]
targets = pred.new_zeros(pred.size())
for i, label in enumerate(labels):
targets[i, label] = 1.0
loss = self.loss_weight * self.criterion(pred, targets)
return loss
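# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): build a tiny head and
# run one forward/loss step on random feature maps. All sizes are illustrative.
if __name__ == '__main__':
    import torch
    head = GlobalContextHead(
        num_convs=1, in_channels=8, conv_out_channels=8, num_classes=5)
    feats = (torch.rand(2, 8, 16, 16), torch.rand(2, 8, 8, 8))
    mc_pred, ctx = head(feats)  # (2, 5) logits and the pooled context feature
    labels = [torch.tensor([0, 2]), torch.tensor([1])]
    print(mc_pred.shape, head.loss(mc_pred, labels))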
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from mmdet.registry import MODELS
@MODELS.register_module()
class GlobalContextHead(BaseModule):
"""Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
        num_convs (int, optional): number of convolutional layers in GlbCtxHead.
Default: 4.
in_channels (int, optional): number of input channels. Default: 256.
conv_out_channels (int, optional): number of output channels before
classification layer. Default: 256.
num_classes (int, optional): number of classes. Default: 80.
loss_weight (float, optional): global context loss weight. Default: 1.
conv_cfg (dict, optional): config to init conv layer. Default: None.
norm_cfg (dict, optional): config to init norm layer. Default: None.
conv_to_res (bool, optional): if True, 2 convs will be grouped into
1 `SimplifiedBasicBlock` using a skip connection. Default: False.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_weight=1.0,
conv_cfg=None,
norm_cfg=None,
conv_to_res=False,
init_cfg=dict(
type='Normal', std=0.01, override=dict(name='fc'))):
super(GlobalContextHead, self).__init__(init_cfg)
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.loss_weight = loss_weight
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.conv_to_res = conv_to_res
self.fp16_enabled = False
if self.conv_to_res:
num_res_blocks = num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
in_channels,
self.conv_out_channels,
num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.num_convs = num_res_blocks
else:
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(conv_out_channels, num_classes)
self.criterion = nn.BCEWithLogitsLoss()
@auto_fp16()
def forward(self, feats):
"""Forward function."""
x = feats[-1]
for i in range(self.num_convs):
x = self.convs[i](x)
x = self.pool(x)
# multi-class prediction
mc_pred = x.reshape(x.size(0), -1)
mc_pred = self.fc(mc_pred)
return mc_pred, x
@force_fp32(apply_to=('pred', ))
def loss(self, pred, labels):
"""Loss function."""
labels = [lbl.unique() for lbl in labels]
targets = pred.new_zeros(pred.size())
for i, label in enumerate(labels):
targets[i, label] = 1.0
loss = self.loss_weight * self.criterion(pred, targets)
return loss
|
import json
import os
from typing import List
import torch
from torch import nn
class LSTM(nn.Module):
"""Bidirectional LSTM running over word embeddings."""
def __init__(
self,
word_embedding_dimension: int,
hidden_dim: int,
num_layers: int = 1,
dropout: float = 0,
bidirectional: bool = True,
):
nn.Module.__init__(self)
self.config_keys = ["word_embedding_dimension", "hidden_dim", "num_layers", "dropout", "bidirectional"]
self.word_embedding_dimension = word_embedding_dimension
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.embeddings_dimension = hidden_dim
if self.bidirectional:
self.embeddings_dimension *= 2
self.encoder = nn.LSTM(
word_embedding_dimension,
hidden_dim,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
batch_first=True,
)
def forward(self, features):
token_embeddings = features["token_embeddings"]
sentence_lengths = torch.clamp(features["sentence_lengths"], min=1)
packed = nn.utils.rnn.pack_padded_sequence(
token_embeddings, sentence_lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed = self.encoder(packed)
unpack = nn.utils.rnn.pad_packed_sequence(packed[0], batch_first=True)[0]
features.update({"token_embeddings": unpack})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> List[int]:
raise NotImplementedError()
def save(self, output_path: str):
with open(os.path.join(output_path, "lstm_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "lstm_config.json"), "r") as fIn:
config = json.load(fIn)
weights = torch.load(os.path.join(input_path, "pytorch_model.bin"))
model = LSTM(**config)
model.load_state_dict(weights)
return model
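# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): run a padded batch of
# word embeddings through the module. All dimensions are illustrative.
if __name__ == "__main__":
    lstm = LSTM(word_embedding_dimension=16, hidden_dim=8)
    features = {
        "token_embeddings": torch.rand(2, 5, 16),
        "sentence_lengths": torch.tensor([5, 3]),
    }
    out = lstm(features)
    # last dim is 2 * hidden_dim because the encoder is bidirectional
    print(out["token_embeddings"].shape, lstm.get_word_embedding_dimension())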
|
import torch
from torch import nn
from typing import List
import os
import json
class LSTM(nn.Module):
"""
Bidirectional LSTM running over word embeddings.
"""
def __init__(self, word_embedding_dimension: int, hidden_dim: int, num_layers: int = 1, dropout: float = 0, bidirectional: bool = True):
nn.Module.__init__(self)
self.config_keys = ['word_embedding_dimension', 'hidden_dim', 'num_layers', 'dropout', 'bidirectional']
self.word_embedding_dimension = word_embedding_dimension
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.embeddings_dimension = hidden_dim
if self.bidirectional:
self.embeddings_dimension *= 2
self.encoder = nn.LSTM(word_embedding_dimension, hidden_dim, num_layers=num_layers, dropout=dropout, bidirectional=bidirectional, batch_first=True)
def forward(self, features):
token_embeddings = features['token_embeddings']
sentence_lengths = torch.clamp(features['sentence_lengths'], min=1)
packed = nn.utils.rnn.pack_padded_sequence(token_embeddings, sentence_lengths, batch_first=True, enforce_sorted=False)
packed = self.encoder(packed)
unpack = nn.utils.rnn.pad_packed_sequence(packed[0], batch_first=True)[0]
features.update({'token_embeddings': unpack})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str) -> List[int]:
raise NotImplementedError()
def save(self, output_path: str):
with open(os.path.join(output_path, 'lstm_config.json'), 'w') as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, 'pytorch_model.bin'))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, 'lstm_config.json'), 'r') as fIn:
config = json.load(fIn)
weights = torch.load(os.path.join(input_path, 'pytorch_model.bin'))
model = LSTM(**config)
model.load_state_dict(weights)
return model
|
_base_ = './rtmdet_s_8xb32-300e_coco.py'
checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa
model = dict(
backbone=dict(
deepen_factor=0.167,
widen_factor=0.375,
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
neck=dict(in_channels=[96, 192, 384], out_channels=96, num_csp_blocks=1),
bbox_head=dict(in_channels=96, feat_channels=96, exp_on_reg=False))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='CachedMosaic',
img_scale=(640, 640),
pad_val=114.0,
max_cached_images=20,
random_pop=False),
dict(
type='RandomResize',
scale=(1280, 1280),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(640, 640),
ratio_range=(1.0, 1.0),
max_cached_images=10,
random_pop=False,
pad_val=(114, 114, 114),
prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './rtmdet_s_8xb32-300e_coco.py'
checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa
model = dict(
backbone=dict(
deepen_factor=0.167,
widen_factor=0.375,
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
neck=dict(in_channels=[96, 192, 384], out_channels=96, num_csp_blocks=1),
bbox_head=dict(in_channels=96, feat_channels=96, exp_on_reg=False))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='CachedMosaic',
img_scale=(640, 640),
pad_val=114.0,
max_cached_images=20,
random_pop=False),
dict(
type='RandomResize',
scale=(1280, 1280),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(640, 640),
ratio_range=(1.0, 1.0),
max_cached_images=10,
random_pop=False,
pad_val=(114, 114, 114),
prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
import json
import multiprocessing
import os
import time
import pytest
from docarray import DocumentArray
from jina import Executor, requests
from jina.helper import random_port
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from tests.helper import (
_validate_custom_gateway_process,
_validate_dummy_custom_gateway_response,
)
from tests.unit.yaml.dummy_gateway import DummyGateway
cur_dir = os.path.dirname(os.path.abspath(__file__))
_dummy_gateway_yaml_path = os.path.join(cur_dir, '../../yaml/test-custom-gateway.yml')
class ProcessExecutor(Executor):
@requests(on='/')
def process(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = doc.text + 'world'
doc.tags['processed'] = True
def _create_gateway_runtime(port, uses, uses_with, worker_port):
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
deployments_metadata = '{"pod0": {"key1": "value1", "key2": "value2"}}'
with GatewayRuntime(
set_gateway_parser().parse_args(
[
'--port',
str(port),
'--uses',
uses,
'--uses-with',
json.dumps(uses_with),
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--deployments-metadata',
deployments_metadata,
]
)
) as runtime:
runtime.run_forever()
def _start_gateway_runtime(uses, uses_with, worker_port):
port = random_port()
p = multiprocessing.Process(
target=_create_gateway_runtime,
args=(port, uses, uses_with, worker_port),
daemon=True,
)
p.start()
time.sleep(1)
return port, p
def _create_worker_runtime(port, uses):
args = set_pod_parser().parse_args(['--uses', uses, '--port', str(port)])
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _start_worker_runtime(uses):
port = random_port()
p = multiprocessing.Process(
target=_create_worker_runtime,
args=(port, uses),
daemon=True,
)
p.start()
time.sleep(1)
return port, p
@pytest.mark.parametrize(
'uses,uses_with,expected',
[
('DummyGateway', {}, {'arg1': None, 'arg2': None, 'arg3': 'default-arg3'}),
(
_dummy_gateway_yaml_path,
{},
{'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
),
],
)
def test_custom_gateway_no_executors(uses, uses_with, expected):
worker_port, worker_process = _start_worker_runtime('ProcessExecutor')
gateway_port, gateway_process = _start_gateway_runtime(uses, uses_with, worker_port)
_validate_dummy_custom_gateway_response(gateway_port, expected)
_validate_custom_gateway_process(
gateway_port, 'hello', {'text': 'helloworld', 'tags': {'processed': True}}
)
gateway_process.terminate()
gateway_process.join()
worker_process.terminate()
worker_process.join()
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
|
import json
import multiprocessing
import os
import time
import pytest
from docarray import DocumentArray
from jina import Executor, requests
from jina.helper import random_port
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from tests.helper import (
_validate_custom_gateway_process,
_validate_dummy_custom_gateway_response,
)
from tests.unit.yaml.dummy_gateway import DummyGateway
cur_dir = os.path.dirname(os.path.abspath(__file__))
_dummy_gateway_yaml_path = os.path.join(cur_dir, '../../yaml/test-custom-gateway.yml')
class ProcessExecutor(Executor):
@requests(on='/')
def process(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = doc.text + 'world'
doc.tags['processed'] = True
def _create_gateway_runtime(port, uses, uses_with, worker_port):
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
with GatewayRuntime(
set_gateway_parser().parse_args(
[
'--port',
str(port),
'--uses',
uses,
'--uses-with',
json.dumps(uses_with),
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
]
)
) as runtime:
runtime.run_forever()
def _start_gateway_runtime(uses, uses_with, worker_port):
port = random_port()
p = multiprocessing.Process(
target=_create_gateway_runtime,
args=(port, uses, uses_with, worker_port),
daemon=True,
)
p.start()
time.sleep(1)
return port, p
def _create_worker_runtime(port, uses):
args = set_pod_parser().parse_args(['--uses', uses, '--port', str(port)])
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _start_worker_runtime(uses):
port = random_port()
p = multiprocessing.Process(
target=_create_worker_runtime,
args=(port, uses),
daemon=True,
)
p.start()
time.sleep(1)
return port, p
@pytest.mark.parametrize(
'uses,uses_with,expected',
[
('DummyGateway', {}, {'arg1': None, 'arg2': None, 'arg3': 'default-arg3'}),
(
_dummy_gateway_yaml_path,
{},
{'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
),
],
)
def test_custom_gateway_no_executors(uses, uses_with, expected):
worker_port, worker_process = _start_worker_runtime('ProcessExecutor')
gateway_port, gateway_process = _start_gateway_runtime(uses, uses_with, worker_port)
_validate_dummy_custom_gateway_response(gateway_port, expected)
_validate_custom_gateway_process(
gateway_port, 'hello', {'text': 'helloworld', 'tags': {'processed': True}}
)
gateway_process.terminate()
gateway_process.join()
worker_process.terminate()
worker_process.join()
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
|
import gc
import unittest
import numpy as np
import pytest
import torch
from diffusers import FluxPipeline, FluxPriorReduxPipeline
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
Expectations,
backend_empty_cache,
numpy_cosine_similarity_distance,
require_big_accelerator,
slow,
torch_device,
)
@slow
@require_big_accelerator
@pytest.mark.big_gpu_with_torch_cuda
class FluxReduxSlowTests(unittest.TestCase):
pipeline_class = FluxPriorReduxPipeline
repo_id = "black-forest-labs/FLUX.1-Redux-dev"
base_pipeline_class = FluxPipeline
base_repo_id = "black-forest-labs/FLUX.1-schnell"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def get_inputs(self, device, seed=0):
init_image = load_image(
"https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/style_ziggy/img5.png"
)
return {"image": init_image}
def get_base_pipeline_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
return {
"num_inference_steps": 2,
"guidance_scale": 2.0,
"output_type": "np",
"generator": generator,
}
def test_flux_redux_inference(self):
pipe_redux = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.bfloat16)
pipe_base = self.base_pipeline_class.from_pretrained(
self.base_repo_id, torch_dtype=torch.bfloat16, text_encoder=None, text_encoder_2=None
)
pipe_redux.to(torch_device)
pipe_base.enable_model_cpu_offload(device=torch_device)
inputs = self.get_inputs(torch_device)
base_pipeline_inputs = self.get_base_pipeline_inputs(torch_device)
redux_pipeline_output = pipe_redux(**inputs)
image = pipe_base(**base_pipeline_inputs, **redux_pipeline_output).images[0]
image_slice = image[0, :10, :10]
expected_slices = Expectations(
{
("cuda", 7): np.array(
[
0.30078125,
0.37890625,
0.46875,
0.28125,
0.36914062,
0.47851562,
0.28515625,
0.375,
0.4765625,
0.28125,
0.375,
0.48046875,
0.27929688,
0.37695312,
0.47851562,
0.27734375,
0.38085938,
0.4765625,
0.2734375,
0.38085938,
0.47265625,
0.27539062,
0.37890625,
0.47265625,
0.27734375,
0.37695312,
0.47070312,
0.27929688,
0.37890625,
0.47460938,
],
dtype=np.float32,
),
("xpu", 3): np.array(
[
0.20507812,
0.30859375,
0.3984375,
0.18554688,
0.30078125,
0.41015625,
0.19921875,
0.3125,
0.40625,
0.19726562,
0.3125,
0.41601562,
0.19335938,
0.31445312,
0.4140625,
0.1953125,
0.3203125,
0.41796875,
0.19726562,
0.32421875,
0.41992188,
0.19726562,
0.32421875,
0.41992188,
0.20117188,
0.32421875,
0.41796875,
0.203125,
0.32617188,
0.41796875,
],
dtype=np.float32,
),
}
)
expected_slice = expected_slices.get_expectation()
max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten())
assert max_diff < 1e-4
|
import gc
import unittest
import numpy as np
import pytest
import torch
from diffusers import FluxPipeline, FluxPriorReduxPipeline
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
backend_empty_cache,
numpy_cosine_similarity_distance,
require_big_accelerator,
slow,
torch_device,
)
@slow
@require_big_accelerator
@pytest.mark.big_gpu_with_torch_cuda
class FluxReduxSlowTests(unittest.TestCase):
pipeline_class = FluxPriorReduxPipeline
repo_id = "YiYiXu/yiyi-redux" # update to "black-forest-labs/FLUX.1-Redux-dev" once PR is merged
base_pipeline_class = FluxPipeline
base_repo_id = "black-forest-labs/FLUX.1-schnell"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def get_inputs(self, device, seed=0):
init_image = load_image(
"https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/style_ziggy/img5.png"
)
return {"image": init_image}
def get_base_pipeline_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
return {
"num_inference_steps": 2,
"guidance_scale": 2.0,
"output_type": "np",
"generator": generator,
}
def test_flux_redux_inference(self):
pipe_redux = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.bfloat16)
pipe_base = self.base_pipeline_class.from_pretrained(
self.base_repo_id, torch_dtype=torch.bfloat16, text_encoder=None, text_encoder_2=None
)
pipe_redux.to(torch_device)
pipe_base.enable_model_cpu_offload(device=torch_device)
inputs = self.get_inputs(torch_device)
base_pipeline_inputs = self.get_base_pipeline_inputs(torch_device)
redux_pipeline_output = pipe_redux(**inputs)
image = pipe_base(**base_pipeline_inputs, **redux_pipeline_output).images[0]
image_slice = image[0, :10, :10]
expected_slice = np.array(
[
0.30078125,
0.37890625,
0.46875,
0.28125,
0.36914062,
0.47851562,
0.28515625,
0.375,
0.4765625,
0.28125,
0.375,
0.48046875,
0.27929688,
0.37695312,
0.47851562,
0.27734375,
0.38085938,
0.4765625,
0.2734375,
0.38085938,
0.47265625,
0.27539062,
0.37890625,
0.47265625,
0.27734375,
0.37695312,
0.47070312,
0.27929688,
0.37890625,
0.47460938,
],
dtype=np.float32,
)
max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten())
assert max_diff < 1e-4
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch
from mmengine.utils.dl_utils import torch_meshgrid
def test_torch_meshgrid():
# torch_meshgrid should not throw warning
with warnings.catch_warnings():
warnings.simplefilter('error')
x = torch.tensor([1, 2, 3])
y = torch.tensor([4, 5, 6])
grid_x, grid_y = torch_meshgrid(x, y)
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmengine.utils.dl_utils import torch_meshgrid
def test_torch_meshgrid():
# torch_meshgrid should not throw warning
with pytest.warns(None) as record:
x = torch.tensor([1, 2, 3])
y = torch.tensor([4, 5, 6])
grid_x, grid_y = torch_meshgrid(x, y)
assert len(record) == 0
|
"""Utilities for the CI."""
import os
from datetime import datetime, timedelta
from functools import wraps
from typing import Any, Callable, Dict, TypedDict, TypeVar, Union
class DirectoryExcursion:
def __init__(self, path: Union[os.PathLike, str]) -> None:
self.path = path
self.curdir = os.path.normpath(os.path.abspath(os.path.curdir))
def __enter__(self) -> None:
os.chdir(self.path)
def __exit__(self, *args: Any) -> None:
os.chdir(self.curdir)
R = TypeVar("R")
def cd(path: Union[os.PathLike, str]) -> Callable:
"""Decorator for changing directory temporarily."""
def chdir(func: Callable[..., R]) -> Callable[..., R]:
@wraps(func)
def inner(*args: Any, **kwargs: Any) -> R:
with DirectoryExcursion(path):
return func(*args, **kwargs)
return inner
return chdir
Record = TypedDict("Record", {"count": int, "total": timedelta})
timer: Dict[str, Record] = {}
def record_time(func: Callable[..., R]) -> Callable[..., R]:
"""Decorator for recording function runtime."""
global timer
@wraps(func)
def inner(*args: Any, **kwargs: Any) -> R:
if func.__name__ not in timer:
timer[func.__name__] = {"count": 0, "total": timedelta(0)}
s = datetime.now()
try:
r = func(*args, **kwargs)
finally:
e = datetime.now()
timer[func.__name__]["count"] += 1
timer[func.__name__]["total"] += e - s
return r
return inner
def print_time() -> None:
"""Print all recorded items by :py:func:`record_time`."""
global timer
for k, v in timer.items():
print(
"Name:",
k,
"Called:",
v["count"],
"Elapsed:",
f"{v['total'].seconds} secs",
)
ROOT = os.path.normpath(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.pardir, os.path.pardir
)
)
R_PACKAGE = os.path.join(ROOT, "R-package")
JVM_PACKAGES = os.path.join(ROOT, "jvm-packages")
PY_PACKAGE = os.path.join(ROOT, "python-package")
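if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): ``cd`` runs the decorated function
    # from another directory and ``record_time`` accumulates its wall time into
    # ``timer`` so that ``print_time`` can report it afterwards. The helper name
    # ``list_py_package`` is hypothetical.
    @cd(PY_PACKAGE)
    @record_time
    def list_py_package() -> None:
        print(sorted(os.listdir(os.path.curdir))[:5])

    list_py_package()
    print_time()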
|
"""Utilities for the CI."""
import os
from datetime import datetime, timedelta
from functools import wraps
from typing import Any, Callable, Dict, TypedDict, TypeVar, Union
class DirectoryExcursion:
def __init__(self, path: Union[os.PathLike, str]) -> None:
self.path = path
self.curdir = os.path.normpath(os.path.abspath(os.path.curdir))
def __enter__(self) -> None:
os.chdir(self.path)
def __exit__(self, *args: Any) -> None:
os.chdir(self.curdir)
R = TypeVar("R")
def cd(path: Union[os.PathLike, str]) -> Callable:
"""Decorator for changing directory temporarily."""
def chdir(func: Callable[..., R]) -> Callable[..., R]:
@wraps(func)
def inner(*args: Any, **kwargs: Any) -> R:
with DirectoryExcursion(path):
return func(*args, **kwargs)
return inner
return chdir
Record = TypedDict("Record", {"count": int, "total": timedelta})
timer: Dict[str, Record] = {}
def record_time(func: Callable[..., R]) -> Callable[..., R]:
"""Decorator for recording function runtime."""
global timer
@wraps(func)
def inner(*args: Any, **kwargs: Any) -> R:
if func.__name__ not in timer:
timer[func.__name__] = {"count": 0, "total": timedelta(0)}
s = datetime.now()
try:
r = func(*args, **kwargs)
finally:
e = datetime.now()
timer[func.__name__]["count"] += 1
timer[func.__name__]["total"] += e - s
return r
return inner
def print_time() -> None:
"""Print all recorded items by :py:func:`record_time`."""
global timer
for k, v in timer.items():
print(
"Name:",
k,
"Called:",
v["count"],
"Elapsed:",
f"{v['total'].seconds} secs",
)
ROOT = os.path.normpath(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.pardir, os.path.pardir
)
)
R_PACKAGE = os.path.join(ROOT, "R-package")
JVM_PACKAGES = os.path.join(ROOT, "jvm-packages")
PY_PACKAGE = os.path.join(ROOT, "python-package")
|
from typing import (
TYPE_CHECKING,
Sequence,
)
import numpy as np
from docarray.helper import typename
if TYPE_CHECKING:
from docarray.typing import (
DocumentArrayIndexType,
)
class DelItemMixin:
"""Provide help function to enable advanced indexing in `__delitem__`"""
def __delitem__(self, index: 'DocumentArrayIndexType'):
self._update_subindices_del(index)
if isinstance(index, (int, np.generic)) and not isinstance(index, bool):
self._del_doc_by_offset(int(index))
elif isinstance(index, str):
if index.startswith('@'):
raise NotImplementedError(
                    'Deleting elements along traversal paths is not implemented'
)
else:
self._del_doc(index)
elif isinstance(index, slice):
self._del_docs_by_slice(index)
elif index is Ellipsis:
self._del_all_docs()
elif isinstance(index, Sequence):
if (
isinstance(index, tuple)
and len(index) == 2
and (
isinstance(index[0], (slice, Sequence, str, int))
or index[0] is Ellipsis
)
and isinstance(index[1], (str, Sequence))
):
# TODO: add support for cases such as da[1, ['text', 'id']]?
if isinstance(index[0], (str, int)) and isinstance(index[1], str):
# ambiguity only comes from the second string
if index[1] in self:
del self[index[0]]
del self[index[1]]
else:
self._set_doc_attr_by_id(index[0], index[1], None)
elif isinstance(index[0], (slice, Sequence)):
_attrs = index[1]
if isinstance(_attrs, str):
_attrs = (index[1],)
for _d in self[index[0]]:
for _aa in _attrs:
self._set_doc_attr_by_id(_d.id, _aa, None)
_d.pop(_aa)
elif isinstance(index[0], bool):
self._del_docs_by_mask(index)
elif isinstance(index[0], int):
for t in sorted(index, reverse=True):
del self[t]
elif isinstance(index[0], str):
for t in index:
del self[t]
elif isinstance(index, np.ndarray):
index = index.squeeze()
if index.ndim == 1:
del self[index.tolist()]
else:
raise IndexError(
                    f'When using np.ndarray as index, its `ndim` must be 1, but received ndim={index.ndim}'
)
else:
raise IndexError(f'Unsupported index type {typename(index)}: {index}')
|
from typing import (
TYPE_CHECKING,
Sequence,
)
import numpy as np
from docarray.helper import typename
if TYPE_CHECKING:
from docarray.typing import (
DocumentArrayIndexType,
)
class DelItemMixin:
"""Provide help function to enable advanced indexing in `__delitem__`"""
def __delitem__(self, index: 'DocumentArrayIndexType'):
if isinstance(index, (int, np.generic)) and not isinstance(index, bool):
self._del_doc_by_offset(int(index))
elif isinstance(index, str):
if index.startswith('@'):
raise NotImplementedError(
                    'Deleting elements along traversal paths is not implemented'
)
else:
self._del_doc(index)
elif isinstance(index, slice):
self._del_docs_by_slice(index)
elif index is Ellipsis:
self._del_all_docs()
elif isinstance(index, Sequence):
if (
isinstance(index, tuple)
and len(index) == 2
and (
isinstance(index[0], (slice, Sequence, str, int))
or index[0] is Ellipsis
)
and isinstance(index[1], (str, Sequence))
):
# TODO: add support for cases such as da[1, ['text', 'id']]?
if isinstance(index[0], (str, int)) and isinstance(index[1], str):
# ambiguity only comes from the second string
if index[1] in self:
del self[index[0]]
del self[index[1]]
else:
self._set_doc_attr_by_id(index[0], index[1], None)
elif isinstance(index[0], (slice, Sequence)):
_attrs = index[1]
if isinstance(_attrs, str):
_attrs = (index[1],)
for _d in self[index[0]]:
for _aa in _attrs:
self._set_doc_attr_by_id(_d.id, _aa, None)
_d.pop(_aa)
elif isinstance(index[0], bool):
self._del_docs_by_mask(index)
elif isinstance(index[0], int):
for t in sorted(index, reverse=True):
del self[t]
elif isinstance(index[0], str):
for t in index:
del self[t]
elif isinstance(index, np.ndarray):
index = index.squeeze()
if index.ndim == 1:
del self[index.tolist()]
else:
raise IndexError(
                    f'When using np.ndarray as index, its `ndim` must be 1, but received ndim={index.ndim}'
)
else:
raise IndexError(f'Unsupported index type {typename(index)}: {index}')
|
"""Bedrock Retriever."""
from typing import List, Optional, Dict, Any
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
from llama_index.core.utilities.aws_utils import get_aws_service_client
class AmazonKnowledgeBasesRetriever(BaseRetriever):
"""
`Amazon Bedrock Knowledge Bases` retrieval.
See https://aws.amazon.com/bedrock/knowledge-bases for more info.
Args:
knowledge_base_id: Knowledge Base ID.
retrieval_config: Configuration for retrieval.
profile_name: The name of the profile in the ~/.aws/credentials
or ~/.aws/config files, which has either access keys or role information
specified. If not specified, the default credential profile or, if on an
EC2 instance, credentials from IMDS will be used.
region_name: The aws region e.g., `us-west-2`.
            Falls back to the AWS_DEFAULT_REGION env variable or the region specified in
~/.aws/config.
aws_access_key_id: The aws access key id.
aws_secret_access_key: The aws secret access key.
aws_session_token: AWS temporary session token.
Example:
.. code-block:: python
from llama_index.retrievers.bedrock import AmazonKnowledgeBasesRetriever
retriever = AmazonKnowledgeBasesRetriever(
knowledge_base_id="<knowledge-base-id>",
retrieval_config={
"vectorSearchConfiguration": {
"numberOfResults": 4,
"overrideSearchType": "SEMANTIC",
"filter": {
"equals": {
"key": "tag",
"value": "space"
}
}
}
},
)
"""
def __init__(
self,
knowledge_base_id: str,
retrieval_config: Optional[Dict[str, Any]] = None,
profile_name: Optional[str] = None,
region_name: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
):
self._client = get_aws_service_client(
service_name="bedrock-agent-runtime",
profile_name=profile_name,
region_name=region_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
)
self.knowledge_base_id = knowledge_base_id
self.retrieval_config = retrieval_config
super().__init__(callback_manager)
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
query = query_bundle.query_str
response = self._client.retrieve(
retrievalQuery={"text": query.strip()},
knowledgeBaseId=self.knowledge_base_id,
retrievalConfiguration=self.retrieval_config,
)
results = response["retrievalResults"]
node_with_score = []
for result in results:
metadata = {}
if "location" in result:
metadata["location"] = result["location"]
if "metadata" in result:
metadata["sourceMetadata"] = result["metadata"]
node_with_score.append(
NodeWithScore(
node=TextNode(
text=result["content"]["text"],
metadata=metadata,
),
score=result["score"] if "score" in result else 0,
)
)
return node_with_score
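if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): requires valid AWS credentials and an
    # existing knowledge base; the ID below is a placeholder.
    retriever = AmazonKnowledgeBasesRetriever(
        knowledge_base_id="<knowledge-base-id>",
        retrieval_config={"vectorSearchConfiguration": {"numberOfResults": 4}},
    )
    for node_with_score in retriever.retrieve("What is Amazon Bedrock?"):
        print(node_with_score.score, node_with_score.node.text[:80])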
|
"""Bedrock Retriever."""
from typing import List, Optional, Dict, Any
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
from llama_index.core.utilities.aws_utils import get_aws_service_client
class AmazonKnowledgeBasesRetriever(BaseRetriever):
"""`Amazon Bedrock Knowledge Bases` retrieval.
See https://aws.amazon.com/bedrock/knowledge-bases for more info.
Args:
knowledge_base_id: Knowledge Base ID.
retrieval_config: Configuration for retrieval.
profile_name: The name of the profile in the ~/.aws/credentials
or ~/.aws/config files, which has either access keys or role information
specified. If not specified, the default credential profile or, if on an
EC2 instance, credentials from IMDS will be used.
region_name: The aws region e.g., `us-west-2`.
            Falls back to the AWS_DEFAULT_REGION env variable or the region specified in
~/.aws/config.
aws_access_key_id: The aws access key id.
aws_secret_access_key: The aws secret access key.
aws_session_token: AWS temporary session token.
Example:
.. code-block:: python
from llama_index.retrievers.bedrock import AmazonKnowledgeBasesRetriever
retriever = AmazonKnowledgeBasesRetriever(
knowledge_base_id="<knowledge-base-id>",
retrieval_config={
"vectorSearchConfiguration": {
"numberOfResults": 4,
"overrideSearchType": "SEMANTIC",
"filter": {
"equals": {
"key": "tag",
"value": "space"
}
}
}
},
)
"""
def __init__(
self,
knowledge_base_id: str,
retrieval_config: Optional[Dict[str, Any]] = None,
profile_name: Optional[str] = None,
region_name: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
):
self._client = get_aws_service_client(
service_name="bedrock-agent-runtime",
profile_name=profile_name,
region_name=region_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
)
self.knowledge_base_id = knowledge_base_id
self.retrieval_config = retrieval_config
super().__init__(callback_manager)
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
query = query_bundle.query_str
response = self._client.retrieve(
retrievalQuery={"text": query.strip()},
knowledgeBaseId=self.knowledge_base_id,
retrievalConfiguration=self.retrieval_config,
)
results = response["retrievalResults"]
node_with_score = []
for result in results:
metadata = {}
if "location" in result:
metadata["location"] = result["location"]
if "metadata" in result:
metadata["sourceMetadata"] = result["metadata"]
node_with_score.append(
NodeWithScore(
node=TextNode(
text=result["content"]["text"],
metadata=metadata,
),
score=result["score"] if "score" in result else 0,
)
)
return node_with_score
|
_base_ = './retinanet_r50_fpn_8xb8-amp-lsj-200e_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
|
_base_ = './retinanet_r50_fpn_lsj_200e_8x8_fp16_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
|
"""Pydantic output parser."""
import json
from typing import Any, Generic, List, Optional, Type
from llama_index.core.output_parsers.base import ChainableOutputParser
from llama_index.core.output_parsers.utils import extract_json_str
from llama_index.core.types import Model
PYDANTIC_FORMAT_TMPL = """
Here's a JSON schema to follow:
{schema}
Output a valid JSON object but do not repeat the schema.
"""
class PydanticOutputParser(ChainableOutputParser, Generic[Model]):
"""
Pydantic Output Parser.
Args:
output_cls (BaseModel): Pydantic output class.
"""
def __init__(
self,
output_cls: Type[Model],
excluded_schema_keys_from_format: Optional[List] = None,
pydantic_format_tmpl: str = PYDANTIC_FORMAT_TMPL,
) -> None:
"""Init params."""
self._output_cls = output_cls
self._excluded_schema_keys_from_format = excluded_schema_keys_from_format or []
self._pydantic_format_tmpl = pydantic_format_tmpl
@property
def output_cls(self) -> Type[Model]:
return self._output_cls
@property
def format_string(self) -> str:
"""Format string."""
return self.get_format_string(escape_json=True)
def get_format_string(self, escape_json: bool = True) -> str:
"""Format string."""
schema_dict = self._output_cls.model_json_schema()
for key in self._excluded_schema_keys_from_format:
del schema_dict[key]
schema_str = json.dumps(schema_dict)
output_str = self._pydantic_format_tmpl.format(schema=schema_str)
if escape_json:
return output_str.replace("{", "{{").replace("}", "}}")
else:
return output_str
def parse(self, text: str) -> Any:
"""Parse, validate, and correct errors programmatically."""
json_str = extract_json_str(text)
return self._output_cls.model_validate_json(json_str)
def format(self, query: str) -> str:
"""Format a query with structured output formatting instructions."""
return query + "\n\n" + self.get_format_string(escape_json=True)
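if __name__ == "__main__":
    # Minimal usage sketch (illustrative only); the ``Album`` model below is a
    # hypothetical example schema.
    from pydantic import BaseModel

    class Album(BaseModel):
        artist: str
        songs: List[str]

    parser = PydanticOutputParser(output_cls=Album)
    # Format instructions (with escaped braces) appended to the query:
    print(parser.format("Describe an album as JSON."))
    # Parsing tolerates surrounding text thanks to ``extract_json_str``:
    print(parser.parse('Sure! {"artist": "Example", "songs": ["One", "Two"]}'))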
|
"""Pydantic output parser."""
import json
from typing import Any, Generic, List, Optional, Type
from llama_index.core.output_parsers.base import ChainableOutputParser
from llama_index.core.output_parsers.utils import extract_json_str
from llama_index.core.types import Model
PYDANTIC_FORMAT_TMPL = """
Here's a JSON schema to follow:
{schema}
Output a valid JSON object but do not repeat the schema.
"""
class PydanticOutputParser(ChainableOutputParser, Generic[Model]):
"""Pydantic Output Parser.
Args:
output_cls (BaseModel): Pydantic output class.
"""
def __init__(
self,
output_cls: Type[Model],
excluded_schema_keys_from_format: Optional[List] = None,
pydantic_format_tmpl: str = PYDANTIC_FORMAT_TMPL,
) -> None:
"""Init params."""
self._output_cls = output_cls
self._excluded_schema_keys_from_format = excluded_schema_keys_from_format or []
self._pydantic_format_tmpl = pydantic_format_tmpl
@property
def output_cls(self) -> Type[Model]:
return self._output_cls
@property
def format_string(self) -> str:
"""Format string."""
return self.get_format_string(escape_json=True)
def get_format_string(self, escape_json: bool = True) -> str:
"""Format string."""
schema_dict = self._output_cls.model_json_schema()
for key in self._excluded_schema_keys_from_format:
del schema_dict[key]
schema_str = json.dumps(schema_dict)
output_str = self._pydantic_format_tmpl.format(schema=schema_str)
if escape_json:
return output_str.replace("{", "{{").replace("}", "}}")
else:
return output_str
def parse(self, text: str) -> Any:
"""Parse, validate, and correct errors programmatically."""
json_str = extract_json_str(text)
return self._output_cls.model_validate_json(json_str)
def format(self, query: str) -> str:
"""Format a query with structured output formatting instructions."""
return query + "\n\n" + self.get_format_string(escape_json=True)
|
class WorkflowValidationError(Exception):
pass
class WorkflowTimeoutError(Exception):
pass
class WorkflowRuntimeError(Exception):
pass
class WorkflowDone(Exception):
pass
class WorkflowCancelledByUser(Exception):
pass
class WorkflowStepDoesNotExistError(Exception):
pass
class WorkflowConfigurationError(Exception):
pass
|
class WorkflowValidationError(Exception):
pass
class WorkflowTimeoutError(Exception):
pass
class WorkflowRuntimeError(Exception):
pass
class WorkflowDone(Exception):
pass
class WorkflowCancelledByUser(Exception):
pass
class WorkflowStepDoesNotExistError(Exception):
pass
|
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print("==============================")
print("Iteration %03d of %03d" % (it, max_it))
print("==============================")
print()
data = nr.randint(-50, 51, (n_samples, n_features))
print("K-Means")
tstart = time()
kmeans = KMeans(init="k-means++", n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results["kmeans_speed"].append(delta)
results["kmeans_quality"].append(kmeans.inertia_)
print("Fast K-Means")
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(
init="k-means++", n_clusters=10, batch_size=chunk
)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results["MiniBatchKMeans Speed"].append(delta)
results["MiniBatchKMeans Quality"].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array(
[
[1, 1],
[-1, -1],
[1, -1],
[-1, 1],
[0.5, 0.5],
[0.75, -0.5],
[-1, 0.75],
[1, 0],
]
)
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print("==============================")
print("Iteration %03d of %03d" % (it, max_it))
print("==============================")
print()
print("Fast K-Means")
tstart = time()
mbkmeans = MiniBatchKMeans(init="k-means++", n_clusters=8, batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results["MiniBatchKMeans Speed"].append(delta)
results["MiniBatchKMeans Quality"].append(mbkmeans.inertia_)
return results
if __name__ == "__main__":
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d # register the 3d projection # noqa: F401
samples_range = np.linspace(50, 150, 5).astype(int)
features_range = np.linspace(150, 50000, 5).astype(int)
chunks = np.linspace(500, 10000, 15).astype(int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max(
[max(i) for i in [t for (label, t) in results.items() if "speed" in label]]
)
max_inertia = max(
[max(i) for i in [t for (label, t) in results.items() if "speed" not in label]]
)
fig = plt.figure("scikit-learn K-Means benchmark results")
for c, (label, timings) in zip("brcy", sorted(results.items())):
if "speed" in label:
ax = fig.add_subplot(2, 2, 1, projection="3d")
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection="3d")
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0], features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel("n_samples")
ax.set_ylabel("n_features")
i = 0
for c, (label, timings) in zip("br", sorted(results_2.items())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel("Chunks")
ax.set_ylabel(label)
plt.show()
|
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print("==============================")
print("Iteration %03d of %03d" % (it, max_it))
print("==============================")
print()
data = nr.randint(-50, 51, (n_samples, n_features))
print("K-Means")
tstart = time()
kmeans = KMeans(init="k-means++", n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results["kmeans_speed"].append(delta)
results["kmeans_quality"].append(kmeans.inertia_)
print("Fast K-Means")
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(
init="k-means++", n_clusters=10, batch_size=chunk
)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results["MiniBatchKMeans Speed"].append(delta)
results["MiniBatchKMeans Quality"].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array(
[
[1, 1],
[-1, -1],
[1, -1],
[-1, 1],
[0.5, 0.5],
[0.75, -0.5],
[-1, 0.75],
[1, 0],
]
)
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print("==============================")
print("Iteration %03d of %03d" % (it, max_it))
print("==============================")
print()
print("Fast K-Means")
tstart = time()
mbkmeans = MiniBatchKMeans(init="k-means++", n_clusters=8, batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results["MiniBatchKMeans Speed"].append(delta)
results["MiniBatchKMeans Quality"].append(mbkmeans.inertia_)
return results
if __name__ == "__main__":
from mpl_toolkits.mplot3d import axes3d # noqa register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(int)
features_range = np.linspace(150, 50000, 5).astype(int)
chunks = np.linspace(500, 10000, 15).astype(int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max(
[max(i) for i in [t for (label, t) in results.items() if "speed" in label]]
)
max_inertia = max(
[max(i) for i in [t for (label, t) in results.items() if "speed" not in label]]
)
fig = plt.figure("scikit-learn K-Means benchmark results")
for c, (label, timings) in zip("brcy", sorted(results.items())):
if "speed" in label:
ax = fig.add_subplot(2, 2, 1, projection="3d")
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection="3d")
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0], features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel("n_samples")
ax.set_ylabel("n_features")
i = 0
for c, (label, timings) in zip("br", sorted(results_2.items())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel("Chunks")
ax.set_ylabel(label)
plt.show()
|
"""
===================================
How to write your own v2 transforms
===================================
.. note::
Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_transforms.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_transforms_plot_custom_transforms.py>` to download the full example code.
This guide explains how to write transforms that are compatible with the
torchvision transforms V2 API.
"""
# %%
import torch
from torchvision import tv_tensors
from torchvision.transforms import v2
# %%
# Just create a ``nn.Module`` and override the ``forward`` method
# ===============================================================
#
# In most cases, this is all you're going to need, as long as you already know
# the structure of the input that your transform will expect. For example if
# you're just doing image classification, your transform will typically accept a
# single image as input, or a ``(img, label)`` input. So you can just hard-code
# your ``forward`` method to accept just that, e.g.
#
# .. code:: python
#
# class MyCustomTransform(torch.nn.Module):
# def forward(self, img, label):
# # Do some transformations
# return new_img, new_label
#
# .. note::
#
# This means that if you have a custom transform that is already compatible
# with the V1 transforms (those in ``torchvision.transforms``), it will
# still work with the V2 transforms without any change!
#
# We will illustrate this more completely below with a typical detection case,
# where our samples are just images, bounding boxes and labels:
class MyCustomTransform(torch.nn.Module):
def forward(self, img, bboxes, label): # we assume inputs are always structured like this
print(
f"I'm transforming an image of shape {img.shape} "
f"with bboxes = {bboxes}\n{label = }"
)
        # Do some transformations. Here, we're just passing through the input
return img, bboxes, label
transforms = v2.Compose([
MyCustomTransform(),
v2.RandomResizedCrop((224, 224), antialias=True),
v2.RandomHorizontalFlip(p=1),
v2.Normalize(mean=[0, 0, 0], std=[1, 1, 1])
])
H, W = 256, 256
img = torch.rand(3, H, W)
bboxes = tv_tensors.BoundingBoxes(
torch.tensor([[0, 10, 10, 20], [50, 50, 70, 70]]),
format="XYXY",
canvas_size=(H, W)
)
label = 3
out_img, out_bboxes, out_label = transforms(img, bboxes, label)
# %%
print(f"Output image shape: {out_img.shape}\nout_bboxes = {out_bboxes}\n{out_label = }")
# %%
# .. note::
# While working with TVTensor classes in your code, make sure to
# familiarize yourself with this section:
# :ref:`tv_tensor_unwrapping_behaviour`
#
# Supporting arbitrary input structures
# =====================================
#
# In the section above, we have assumed that you already know the structure of
# your inputs and that you're OK with hard-coding this expected structure in
# your code. If you want your custom transforms to be as flexible as possible,
# this can be a bit limiting.
#
# A key feature of the builtin Torchvision V2 transforms is that they can accept
# arbitrary input structure and return the same structure as output (with
# transformed entries). For example, transforms can accept a single image, or a
# tuple of ``(img, label)``, or an arbitrary nested dictionary as input:
structured_input = {
"img": img,
"annotations": (bboxes, label),
"something_that_will_be_ignored": (1, "hello")
}
structured_output = v2.RandomHorizontalFlip(p=1)(structured_input)
assert isinstance(structured_output, dict)
assert structured_output["something_that_will_be_ignored"] == (1, "hello")
print(f"The transformed bboxes are:\n{structured_output['annotations'][0]}")
# %%
# If you want to reproduce this behavior in your own transform, we invite you to
# look at our `code
# <https://github.com/pytorch/vision/blob/main/torchvision/transforms/v2/_transform.py>`_
# and adapt it to your needs.
#
# In brief, the core logic is to unpack the input into a flat list using `pytree
# <https://github.com/pytorch/pytorch/blob/main/torch/utils/_pytree.py>`_, and
# then transform only the entries that can be transformed (the decision is made
# based on the **class** of the entries, as all TVTensors are
# tensor-subclasses) plus some custom logic that is out of scope here - check the
# code for details. The (potentially transformed) entries are then repacked and
# returned, in the same structure as the input.
#
# We do not provide public dev-facing tools to achieve that at this time, but if
# this is something that would be valuable to you, please let us know by opening
# an issue on our `GitHub repo <https://github.com/pytorch/vision/issues>`_.
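# %%
# A minimal sketch of that idea (purely illustrative, not the actual torchvision
# implementation): flatten the input with ``torch.utils._pytree`` (a private PyTorch
# utility), transform only the entries whose class we care about, and repack the
# result into the original structure. The ``FlipImagesOnly`` transform below is a
# hypothetical example.
from torch.utils import _pytree as pytree


class FlipImagesOnly(torch.nn.Module):
    def forward(self, inputs):
        flat, spec = pytree.tree_flatten(inputs)  # arbitrary nesting -> flat list
        flat = [
            v2.functional.horizontal_flip(x) if isinstance(x, tv_tensors.Image) else x
            for x in flat
        ]
        return pytree.tree_unflatten(flat, spec)  # repack into the input structure


sample = {"img": tv_tensors.Image(img), "meta": ("keep me", 123)}
flipped = FlipImagesOnly()(sample)
print(type(flipped["img"]), flipped["meta"])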
|
"""
===================================
How to write your own v2 transforms
===================================
.. note::
    Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_transforms.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_transforms_plot_custom_transforms.py>` to download the full example code.
This guide explains how to write transforms that are compatible with the
torchvision transforms V2 API.
"""
# %%
import torch
from torchvision import tv_tensors
from torchvision.transforms import v2
# %%
# Just create a ``nn.Module`` and override the ``forward`` method
# ===============================================================
#
# In most cases, this is all you're going to need, as long as you already know
# the structure of the input that your transform will expect. For example if
# you're just doing image classification, your transform will typically accept a
# single image as input, or a ``(img, label)`` input. So you can just hard-code
# your ``forward`` method to accept just that, e.g.
#
# .. code:: python
#
# class MyCustomTransform(torch.nn.Module):
# def forward(self, img, label):
# # Do some transformations
# return new_img, new_label
#
# .. note::
#
# This means that if you have a custom transform that is already compatible
# with the V1 transforms (those in ``torchvision.transforms``), it will
# still work with the V2 transforms without any change!
#
# We will illustrate this more completely below with a typical detection case,
# where our samples are just images, bounding boxes and labels:
class MyCustomTransform(torch.nn.Module):
def forward(self, img, bboxes, label): # we assume inputs are always structured like this
print(
f"I'm transforming an image of shape {img.shape} "
f"with bboxes = {bboxes}\n{label = }"
)
        # Do some transformations. Here, we're just passing through the input
return img, bboxes, label
transforms = v2.Compose([
MyCustomTransform(),
v2.RandomResizedCrop((224, 224), antialias=True),
v2.RandomHorizontalFlip(p=1),
v2.Normalize(mean=[0, 0, 0], std=[1, 1, 1])
])
H, W = 256, 256
img = torch.rand(3, H, W)
bboxes = tv_tensors.BoundingBoxes(
torch.tensor([[0, 10, 10, 20], [50, 50, 70, 70]]),
format="XYXY",
canvas_size=(H, W)
)
label = 3
out_img, out_bboxes, out_label = transforms(img, bboxes, label)
# %%
print(f"Output image shape: {out_img.shape}\nout_bboxes = {out_bboxes}\n{out_label = }")
# %%
# .. note::
# While working with TVTensor classes in your code, make sure to
# familiarize yourself with this section:
# :ref:`tv_tensor_unwrapping_behaviour`
#
# Supporting arbitrary input structures
# =====================================
#
# In the section above, we have assumed that you already know the structure of
# your inputs and that you're OK with hard-coding this expected structure in
# your code. If you want your custom transforms to be as flexible as possible,
# this can be a bit limiting.
#
# A key feature of the builtin Torchvision V2 transforms is that they can accept
# arbitrary input structure and return the same structure as output (with
# transformed entries). For example, transforms can accept a single image, or a
# tuple of ``(img, label)``, or an arbitrary nested dictionary as input:
structured_input = {
"img": img,
"annotations": (bboxes, label),
"something_that_will_be_ignored": (1, "hello")
}
structured_output = v2.RandomHorizontalFlip(p=1)(structured_input)
assert isinstance(structured_output, dict)
assert structured_output["something_that_will_be_ignored"] == (1, "hello")
print(f"The transformed bboxes are:\n{structured_output['annotations'][0]}")
# %%
# If you want to reproduce this behavior in your own transform, we invite you to
# look at our `code
# <https://github.com/pytorch/vision/blob/main/torchvision/transforms/v2/_transform.py>`_
# and adapt it to your needs.
#
# In brief, the core logic is to unpack the input into a flat list using `pytree
# <https://github.com/pytorch/pytorch/blob/main/torch/utils/_pytree.py>`_, and
# then transform only the entries that can be transformed (the decision is made
# based on the **class** of the entries, as all TVTensors are
# tensor-subclasses) plus some custom logic that is out of scope here - check the
# code for details. The (potentially transformed) entries are then repacked and
# returned, in the same structure as the input.
#
# We do not provide public dev-facing tools to achieve that at this time, but if
# this is something that would be valuable to you, please let us know by opening
# an issue on our `GitHub repo <https://github.com/pytorch/vision/issues>`_.
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import Any, List, Optional, Sequence, Tuple
import torch
from torch.nn.parameter import Parameter
from torch.nn.utils import clip_grad
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class OptimizerHook(Hook):
"""A hook contains custom operations for the optimizer.
Args:
grad_clip (dict, optional): A config dict to control the clip_grad.
Defaults to None.
detect_anomalous_params (bool): This option is only used for
debugging which will slow down the training speed.
Detect anomalous parameters that are not included in
the computational graph with ``loss`` as the root.
            There are two cases:
- Parameters were not used during
forward pass.
- Parameters were not used to produce
loss.
Defaults to False.
"""
priority = 'HIGH'
def __init__(self,
grad_clip: Optional[dict] = None,
detect_anomalous_params: bool = False) -> None:
self.grad_clip = grad_clip
self.detect_anomalous_params = detect_anomalous_params
def clip_grads(self, params: List[Parameter]) -> Optional[torch.Tensor]:
"""Clip the gradients of parameters.
Args:
params (list[Parameter]): Model's parameters.
Returns:
Optional[torch.Tensor]: Total norm of the parameters if there is
at least one param requiring gradient, else None.
"""
params = list(
filter(lambda p: p.requires_grad and p.grad is not None, params))
if len(params) > 0:
return clip_grad.clip_grad_norm_(params, **self.grad_clip)
return None
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""All operations need to be finished after each training iteration.
        This function will perform the following operations:
- Detect any anomalous parameters which are not included in the
training graph. (optional)
- Compute the gradient of model parameters.
- Clip the gradients of each parameter. (optional)
- Update model parameters with gradients.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. In order to keep this interface consistent
with other hooks, we keep ``data_batch`` here.
Defaults to None.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks,
we keep ``outputs`` here. Defaults to None.
"""
runner.optimizer.zero_grad()
if self.detect_anomalous_params:
self.detect_anomalous_parameters(runner.outputs['loss'], runner)
runner.outputs['loss'].backward()
if self.grad_clip is not None:
grad_norm = self.clip_grads(runner.model.parameters())
if grad_norm is not None:
# Add grad norm to the logger
runner.log_buffer.update({'grad_norm': float(grad_norm)},
runner.outputs['num_samples'])
runner.optimizer.step()
def detect_anomalous_parameters(self, loss: torch.Tensor, runner) -> None:
"""Detect anomalous parameters that are not included in the graph.
Args:
loss (torch.Tensor): The loss of current iteration.
runner (Runner): The runner of the training process.
"""
logger = runner.logger
parameters_in_graph = set()
visited = set()
def traverse(grad_fn):
if grad_fn is None:
return
if grad_fn not in visited:
visited.add(grad_fn)
if hasattr(grad_fn, 'variable'):
parameters_in_graph.add(grad_fn.variable)
parents = grad_fn.next_functions
if parents is not None:
for parent in parents:
grad_fn = parent[0]
traverse(grad_fn)
traverse(loss.grad_fn)
for n, p in runner.model.named_parameters():
if p not in parameters_in_graph and p.requires_grad:
logger.log(
level=logging.ERROR,
msg=f'{n} with shape {p.size()} is not '
f'in the computational graph \n')
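if __name__ == "__main__":
    # Illustrative only: ``grad_clip`` is expanded into keyword arguments of
    # ``torch.nn.utils.clip_grad_norm_`` inside ``clip_grads``, so it accepts
    # e.g. ``max_norm`` and ``norm_type``.
    hook = OptimizerHook(grad_clip=dict(max_norm=35, norm_type=2))
    param = torch.nn.Parameter(torch.randn(4, 4))
    param.grad = torch.randn(4, 4)
    print(hook.clip_grads([param]))  # total norm of the gradients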
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import Any, List, Optional, Sequence, Tuple
import torch
from torch.nn.parameter import Parameter
from torch.nn.utils import clip_grad
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class OptimizerHook(Hook):
"""A hook contains custom operations for the optimizer.
Args:
grad_clip (dict, optional): A config dict to control the clip_grad.
Defaults to None.
detect_anomalous_params (bool): This option is only used for
debugging which will slow down the training speed.
Detect anomalous parameters that are not included in
the computational graph with ``loss`` as the root.
            There are two cases:
- Parameters were not used during
forward pass.
- Parameters were not used to produce
loss.
Defaults to False.
"""
priority = 'HIGH'
def __init__(self,
grad_clip: Optional[dict] = None,
detect_anomalous_params: bool = False) -> None:
self.grad_clip = grad_clip
self.detect_anomalous_params = detect_anomalous_params
def clip_grads(self, params: List[Parameter]) -> Optional[torch.Tensor]:
"""Clip the gradients of parameters.
Args:
params (list[Parameter]): Model's parameters.
Returns:
Optional[torch.Tensor]: Total norm of the parameters if there is
at least one param requiring gradient, else None.
"""
params = list(
filter(lambda p: p.requires_grad and p.grad is not None, params))
if len(params) > 0:
return clip_grad.clip_grad_norm_(params, **self.grad_clip)
return None
def after_train_iter(self,
runner,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""All operations need to be finished after each training iteration.
        This function will perform the following operations:
- Detect any anomalous parameters which are not included in the
training graph. (optional)
- Compute the gradient of model parameters.
        - Clip the gradients of each parameter. (optional)
- Update model parameters with gradients.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. In order to keep this interface consistent
with other hooks, we keep ``data_batch`` here.
Defaults to None.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks,
we keep ``outputs`` here. Defaults to None.
"""
runner.optimizer.zero_grad()
if self.detect_anomalous_params:
self.detect_anomalous_parameters(runner.outputs['loss'], runner)
runner.outputs['loss'].backward()
if self.grad_clip is not None:
grad_norm = self.clip_grads(runner.model.parameters())
if grad_norm is not None:
# Add grad norm to the logger
runner.log_buffer.update({'grad_norm': float(grad_norm)},
runner.outputs['num_samples'])
runner.optimizer.step()
def detect_anomalous_parameters(self, loss: torch.Tensor, runner) -> None:
"""Detect anomalous parameters that are not included in the graph.
Args:
loss (torch.Tensor): The loss of current iteration.
runner (Runner): The runner of the training process.
"""
logger = runner.logger
parameters_in_graph = set()
visited = set()
def traverse(grad_fn):
if grad_fn is None:
return
if grad_fn not in visited:
visited.add(grad_fn)
if hasattr(grad_fn, 'variable'):
parameters_in_graph.add(grad_fn.variable)
parents = grad_fn.next_functions
if parents is not None:
for parent in parents:
grad_fn = parent[0]
traverse(grad_fn)
traverse(loss.grad_fn)
for n, p in runner.model.named_parameters():
if p not in parameters_in_graph and p.requires_grad:
logger.log(
level=logging.ERROR,
msg=f'{n} with shape {p.size()} is not '
f'in the computational graph \n')
|
DEEPSEEK_MODEL_TO_CONTEXT_WINDOW = {
"deepseek-chat": 64000,
"deepseek-reasoner": 64000,
}
FUNCTION_CALLING_MODELS = {"deepseek-chat"}
def get_context_window(model: str) -> int:
return DEEPSEEK_MODEL_TO_CONTEXT_WINDOW.get(model, 64000)
|
DEEPSEEK_MODEL_TO_CONTEXT_WINDOW = {
"deepseek-chat": 64000,
"deepseek-reasoner": 64000,
}
def get_context_window(model: str) -> int:
return DEEPSEEK_MODEL_TO_CONTEXT_WINDOW.get(model, 64000)
|
from typing import Any, Optional
from typing_inspect import get_args, is_union_type
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
return isinstance(type_, type) and issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or issubclass(t, type(None))) for t in get_args(type_)
)
def change_cls_name(cls: type, new_name: str, scope: Optional[dict] = None) -> None:
"""Change the name of a class.
:param cls: the class to change the name of
:param new_name: the new name
:param scope: the scope in which the class is defined
"""
if scope:
scope[new_name] = cls
cls.__qualname__ = cls.__qualname__[: -len(cls.__name__)] + new_name
cls.__name__ = new_name
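if __name__ == "__main__":
    # Minimal check of the helpers above (illustrative only); assumes docarray's
    # ``NdArray`` tensor type is available.
    from docarray.typing import NdArray

    print(is_type_tensor(NdArray))  # True
    print(is_tensor_union(Optional[NdArray]))  # True: a Union of a tensor type and None
    print(is_tensor_union(int))  # False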
|
from typing import Any
from typing_inspect import get_args, is_union_type
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
return isinstance(type_, type) and issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or issubclass(t, type(None))) for t in get_args(type_)
)
|
from typing import List, Iterable
import collections
import string
import os
import json
import logging
from .WordTokenizer import WordTokenizer, ENGLISH_STOP_WORDS
from transformers.utils.import_utils import is_nltk_available, NLTK_IMPORT_ERROR
logger = logging.getLogger(__name__)
class PhraseTokenizer(WordTokenizer):
"""Tokenizes the text with respect to existent phrases in the vocab.
This tokenizers respects phrases that are in the vocab. Phrases are separated with 'ngram_separator', for example,
in Google News word2vec file, ngrams are separated with a _ like New_York. These phrases are detected in text and merged as one special token. (New York is the ... => [New_York, is, the])
"""
def __init__(
self,
vocab: Iterable[str] = [],
stop_words: Iterable[str] = ENGLISH_STOP_WORDS,
do_lower_case: bool = False,
ngram_separator: str = "_",
max_ngram_length: int = 5,
):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.ngram_separator = ngram_separator
self.max_ngram_length = max_ngram_length
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
# Check for ngram in vocab
self.ngram_lookup = set()
self.ngram_lengths = set()
for word in vocab:
if self.ngram_separator is not None and self.ngram_separator in word:
                # Some words might be malformed in e.g. Google News word2vec, containing two or more _ after each other
ngram_count = word.count(self.ngram_separator) + 1
if self.ngram_separator + self.ngram_separator not in word and ngram_count <= self.max_ngram_length:
self.ngram_lookup.add(word)
self.ngram_lengths.add(ngram_count)
if len(vocab) > 0:
logger.info("PhraseTokenizer - Phrase ngram lengths: {}".format(self.ngram_lengths))
logger.info("PhraseTokenizer - Num phrases: {}".format(len(self.ngram_lookup)))
def tokenize(self, text: str, **kwargs) -> List[int]:
from nltk import word_tokenize
tokens = word_tokenize(text, preserve_line=True)
# phrase detection
for ngram_len in sorted(self.ngram_lengths, reverse=True):
idx = 0
while idx <= len(tokens) - ngram_len:
ngram = self.ngram_separator.join(tokens[idx : idx + ngram_len])
if ngram in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram]
elif ngram.lower() in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram.lower()]
idx += 1
# Map tokens to idx, filter stop words
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "phrasetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
"ngram_separator": self.ngram_separator,
"max_ngram_length": self.max_ngram_length,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "phrasetokenizer_config.json"), "r") as fIn:
config = json.load(fIn)
return PhraseTokenizer(**config)
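if __name__ == "__main__":
    # Minimal usage sketch (illustrative only) with a toy vocabulary; requires nltk.
    # With an empty stop-word list every token maps to its vocab index, and the
    # bigram "New York" is merged into the single phrase token "new_york".
    tok = PhraseTokenizer(vocab=["new_york", "is", "big"], stop_words=[])
    print(tok.tokenize("New York is big"))  # -> [0, 1, 2]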
|
from typing import List, Iterable
import collections
import string
import os
import json
import logging
from .WordTokenizer import WordTokenizer, ENGLISH_STOP_WORDS
from transformers.utils.import_utils import is_nltk_available, NLTK_IMPORT_ERROR
logger = logging.getLogger(__name__)
class PhraseTokenizer(WordTokenizer):
"""Tokenizes the text with respect to existent phrases in the vocab.
This tokenizers respects phrases that are in the vocab. Phrases are separated with 'ngram_separator', for example,
in Google News word2vec file, ngrams are separated with a _ like New_York. These phrases are detected in text and merged as one special token. (New York is the ... => [New_York, is, the])
"""
def __init__(
self,
vocab: Iterable[str] = [],
stop_words: Iterable[str] = ENGLISH_STOP_WORDS,
do_lower_case: bool = False,
ngram_separator: str = "_",
max_ngram_length: int = 5,
):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.ngram_separator = ngram_separator
self.max_ngram_length = max_ngram_length
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
# Check for ngram in vocab
self.ngram_lookup = set()
self.ngram_lengths = set()
for word in vocab:
if self.ngram_separator is not None and self.ngram_separator in word:
                # Some words might be malformed in e.g. Google News word2vec, containing two or more _ after each other
ngram_count = word.count(self.ngram_separator) + 1
if self.ngram_separator + self.ngram_separator not in word and ngram_count <= self.max_ngram_length:
self.ngram_lookup.add(word)
self.ngram_lengths.add(ngram_count)
if len(vocab) > 0:
logger.info("PhraseTokenizer - Phrase ngram lengths: {}".format(self.ngram_lengths))
logger.info("PhraseTokenizer - Num phrases: {}".format(len(self.ngram_lookup)))
def tokenize(self, text: str) -> List[int]:
from nltk import word_tokenize
tokens = word_tokenize(text, preserve_line=True)
# phrase detection
for ngram_len in sorted(self.ngram_lengths, reverse=True):
idx = 0
while idx <= len(tokens) - ngram_len:
ngram = self.ngram_separator.join(tokens[idx : idx + ngram_len])
if ngram in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram]
elif ngram.lower() in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram.lower()]
idx += 1
# Map tokens to idx, filter stop words
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "phrasetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
"ngram_separator": self.ngram_separator,
"max_ngram_length": self.max_ngram_length,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "phrasetokenizer_config.json"), "r") as fIn:
config = json.load(fIn)
return PhraseTokenizer(**config)
|
from typing import Union
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import Runnable
from langchain_core.utils.function_calling import convert_pydantic_to_openai_function
from pydantic import BaseModel
_EXTRACTION_TEMPLATE = """Extract and save the relevant entities mentioned \
in the following passage together with their properties.
If a property is not present and is not required in the function parameters, do not include it in the output.""" # noqa: E501
@deprecated(
since="0.1.14",
message=(
"LangChain has introduced a method called `with_structured_output` that"
"is available on ChatModels capable of tool calling."
"You can read more about the method here: "
"<https://python.langchain.com/docs/modules/model_io/chat/structured_output/>. "
"Please follow our extraction use case documentation for more guidelines"
"on how to do information extraction with LLMs."
"<https://python.langchain.com/docs/use_cases/extraction/>. "
"with_structured_output does not currently support a list of pydantic schemas. "
"If this is a blocker or if you notice other issues, please provide "
"feedback here:"
"<https://github.com/langchain-ai/langchain/discussions/18154>"
),
removal="1.0",
alternative=(
"""
from pydantic import BaseModel, Field
from langchain_anthropic import ChatAnthropic
class Joke(BaseModel):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
# Or any other chat model that supports tools.
    # Please refer to the documentation of structured_output
# to see an up to date list of which models support
# with_structured_output.
model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
structured_llm = model.with_structured_output(Joke)
structured_llm.invoke("Tell me a joke about cats.
Make sure to call the Joke function.")
"""
),
)
def create_extraction_chain_pydantic(
pydantic_schemas: Union[list[type[BaseModel]], type[BaseModel]],
llm: BaseLanguageModel,
system_message: str = _EXTRACTION_TEMPLATE,
) -> Runnable:
"""Creates a chain that extracts information from a passage.
Args:
pydantic_schemas: The schema of the entities to extract.
llm: The language model to use.
system_message: The system message to use for extraction.
Returns:
A runnable that extracts information from a passage.
"""
if not isinstance(pydantic_schemas, list):
pydantic_schemas = [pydantic_schemas]
prompt = ChatPromptTemplate.from_messages(
[
("system", system_message),
("user", "{input}"),
]
)
functions = [convert_pydantic_to_openai_function(p) for p in pydantic_schemas]
tools = [{"type": "function", "function": d} for d in functions]
model = llm.bind(tools=tools)
return prompt | model | PydanticToolsParser(tools=pydantic_schemas)
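if __name__ == "__main__":
    # Minimal usage sketch (illustrative only); ``ChatOpenAI`` and the ``Person``
    # schema are assumptions, not part of this module.
    from langchain_openai import ChatOpenAI
    from pydantic import Field

    class Person(BaseModel):
        name: str = Field(description="The person's name")
        age: int = Field(description="The person's age")

    chain = create_extraction_chain_pydantic(Person, ChatOpenAI(model="gpt-4o-mini"))
    print(chain.invoke({"input": "Alice is 30 years old and Bob is 25."}))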
|
from typing import Union
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import Runnable
from langchain_core.utils.function_calling import convert_pydantic_to_openai_function
from pydantic import BaseModel
_EXTRACTION_TEMPLATE = """Extract and save the relevant entities mentioned \
in the following passage together with their properties.
If a property is not present and is not required in the function parameters, do not include it in the output.""" # noqa: E501
@deprecated(
since="0.1.14",
message=(
"LangChain has introduced a method called `with_structured_output` that"
"is available on ChatModels capable of tool calling."
"You can read more about the method here: "
"<https://python.langchain.com/docs/modules/model_io/chat/structured_output/>. "
"Please follow our extraction use case documentation for more guidelines"
"on how to do information extraction with LLMs."
"<https://python.langchain.com/docs/use_cases/extraction/>. "
"with_structured_output does not currently support a list of pydantic schemas. "
"If this is a blocker or if you notice other issues, please provide "
"feedback here:"
"<https://github.com/langchain-ai/langchain/discussions/18154>"
),
removal="1.0",
alternative=(
"""
from pydantic import BaseModel, Field
from langchain_anthropic import ChatAnthropic
class Joke(BaseModel):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
# Or any other chat model that supports tools.
# Please refer to the documentation of structured_output
# to see an up to date list of which models support
# with_structured_output.
model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
structured_llm = model.with_structured_output(Joke)
structured_llm.invoke("Tell me a joke about cats.
Make sure to call the Joke function.")
"""
),
)
def create_extraction_chain_pydantic(
pydantic_schemas: Union[list[type[BaseModel]], type[BaseModel]],
llm: BaseLanguageModel,
system_message: str = _EXTRACTION_TEMPLATE,
) -> Runnable:
"""Creates a chain that extracts information from a passage.
Args:
pydantic_schemas: The schema of the entities to extract.
llm: The language model to use.
system_message: The system message to use for extraction.
Returns:
A runnable that extracts information from a passage.
"""
if not isinstance(pydantic_schemas, list):
pydantic_schemas = [pydantic_schemas]
prompt = ChatPromptTemplate.from_messages(
[
("system", system_message),
("user", "{input}"),
]
)
functions = [convert_pydantic_to_openai_function(p) for p in pydantic_schemas]
tools = [{"type": "function", "function": d} for d in functions]
model = llm.bind(tools=tools)
chain = prompt | model | PydanticToolsParser(tools=pydantic_schemas)
return chain
|
"""
Quantile Regression
===================
.. versionadded:: 2.0.0
The script is inspired by this awesome example in sklearn:
https://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_quantile.html
.. note::
The feature is only supported using the Python, R, and C packages. In addition, quantile
crossing can happen due to limitation in the algorithm.
"""
import argparse
from typing import Dict
import numpy as np
from sklearn.model_selection import train_test_split
import xgboost as xgb
def f(x: np.ndarray) -> np.ndarray:
"""The function to predict."""
return x * np.sin(x)
def quantile_loss(args: argparse.Namespace) -> None:
"""Train a quantile regression model."""
rng = np.random.RandomState(1994)
    # Generate a synthetic dataset for the demo; the generation process follows the
    # sklearn example.
X = np.atleast_2d(rng.uniform(0, 10.0, size=1000)).T
expected_y = f(X).ravel()
sigma = 0.5 + X.ravel() / 10.0
noise = rng.lognormal(sigma=sigma) - np.exp(sigma**2.0 / 2.0)
y = expected_y + noise
    # Train on the 0.05, 0.5, and 0.95 quantiles. The model is similar to multi-class
    # and multi-target models.
alpha = np.array([0.05, 0.5, 0.95])
evals_result: Dict[str, Dict] = {}
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
    # We will be using the `hist` tree method; a QuantileDMatrix can be used to reduce
    # memory usage.
    # Do not use the `exact` tree method for quantile regression, otherwise the
    # performance might drop.
    Xy = xgb.QuantileDMatrix(X_train, y_train)
# use Xy as a reference
Xy_test = xgb.QuantileDMatrix(X_test, y_test, ref=Xy)
booster = xgb.train(
{
# Use the quantile objective function.
"objective": "reg:quantileerror",
"tree_method": "hist",
"quantile_alpha": alpha,
# Let's try not to overfit.
"learning_rate": 0.04,
"max_depth": 5,
},
Xy,
num_boost_round=32,
early_stopping_rounds=2,
# The evaluation result is a weighted average across multiple quantiles.
evals=[(Xy, "Train"), (Xy_test, "Test")],
evals_result=evals_result,
)
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
scores = booster.inplace_predict(xx)
# dim 1 is the quantiles
assert scores.shape[0] == xx.shape[0]
assert scores.shape[1] == alpha.shape[0]
y_lower = scores[:, 0] # alpha=0.05
y_med = scores[:, 1] # alpha=0.5, median
y_upper = scores[:, 2] # alpha=0.95
# Train a mse model for comparison
booster = xgb.train(
{
"objective": "reg:squarederror",
"tree_method": "hist",
# Let's try not to overfit.
"learning_rate": 0.04,
"max_depth": 5,
},
Xy,
num_boost_round=32,
early_stopping_rounds=2,
evals=[(Xy, "Train"), (Xy_test, "Test")],
evals_result=evals_result,
)
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
y_pred = booster.inplace_predict(xx)
if args.plot:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(10, 10))
plt.plot(xx, f(xx), "g:", linewidth=3, label=r"$f(x) = x\,\sin(x)$")
plt.plot(X_test, y_test, "b.", markersize=10, label="Test observations")
plt.plot(xx, y_med, "r-", label="Predicted median")
plt.plot(xx, y_pred, "m-", label="Predicted mean")
plt.plot(xx, y_upper, "k-")
plt.plot(xx, y_lower, "k-")
plt.fill_between(
xx.ravel(), y_lower, y_upper, alpha=0.4, label="Predicted 90% interval"
)
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
plt.ylim(-10, 25)
plt.legend(loc="upper left")
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--plot",
action="store_true",
help="Specify it to enable plotting the outputs.",
)
args = parser.parse_args()
quantile_loss(args)
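# Run this file directly as a script; pass --plot to also render the comparison figure
# (plotting requires matplotlib).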
|
"""
Quantile Regression
===================
.. versionadded:: 2.0.0
The script is inspired by this awesome example in sklearn:
https://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_quantile.html
.. note::
The feature is only supported using the Python package. In addition, quantile
crossing can happen due to limitation in the algorithm.
"""
import argparse
from typing import Dict
import numpy as np
from sklearn.model_selection import train_test_split
import xgboost as xgb
def f(x: np.ndarray) -> np.ndarray:
"""The function to predict."""
return x * np.sin(x)
def quantile_loss(args: argparse.Namespace) -> None:
"""Train a quantile regression model."""
rng = np.random.RandomState(1994)
    # Generate a synthetic dataset for the demo; the generation process follows the
    # sklearn example.
X = np.atleast_2d(rng.uniform(0, 10.0, size=1000)).T
expected_y = f(X).ravel()
sigma = 0.5 + X.ravel() / 10.0
noise = rng.lognormal(sigma=sigma) - np.exp(sigma**2.0 / 2.0)
y = expected_y + noise
    # Train on the 0.05, 0.5, and 0.95 quantiles. The model is similar to multi-class
    # and multi-target models.
alpha = np.array([0.05, 0.5, 0.95])
evals_result: Dict[str, Dict] = {}
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
    # We will be using the `hist` tree method; a QuantileDMatrix can be used to reduce
    # memory usage.
    # Do not use the `exact` tree method for quantile regression, otherwise the
    # performance might drop.
    Xy = xgb.QuantileDMatrix(X_train, y_train)
# use Xy as a reference
Xy_test = xgb.QuantileDMatrix(X_test, y_test, ref=Xy)
booster = xgb.train(
{
# Use the quantile objective function.
"objective": "reg:quantileerror",
"tree_method": "hist",
"quantile_alpha": alpha,
# Let's try not to overfit.
"learning_rate": 0.04,
"max_depth": 5,
},
Xy,
num_boost_round=32,
early_stopping_rounds=2,
# The evaluation result is a weighted average across multiple quantiles.
evals=[(Xy, "Train"), (Xy_test, "Test")],
evals_result=evals_result,
)
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
scores = booster.inplace_predict(xx)
# dim 1 is the quantiles
assert scores.shape[0] == xx.shape[0]
assert scores.shape[1] == alpha.shape[0]
y_lower = scores[:, 0] # alpha=0.05
y_med = scores[:, 1] # alpha=0.5, median
y_upper = scores[:, 2] # alpha=0.95
# Train a mse model for comparison
booster = xgb.train(
{
"objective": "reg:squarederror",
"tree_method": "hist",
# Let's try not to overfit.
"learning_rate": 0.04,
"max_depth": 5,
},
Xy,
num_boost_round=32,
early_stopping_rounds=2,
evals=[(Xy, "Train"), (Xy_test, "Test")],
evals_result=evals_result,
)
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
y_pred = booster.inplace_predict(xx)
if args.plot:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(10, 10))
plt.plot(xx, f(xx), "g:", linewidth=3, label=r"$f(x) = x\,\sin(x)$")
plt.plot(X_test, y_test, "b.", markersize=10, label="Test observations")
plt.plot(xx, y_med, "r-", label="Predicted median")
plt.plot(xx, y_pred, "m-", label="Predicted mean")
plt.plot(xx, y_upper, "k-")
plt.plot(xx, y_lower, "k-")
plt.fill_between(
xx.ravel(), y_lower, y_upper, alpha=0.4, label="Predicted 90% interval"
)
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
plt.ylim(-10, 25)
plt.legend(loc="upper left")
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--plot",
action="store_true",
help="Specify it to enable plotting the outputs.",
)
args = parser.parse_args()
quantile_loss(args)
|
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Identity")
class Identity(Layer):
"""Identity layer.
This layer should be used as a placeholder when no operation is to be
performed. The layer just returns its `inputs` argument as output.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
self.built = True
def call(self, inputs):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs):
return tree.map_structure(
lambda x: KerasTensor(x.shape, dtype=x.dtype, sparse=x.sparse),
inputs,
)
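# Usage sketch: the layer is a no-op pass-through, useful as a configurable placeholder
# that can later be swapped for a real layer.
# outputs = Identity()(some_keras_tensor)  # returns the input unchanged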
|
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Identity")
class Identity(Layer):
"""Identity layer.
This layer should be used as a placeholder when no operation is to be
performed. The layer just returns its `inputs` argument as output.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
def call(self, inputs):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs):
return tree.map_structure(
lambda x: KerasTensor(x.shape, dtype=x.dtype, sparse=x.sparse),
inputs,
)
|
from __future__ import annotations
from pathlib import Path
import torch
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.models import IDF
from tests.sparse_encoder.utils import sparse_allclose
def test_idf_padding_ignored(inference_free_splade_bert_tiny_model: SparseEncoder) -> None:
model = inference_free_splade_bert_tiny_model
input_texts = ["This is a test input", "This is a considerably longer test input to check padding behavior."]
# Encode the input texts
batch_embeddings = model.encode_query(input_texts, save_to_cpu=True)
single_embeddings = [model.encode_query(text, save_to_cpu=True) for text in input_texts]
single_embeddings = torch.stack(single_embeddings)
# Check that the batch embeddings match the single embeddings
assert sparse_allclose(
batch_embeddings, single_embeddings, atol=1e-6
), "Batch encoding does not match single encoding."
def test_idf_save_load(inference_free_splade_bert_tiny_model: SparseEncoder, tmp_path: Path) -> None:
model = inference_free_splade_bert_tiny_model
# Define test inputs
test_inputs = ["This is a simple test.", "Another example text for testing."]
# Get embeddings before saving
original_embeddings = model.encode_query(test_inputs, save_to_cpu=True)
# Save the model
save_path = tmp_path / "test_idf_model"
model.save_pretrained(save_path)
# Load the model
loaded_model = SparseEncoder(str(save_path))
# Get embeddings after loading
loaded_embeddings = loaded_model.encode_query(test_inputs, save_to_cpu=True)
# Check if embeddings are the same before and after save/load
assert sparse_allclose(original_embeddings, loaded_embeddings, atol=1e-6), "Embeddings changed after save and load"
# Check if IDF weights are maintained after loading
assert isinstance(loaded_model[0].query_0_IDF, IDF), "IDF component missing after loading"
assert torch.allclose(
model[0].query_0_IDF.weight, loaded_model[0].query_0_IDF.weight
), "IDF weights changed after save and load"
|
from __future__ import annotations
import torch
from sentence_transformers import SparseEncoder
from tests.sparse_encoder.utils import sparse_allclose
def test_idf_padding_ignored(inference_free_splade_bert_tiny_model: SparseEncoder):
model = inference_free_splade_bert_tiny_model
input_texts = ["This is a test input", "This is a considerably longer test input to check padding behavior."]
# Encode the input texts
batch_embeddings = model.encode_query(input_texts, save_to_cpu=True)
single_embeddings = [model.encode_query(text, save_to_cpu=True) for text in input_texts]
single_embeddings = torch.stack(single_embeddings)
# Check that the batch embeddings match the single embeddings
assert sparse_allclose(
batch_embeddings, single_embeddings, atol=1e-6
), "Batch encoding does not match single encoding."
|
"""
PostgresML index.
An index that is built on top of PostgresML.
"""
import logging
from typing import Any, List, Optional, Dict
from llama_index.core.async_utils import run_async_tasks
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
from llama_index.indices.managed.postgresml.base import PostgresMLIndex
_logger = logging.getLogger(__name__)
class PostgresMLRetriever(BaseRetriever):
"""
PostgresML Retriever.
Args:
index (PostgresMLIndex): the PostgresML Index
"""
def __init__(
self,
index: PostgresMLIndex,
callback_manager: Optional[CallbackManager] = None,
pgml_query: Optional[Dict[str, Any]] = None,
limit: Optional[int] = 5,
rerank: Optional[Dict[str, Any]] = None,
**kwargs,
) -> None:
"""Initialize params."""
self._index = index
self._pgml_query = pgml_query
self._limit = limit
self._rerank = rerank
super().__init__(callback_manager)
def _retrieve(
self,
query_bundle: Optional[QueryBundle] = None,
**kwargs: Any,
) -> List[NodeWithScore]:
return run_async_tasks([self._aretrieve(query_bundle, **kwargs)])[0]
async def _aretrieve(
self,
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
async def do_vector_search():
if self._pgml_query:
return await self._index.collection.vector_search(
self._pgml_query,
self._index.pipeline,
)
else:
if not query_bundle:
raise Exception(
"Must provide either query or query_bundle to retrieve and aretrieve"
)
if self._rerank is not None:
self._rerank = self._rerank | {"query": query_bundle.query_str}
return await self._index.collection.vector_search(
{
"query": {
"fields": {
"content": {
"query": query_bundle.query_str,
"parameters": {"prompt": "query: "},
}
}
},
"rerank": self._rerank,
"limit": self._limit,
},
self._index.pipeline,
)
results = await do_vector_search()
return [
NodeWithScore(
node=TextNode(
id_=r["document"]["id"],
text=r["chunk"],
metadata=r["document"]["metadata"],
),
score=r["score"],
)
if self._rerank is None
else NodeWithScore(
node=TextNode(
id_=r["document"]["id"],
text=r["chunk"],
metadata=r["document"]["metadata"],
),
score=r["rerank_score"],
)
for r in results
]
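# Usage sketch (assumes an already built PostgresMLIndex instance named `index`):
# retriever = PostgresMLRetriever(index, limit=3)
# nodes = retriever.retrieve("What is PostgresML?")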
|
"""PostgresML index.
An index that is built on top of PostgresML.
"""
import logging
from typing import Any, List, Optional, Dict
from llama_index.core.async_utils import run_async_tasks
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
from llama_index.indices.managed.postgresml.base import PostgresMLIndex
_logger = logging.getLogger(__name__)
class PostgresMLRetriever(BaseRetriever):
"""PostgresML Retriever.
Args:
index (PostgresMLIndex): the PostgresML Index
"""
def __init__(
self,
index: PostgresMLIndex,
callback_manager: Optional[CallbackManager] = None,
pgml_query: Optional[Dict[str, Any]] = None,
limit: Optional[int] = 5,
rerank: Optional[Dict[str, Any]] = None,
**kwargs,
) -> None:
"""Initialize params."""
self._index = index
self._pgml_query = pgml_query
self._limit = limit
self._rerank = rerank
super().__init__(callback_manager)
def _retrieve(
self,
query_bundle: Optional[QueryBundle] = None,
**kwargs: Any,
) -> List[NodeWithScore]:
return run_async_tasks([self._aretrieve(query_bundle, **kwargs)])[0]
async def _aretrieve(
self,
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
async def do_vector_search():
if self._pgml_query:
return await self._index.collection.vector_search(
self._pgml_query,
self._index.pipeline,
)
else:
if not query_bundle:
raise Exception(
"Must provide either query or query_bundle to retrieve and aretrieve"
)
if self._rerank is not None:
self._rerank = self._rerank | {"query": query_bundle.query_str}
return await self._index.collection.vector_search(
{
"query": {
"fields": {
"content": {
"query": query_bundle.query_str,
"parameters": {"prompt": "query: "},
}
}
},
"rerank": self._rerank,
"limit": self._limit,
},
self._index.pipeline,
)
results = await do_vector_search()
return [
NodeWithScore(
node=TextNode(
id_=r["document"]["id"],
text=r["chunk"],
metadata=r["document"]["metadata"],
),
score=r["score"],
)
if self._rerank is None
else NodeWithScore(
node=TextNode(
id_=r["document"]["id"],
text=r["chunk"],
metadata=r["document"]["metadata"],
),
score=r["rerank_score"],
)
for r in results
]
|
import pytest
from typing import Dict, List
from llama_index.core.llms import ChatMessage, MessageRole, TextBlock, AudioBlock
from llama_index.voice_agents.elevenlabs.utils import (
callback_agent_message,
callback_agent_message_correction,
callback_latency_measurement,
callback_user_message,
get_messages_from_chat,
)
data = b"fake_audio_data"
@pytest.fixture()
def messages() -> Dict[int, List[ChatMessage]]:
return {
1: [
ChatMessage(role=MessageRole.ASSISTANT, blocks=[AudioBlock(audio=data)]),
ChatMessage(role=MessageRole.USER, blocks=[TextBlock(text="Hello")]),
]
}
@pytest.fixture()
def latencies() -> List[int]:
return [1, 3]
def test_agent_message(messages: Dict[int, List[ChatMessage]]):
local_messages = messages.copy()
callback_agent_message(messages=local_messages, message_id=1, text="Hello")
assert {
1: [
ChatMessage(
role=MessageRole.ASSISTANT,
blocks=[AudioBlock(audio=data), TextBlock(text="Hello")],
),
ChatMessage(role=MessageRole.USER, blocks=[TextBlock(text="Hello")]),
]
} == local_messages
callback_agent_message(messages=local_messages, message_id=2, text="Hello")
assert {
1: [
ChatMessage(
role=MessageRole.ASSISTANT,
blocks=[AudioBlock(audio=data), TextBlock(text="Hello")],
),
ChatMessage(role=MessageRole.USER, blocks=[TextBlock(text="Hello")]),
],
2: [ChatMessage(role=MessageRole.ASSISTANT, blocks=[TextBlock(text="Hello")])],
} == local_messages
callback_agent_message(messages=local_messages, message_id=2, audio=data)
assert {
1: [
ChatMessage(
role=MessageRole.ASSISTANT,
blocks=[AudioBlock(audio=data), TextBlock(text="Hello")],
),
ChatMessage(role=MessageRole.USER, blocks=[TextBlock(text="Hello")]),
],
2: [
ChatMessage(
role=MessageRole.ASSISTANT,
blocks=[TextBlock(text="Hello"), AudioBlock(audio=data)],
)
],
} == local_messages
def test_user_message(messages: Dict[int, List[ChatMessage]]):
local_messages = messages.copy()
callback_user_message(messages=local_messages, message_id=1, audio=data)
assert {
1: [
ChatMessage(role=MessageRole.ASSISTANT, blocks=[AudioBlock(audio=data)]),
ChatMessage(
role=MessageRole.USER,
blocks=[TextBlock(text="Hello"), AudioBlock(audio=data)],
),
]
} == local_messages
callback_user_message(messages=local_messages, message_id=2, text="Hello")
assert {
1: [
ChatMessage(role=MessageRole.ASSISTANT, blocks=[AudioBlock(audio=data)]),
ChatMessage(
role=MessageRole.USER,
blocks=[TextBlock(text="Hello"), AudioBlock(audio=data)],
),
],
2: [ChatMessage(role=MessageRole.USER, blocks=[TextBlock(text="Hello")])],
} == local_messages
callback_user_message(messages=local_messages, message_id=2, audio=data)
assert {
1: [
ChatMessage(role=MessageRole.ASSISTANT, blocks=[AudioBlock(audio=data)]),
ChatMessage(
role=MessageRole.USER,
blocks=[TextBlock(text="Hello"), AudioBlock(audio=data)],
),
],
2: [
ChatMessage(
role=MessageRole.USER,
blocks=[TextBlock(text="Hello"), AudioBlock(audio=data)],
)
],
} == local_messages
def test_agent_message_correction(messages: Dict[int, List[ChatMessage]]):
local_messages = messages.copy()
local_messages[1][0].blocks.append(TextBlock(text="Hell"))
callback_agent_message_correction(
messages=local_messages, message_id=1, text="Hello"
)
assert local_messages[1][0].blocks[1].text == "Hello"
def test_latencies(latencies: List[int]):
local_lats = latencies.copy()
callback_latency_measurement(local_lats, 3)
callback_latency_measurement(local_lats, 9)
assert local_lats == [*latencies, 3, 9]
def test_get_messages_from_chat(messages: Dict[int, List[ChatMessage]]) -> None:
assert get_messages_from_chat(messages) == messages[1]
|
import pytest
from typing import Dict, List
from llama_index.core.llms import ChatMessage, MessageRole, TextBlock, AudioBlock
from llama_index.voice_agents.elevenlabs.utils import (
callback_agent_message,
callback_agent_message_correction,
callback_latency_measurement,
callback_user_message,
)
data = b"fake_audio_data"
@pytest.fixture()
def messages() -> Dict[int, List[ChatMessage]]:
return {
1: [
ChatMessage(role=MessageRole.ASSISTANT, blocks=[AudioBlock(audio=data)]),
ChatMessage(role=MessageRole.USER, blocks=[TextBlock(text="Hello")]),
]
}
@pytest.fixture()
def latencies() -> List[int]:
return [1, 3]
def test_agent_message(messages: Dict[int, List[ChatMessage]]):
local_messages = messages.copy()
callback_agent_message(messages=local_messages, message_id=1, text="Hello")
assert {
1: [
ChatMessage(
role=MessageRole.ASSISTANT,
blocks=[AudioBlock(audio=data), TextBlock(text="Hello")],
),
ChatMessage(role=MessageRole.USER, blocks=[TextBlock(text="Hello")]),
]
} == local_messages
callback_agent_message(messages=local_messages, message_id=2, text="Hello")
assert {
1: [
ChatMessage(
role=MessageRole.ASSISTANT,
blocks=[AudioBlock(audio=data), TextBlock(text="Hello")],
),
ChatMessage(role=MessageRole.USER, blocks=[TextBlock(text="Hello")]),
],
2: [ChatMessage(role=MessageRole.ASSISTANT, blocks=[TextBlock(text="Hello")])],
} == local_messages
callback_agent_message(messages=local_messages, message_id=2, audio=data)
assert {
1: [
ChatMessage(
role=MessageRole.ASSISTANT,
blocks=[AudioBlock(audio=data), TextBlock(text="Hello")],
),
ChatMessage(role=MessageRole.USER, blocks=[TextBlock(text="Hello")]),
],
2: [
ChatMessage(
role=MessageRole.ASSISTANT,
blocks=[TextBlock(text="Hello"), AudioBlock(audio=data)],
)
],
} == local_messages
def test_user_message(messages: Dict[int, List[ChatMessage]]):
local_messages = messages.copy()
callback_user_message(messages=local_messages, message_id=1, audio=data)
assert {
1: [
ChatMessage(role=MessageRole.ASSISTANT, blocks=[AudioBlock(audio=data)]),
ChatMessage(
role=MessageRole.USER,
blocks=[TextBlock(text="Hello"), AudioBlock(audio=data)],
),
]
} == local_messages
callback_user_message(messages=local_messages, message_id=2, text="Hello")
assert {
1: [
ChatMessage(role=MessageRole.ASSISTANT, blocks=[AudioBlock(audio=data)]),
ChatMessage(
role=MessageRole.USER,
blocks=[TextBlock(text="Hello"), AudioBlock(audio=data)],
),
],
2: [ChatMessage(role=MessageRole.USER, blocks=[TextBlock(text="Hello")])],
} == local_messages
callback_user_message(messages=local_messages, message_id=2, audio=data)
assert {
1: [
ChatMessage(role=MessageRole.ASSISTANT, blocks=[AudioBlock(audio=data)]),
ChatMessage(
role=MessageRole.USER,
blocks=[TextBlock(text="Hello"), AudioBlock(audio=data)],
),
],
2: [
ChatMessage(
role=MessageRole.USER,
blocks=[TextBlock(text="Hello"), AudioBlock(audio=data)],
)
],
} == local_messages
def test_agent_message_correction(messages: Dict[int, List[ChatMessage]]):
local_messages = messages.copy()
local_messages[1][0].blocks.append(TextBlock(text="Hell"))
callback_agent_message_correction(
messages=local_messages, message_id=1, text="Hello"
)
assert local_messages[1][0].blocks[1].text == "Hello"
def test_latencies(latencies: List[int]):
local_lats = latencies.copy()
callback_latency_measurement(local_lats, 3)
callback_latency_measurement(local_lats, 9)
assert local_lats == [*latencies, 3, 9]
|
from typing import Dict, Type
from llama_index.core.node_parser.file.html import HTMLNodeParser
from llama_index.core.node_parser.file.json import JSONNodeParser
from llama_index.core.node_parser.file.markdown import MarkdownNodeParser
from llama_index.core.node_parser.file.simple_file import SimpleFileNodeParser
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.node_parser.relational.hierarchical import (
HierarchicalNodeParser,
)
from llama_index.core.node_parser.text.code import CodeSplitter
from llama_index.core.node_parser.text.sentence import SentenceSplitter
from llama_index.core.node_parser.text.sentence_window import (
SentenceWindowNodeParser,
)
from llama_index.core.node_parser.text.token import TokenTextSplitter
all_node_parsers: Dict[str, Type[NodeParser]] = {
HTMLNodeParser.class_name(): HTMLNodeParser,
JSONNodeParser.class_name(): JSONNodeParser,
MarkdownNodeParser.class_name(): MarkdownNodeParser,
SimpleFileNodeParser.class_name(): SimpleFileNodeParser,
HierarchicalNodeParser.class_name(): HierarchicalNodeParser,
CodeSplitter.class_name(): CodeSplitter,
SentenceSplitter.class_name(): SentenceSplitter,
TokenTextSplitter.class_name(): TokenTextSplitter,
SentenceWindowNodeParser.class_name(): SentenceWindowNodeParser,
}
def load_parser(
data: dict,
) -> NodeParser:
if isinstance(data, NodeParser):
return data
parser_name = data.get("class_name")
if parser_name is None:
raise ValueError("Parser loading requires a class_name")
if parser_name not in all_node_parsers:
raise ValueError(f"Invalid parser name: {parser_name}")
else:
return all_node_parsers[parser_name].from_dict(data)
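# Usage sketch: load_parser round-trips the dict produced by a parser's to_dict()
# (assuming the standard serialization interface that includes "class_name"):
# parser = SentenceSplitter(chunk_size=256)
# restored = load_parser(parser.to_dict())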
|
from typing import Dict, Type
from llama_index.core.node_parser.file.html import HTMLNodeParser
from llama_index.core.node_parser.file.json import JSONNodeParser
from llama_index.core.node_parser.file.markdown import MarkdownNodeParser
from llama_index.core.node_parser.file.simple_file import SimpleFileNodeParser
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.node_parser.relational.hierarchical import (
HierarchicalNodeParser,
)
from llama_index.core.node_parser.text.code import CodeSplitter
from llama_index.core.node_parser.text.sentence import SentenceSplitter
from llama_index.core.node_parser.text.sentence_window import (
SentenceWindowNodeParser,
)
from llama_index.core.node_parser.text.token import TokenTextSplitter
all_node_parsers: Dict[str, Type[NodeParser]] = {
HTMLNodeParser.class_name(): HTMLNodeParser,
JSONNodeParser.class_name(): JSONNodeParser,
MarkdownNodeParser.class_name(): MarkdownNodeParser,
SimpleFileNodeParser.class_name(): SimpleFileNodeParser,
HierarchicalNodeParser.class_name(): HierarchicalNodeParser,
CodeSplitter.class_name(): CodeSplitter,
SentenceSplitter.class_name(): SentenceSplitter,
TokenTextSplitter.class_name(): TokenTextSplitter,
SentenceWindowNodeParser.class_name(): SentenceWindowNodeParser,
}
def load_parser(
data: dict,
) -> NodeParser:
if isinstance(data, NodeParser):
return data
parser_name = data.get("class_name", None)
if parser_name is None:
raise ValueError("Parser loading requires a class_name")
if parser_name not in all_node_parsers:
raise ValueError(f"Invalid parser name: {parser_name}")
else:
return all_node_parsers[parser_name].from_dict(data)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import shutil
import time
from unittest import TestCase
from unittest.mock import Mock
import torch
from mmengine.structures import InstanceData
from mmdet.engine.hooks import DetVisualizationHook
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clamp(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clamp(0, h)
br_x = ((cx * w) + (w * bw / 2)).clamp(0, w)
br_y = ((cy * h) + (h * bh / 2)).clamp(0, h)
bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=0).T
return bboxes
class TestVisualizationHook(TestCase):
def setUp(self) -> None:
DetLocalVisualizer.get_instance('current_visualizer')
pred_instances = InstanceData()
pred_instances.bboxes = _rand_bboxes(5, 10, 12)
pred_instances.labels = torch.randint(0, 2, (5, ))
pred_instances.scores = torch.rand((5, ))
pred_det_data_sample = DetDataSample()
pred_det_data_sample.set_metainfo({
'img_path':
osp.join(osp.dirname(__file__), '../../data/color.jpg')
})
pred_det_data_sample.pred_instances = pred_instances
self.outputs = [pred_det_data_sample] * 2
def test_after_val_iter(self):
runner = Mock()
runner.iter = 1
hook = DetVisualizationHook()
hook.after_val_iter(runner, 1, {}, self.outputs)
def test_after_test_iter(self):
runner = Mock()
runner.iter = 1
hook = DetVisualizationHook(draw=True)
hook.after_test_iter(runner, 1, {}, self.outputs)
self.assertEqual(hook._test_index, 2)
# test
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
test_out_dir = timestamp + '1'
runner.work_dir = timestamp
runner.timestamp = '1'
hook = DetVisualizationHook(draw=False, test_out_dir=test_out_dir)
hook.after_test_iter(runner, 1, {}, self.outputs)
self.assertTrue(not osp.exists(f'{timestamp}/1/{test_out_dir}'))
hook = DetVisualizationHook(draw=True, test_out_dir=test_out_dir)
hook.after_test_iter(runner, 1, {}, self.outputs)
self.assertTrue(osp.exists(f'{timestamp}/1/{test_out_dir}'))
shutil.rmtree(f'{timestamp}')
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import shutil
import time
from unittest import TestCase
from unittest.mock import Mock
import torch
from mmengine.structures import InstanceData
from mmdet.engine.hooks import DetVisualizationHook
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clamp(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clamp(0, h)
br_x = ((cx * w) + (w * bw / 2)).clamp(0, w)
br_y = ((cy * h) + (h * bh / 2)).clamp(0, h)
bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=0).T
return bboxes
class TestVisualizationHook(TestCase):
def setUp(self) -> None:
DetLocalVisualizer.get_instance('visualizer')
pred_instances = InstanceData()
pred_instances.bboxes = _rand_bboxes(5, 10, 12)
pred_instances.labels = torch.randint(0, 2, (5, ))
pred_instances.scores = torch.rand((5, ))
pred_det_data_sample = DetDataSample()
pred_det_data_sample.set_metainfo({
'img_path':
osp.join(osp.dirname(__file__), '../../data/color.jpg')
})
pred_det_data_sample.pred_instances = pred_instances
self.outputs = [pred_det_data_sample] * 2
def test_after_val_iter(self):
runner = Mock()
runner.iter = 1
hook = DetVisualizationHook()
hook.after_val_iter(runner, 1, {}, self.outputs)
def test_after_test_iter(self):
runner = Mock()
runner.iter = 1
hook = DetVisualizationHook(draw=True)
hook.after_test_iter(runner, 1, {}, self.outputs)
self.assertEqual(hook._test_index, 2)
# test
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
test_out_dir = timestamp + '1'
runner.work_dir = timestamp
runner.timestamp = '1'
hook = DetVisualizationHook(draw=False, test_out_dir=test_out_dir)
hook.after_test_iter(runner, 1, {}, self.outputs)
self.assertTrue(not osp.exists(f'{timestamp}/1/{test_out_dir}'))
hook = DetVisualizationHook(draw=True, test_out_dir=test_out_dir)
hook.after_test_iter(runner, 1, {}, self.outputs)
self.assertTrue(osp.exists(f'{timestamp}/1/{test_out_dir}'))
shutil.rmtree(f'{timestamp}')
|
from datetime import datetime
import pytest
from autogpt_libs.supabase_integration_credentials_store.store import openai_credentials
from prisma.models import UserBlockCredit
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.credit import UserCredit
from backend.data.user import DEFAULT_USER_ID
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = UserCredit(REFILL_VALUE)
@pytest.mark.asyncio(scope="session")
async def test_block_credit_usage(server: SpinTestServer):
current_credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
spending_amount_1 = await user_credit.spend_credits(
DEFAULT_USER_ID,
current_credit,
AITextGeneratorBlock().id,
{
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
0.0,
0.0,
validate_balance=False,
)
assert spending_amount_1 > 0
spending_amount_2 = await user_credit.spend_credits(
DEFAULT_USER_ID,
current_credit,
AITextGeneratorBlock().id,
{"model": "gpt-4-turbo", "api_key": "owned_api_key"},
0.0,
0.0,
validate_balance=False,
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
current_credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
new_credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(scope="session")
async def test_block_credit_reset(server: SpinTestServer):
month1 = datetime(2022, 1, 15)
month2 = datetime(2022, 2, 15)
user_credit.time_now = lambda: month2
month2credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: month1
month1credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
assert await user_credit.get_or_refill_credit(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: month2
assert await user_credit.get_or_refill_credit(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(scope="session")
async def test_credit_refill(server: SpinTestServer):
# Clear all transactions within the month
await UserBlockCredit.prisma().update_many(
where={
"userId": DEFAULT_USER_ID,
"createdAt": {
"gte": datetime(2022, 2, 1),
"lt": datetime(2022, 3, 1),
},
},
data={"isActive": False},
)
user_credit.time_now = lambda: datetime(2022, 2, 15)
balance = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
from datetime import datetime
import pytest
from prisma.models import UserBlockCredit
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.credit import UserCredit
from backend.data.user import DEFAULT_USER_ID
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = UserCredit(REFILL_VALUE)
@pytest.mark.asyncio(scope="session")
async def test_block_credit_usage(server: SpinTestServer):
current_credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
spending_amount_1 = await user_credit.spend_credits(
DEFAULT_USER_ID,
current_credit,
AITextGeneratorBlock().id,
{"model": "gpt-4-turbo"},
0.0,
0.0,
validate_balance=False,
)
assert spending_amount_1 > 0
spending_amount_2 = await user_credit.spend_credits(
DEFAULT_USER_ID,
current_credit,
AITextGeneratorBlock().id,
{"model": "gpt-4-turbo", "api_key": "owned_api_key"},
0.0,
0.0,
validate_balance=False,
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
current_credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
new_credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(scope="session")
async def test_block_credit_reset(server: SpinTestServer):
month1 = datetime(2022, 1, 15)
month2 = datetime(2022, 2, 15)
user_credit.time_now = lambda: month2
month2credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: month1
month1credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
assert await user_credit.get_or_refill_credit(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: month2
assert await user_credit.get_or_refill_credit(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(scope="session")
async def test_credit_refill(server: SpinTestServer):
# Clear all transactions within the month
await UserBlockCredit.prisma().update_many(
where={
"userId": DEFAULT_USER_ID,
"createdAt": {
"gte": datetime(2022, 2, 1),
"lt": datetime(2022, 3, 1),
},
},
data={"isActive": False},
)
user_credit.time_now = lambda: datetime(2022, 2, 15)
balance = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
from typing import Any
import pytest
from langchain_tests.conftest import CustomPersister, CustomSerializer
from langchain_tests.conftest import _base_vcr_config as _base_vcr_config
from vcr import VCR # type: ignore[import-untyped]
def remove_request_headers(request: Any) -> Any:
for k in request.headers:
request.headers[k] = "**REDACTED**"
return request
def remove_response_headers(response: dict) -> dict:
for k in response["headers"]:
response["headers"][k] = "**REDACTED**"
return response
@pytest.fixture(scope="session")
def vcr_config(_base_vcr_config: dict) -> dict: # noqa: F811
"""Extend the default configuration coming from langchain_tests."""
config = _base_vcr_config.copy()
config["before_record_request"] = remove_request_headers
config["before_record_response"] = remove_response_headers
config["serializer"] = "yaml.gz"
config["path_transformer"] = VCR.ensure_suffix(".yaml.gz")
return config
def pytest_recording_configure(config: dict, vcr: VCR) -> None:
vcr.register_persister(CustomPersister())
vcr.register_serializer("yaml.gz", CustomSerializer())
|
from typing import Any
import pytest
from langchain_tests.conftest import CustomPersister, CustomSerializer
from langchain_tests.conftest import _base_vcr_config as _base_vcr_config
from vcr import VCR # type: ignore[import-untyped]
def remove_request_headers(request: Any) -> Any:
for k in request.headers:
request.headers[k] = "**REDACTED**"
return request
def remove_response_headers(response: dict) -> dict:
for k in response["headers"]:
response["headers"][k] = "**REDACTED**"
return response
@pytest.fixture(scope="session")
def vcr_config(_base_vcr_config: dict) -> dict: # noqa: F811
"""
Extend the default configuration coming from langchain_tests.
"""
config = _base_vcr_config.copy()
config["before_record_request"] = remove_request_headers
config["before_record_response"] = remove_response_headers
config["serializer"] = "yaml.gz"
config["path_transformer"] = VCR.ensure_suffix(".yaml.gz")
return config
def pytest_recording_configure(config: dict, vcr: VCR) -> None:
vcr.register_persister(CustomPersister())
vcr.register_serializer("yaml.gz", CustomSerializer())
|
from typing import TYPE_CHECKING
from .compass import CompassWebhookManager
from .github import GithubWebhooksManager
from .slant3d import Slant3DWebhooksManager
if TYPE_CHECKING:
from ..providers import ProviderName
from ._base import BaseWebhooksManager
# --8<-- [start:WEBHOOK_MANAGERS_BY_NAME]
WEBHOOK_MANAGERS_BY_NAME: dict["ProviderName", type["BaseWebhooksManager"]] = {
handler.PROVIDER_NAME: handler
for handler in [
CompassWebhookManager,
GithubWebhooksManager,
Slant3DWebhooksManager,
]
}
# --8<-- [end:WEBHOOK_MANAGERS_BY_NAME]
__all__ = ["WEBHOOK_MANAGERS_BY_NAME"]
|
from typing import TYPE_CHECKING
from .github import GithubWebhooksManager
from .slant3d import Slant3DWebhooksManager
if TYPE_CHECKING:
from ..providers import ProviderName
from .base import BaseWebhooksManager
# --8<-- [start:WEBHOOK_MANAGERS_BY_NAME]
WEBHOOK_MANAGERS_BY_NAME: dict["ProviderName", type["BaseWebhooksManager"]] = {
handler.PROVIDER_NAME: handler
for handler in [
GithubWebhooksManager,
Slant3DWebhooksManager,
]
}
# --8<-- [end:WEBHOOK_MANAGERS_BY_NAME]
__all__ = ["WEBHOOK_MANAGERS_BY_NAME"]
|
"""
This script downloads the WikiMatrix corpus (https://github.com/facebookresearch/LASER/tree/master/tasks/WikiMatrix)
and creates parallel-sentence TSV files that can be used to extend existing sentence embedding models to new languages.
WikiMatrix contains parallel sentences mined from Wikipedia in various languages.
Further information can be found in our paper:
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation
https://arxiv.org/abs/2004.09813
"""
import os
import sentence_transformers.util
import gzip
source_languages = set(["en"]) # Languages our (monolingual) teacher model understands
target_languages = set(["de", "es", "it", "fr", "ar", "tr"]) # New languages we want to extend to
num_dev_sentences = 1000 # Number of sentences we want to use for development
threshold = 1.075 # Only use sentences with a LASER similarity score above the threshold
download_url = "https://dl.fbaipublicfiles.com/laser/WikiMatrix/v1/"
download_folder = "../datasets/WikiMatrix/"
parallel_sentences_folder = "parallel-sentences/"
os.makedirs(os.path.dirname(download_folder), exist_ok=True)
os.makedirs(parallel_sentences_folder, exist_ok=True)
for source_lang in source_languages:
for target_lang in target_languages:
filename_train = os.path.join(
parallel_sentences_folder, "WikiMatrix-{}-{}-train.tsv.gz".format(source_lang, target_lang)
)
filename_dev = os.path.join(
parallel_sentences_folder, "WikiMatrix-{}-{}-dev.tsv.gz".format(source_lang, target_lang)
)
if not os.path.exists(filename_train) and not os.path.exists(filename_dev):
langs_ordered = sorted([source_lang, target_lang])
wikimatrix_filename = "WikiMatrix.{}-{}.tsv.gz".format(*langs_ordered)
wikimatrix_filepath = os.path.join(download_folder, wikimatrix_filename)
if not os.path.exists(wikimatrix_filepath):
print("Download", download_url + wikimatrix_filename)
try:
sentence_transformers.util.http_get(download_url + wikimatrix_filename, wikimatrix_filepath)
except Exception:
print("Was not able to download", download_url + wikimatrix_filename)
continue
if not os.path.exists(wikimatrix_filepath):
continue
train_sentences = []
dev_sentences = []
dev_sentences_set = set()
extract_dev_sentences = True
with gzip.open(wikimatrix_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
score, sent1, sent2 = line.strip().split("\t")
sent1 = sent1.strip()
sent2 = sent2.strip()
score = float(score)
if score < threshold:
break
if sent1 == sent2:
continue
if langs_ordered.index(source_lang) == 1: # Swap, so that src lang is sent1
sent1, sent2 = sent2, sent1
# Avoid duplicates in development set
if sent1 in dev_sentences_set or sent2 in dev_sentences_set:
continue
if extract_dev_sentences:
dev_sentences.append([sent1, sent2])
dev_sentences_set.add(sent1)
dev_sentences_set.add(sent2)
if len(dev_sentences) >= num_dev_sentences:
extract_dev_sentences = False
else:
train_sentences.append([sent1, sent2])
print("Write", len(dev_sentences), "dev sentences", filename_dev)
with gzip.open(filename_dev, "wt", encoding="utf8") as fOut:
for sents in dev_sentences:
fOut.write("\t".join(sents))
fOut.write("\n")
print("Write", len(train_sentences), "train sentences", filename_train)
with gzip.open(filename_train, "wt", encoding="utf8") as fOut:
for sents in train_sentences:
fOut.write("\t".join(sents))
fOut.write("\n")
print("---DONE---")
|
"""
This script downloads the WikiMatrix corpus (https://github.com/facebookresearch/LASER/tree/master/tasks/WikiMatrix)
and creates parallel-sentence TSV files that can be used to extend existing sentence embedding models to new languages.
WikiMatrix contains parallel sentences mined from Wikipedia in various languages.
Further information can be found in our paper:
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation
https://arxiv.org/abs/2004.09813
"""
import os
import sentence_transformers.util
import gzip
source_languages = set(["en"]) # Languages our (monolingual) teacher model understands
target_languages = set(["de", "es", "it", "fr", "ar", "tr"]) # New languages we want to extend to
num_dev_sentences = 1000 # Number of sentences we want to use for development
threshold = 1.075 # Only use sentences with a LASER similarity score above the threshold
download_url = "https://dl.fbaipublicfiles.com/laser/WikiMatrix/v1/"
download_folder = "../datasets/WikiMatrix/"
parallel_sentences_folder = "parallel-sentences/"
os.makedirs(os.path.dirname(download_folder), exist_ok=True)
os.makedirs(parallel_sentences_folder, exist_ok=True)
for source_lang in source_languages:
for target_lang in target_languages:
filename_train = os.path.join(
parallel_sentences_folder, "WikiMatrix-{}-{}-train.tsv.gz".format(source_lang, target_lang)
)
filename_dev = os.path.join(
parallel_sentences_folder, "WikiMatrix-{}-{}-dev.tsv.gz".format(source_lang, target_lang)
)
if not os.path.exists(filename_train) and not os.path.exists(filename_dev):
langs_ordered = sorted([source_lang, target_lang])
wikimatrix_filename = "WikiMatrix.{}-{}.tsv.gz".format(*langs_ordered)
wikimatrix_filepath = os.path.join(download_folder, wikimatrix_filename)
if not os.path.exists(wikimatrix_filepath):
print("Download", download_url + wikimatrix_filename)
try:
sentence_transformers.util.http_get(download_url + wikimatrix_filename, wikimatrix_filepath)
except Exception:
print("Was not able to download", download_url + wikimatrix_filename)
continue
if not os.path.exists(wikimatrix_filepath):
continue
train_sentences = []
dev_sentences = []
dev_sentences_set = set()
extract_dev_sentences = True
with gzip.open(wikimatrix_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
score, sent1, sent2 = line.strip().split("\t")
sent1 = sent1.strip()
sent2 = sent2.strip()
score = float(score)
if score < threshold:
break
if sent1 == sent2:
continue
if langs_ordered.index(source_lang) == 1: # Swap, so that src lang is sent1
sent1, sent2 = sent2, sent1
# Avoid duplicates in development set
if sent1 in dev_sentences_set or sent2 in dev_sentences_set:
continue
if extract_dev_sentences:
dev_sentences.append([sent1, sent2])
dev_sentences_set.add(sent1)
dev_sentences_set.add(sent2)
if len(dev_sentences) >= num_dev_sentences:
extract_dev_sentences = False
else:
train_sentences.append([sent1, sent2])
print("Write", len(dev_sentences), "dev sentences", filename_dev)
with gzip.open(filename_dev, "wt", encoding="utf8") as fOut:
for sents in dev_sentences:
fOut.write("\t".join(sents))
fOut.write("\n")
print("Write", len(train_sentences), "train sentences", filename_train)
with gzip.open(filename_train, "wt", encoding="utf8") as fOut:
for sents in train_sentences:
fOut.write("\t".join(sents))
fOut.write("\n")
print("---DONE---")
|
"""
Wrapper script to run a command inside a Docker container
"""
import argparse
import grp
import itertools
import os
import pathlib
import pwd
import subprocess
import sys
import textwrap
OPS_DIR = pathlib.Path(__file__).expanduser().resolve().parent
PROJECT_ROOT_DIR = OPS_DIR.parent
LINEWIDTH = 88
TEXT_WRAPPER = textwrap.TextWrapper(
width=LINEWIDTH,
initial_indent="",
subsequent_indent=" ",
break_long_words=False,
break_on_hyphens=False,
)
def parse_run_args(*, raw_run_args: str) -> list[str]:
return [x for x in raw_run_args.split() if x]
def get_user_ids() -> dict[str, str]:
uid = os.getuid()
gid = os.getgid()
return {
"CI_BUILD_UID": str(uid),
"CI_BUILD_USER": pwd.getpwuid(uid).pw_name,
"CI_BUILD_GID": str(gid),
"CI_BUILD_GROUP": grp.getgrgid(gid).gr_name,
}
def fancy_print_cli_args(*, cli_args: list[str]) -> None:
print(
"=" * LINEWIDTH
+ "\n"
+ " \\\n".join(TEXT_WRAPPER.wrap(" ".join(cli_args)))
+ "\n"
+ "=" * LINEWIDTH
+ "\n",
flush=True,
)
def docker_run(
*,
container_tag: str,
command_args: list[str],
use_gpus: bool,
workdir: pathlib.Path,
user_ids: dict[str, str],
extra_args: list[str],
) -> None:
# Command-line arguments to be passed to `docker run`
docker_run_cli_args = ["--rm", "--pid=host"]
if use_gpus:
docker_run_cli_args.extend(["--gpus", "all"])
docker_run_cli_args.extend(["-v", f"{workdir}:/workspace", "-w", "/workspace"])
docker_run_cli_args.extend(
itertools.chain.from_iterable([["-e", f"{k}={v}"] for k, v in user_ids.items()])
)
docker_run_cli_args.extend(extra_args)
docker_run_cli_args.append(container_tag)
docker_run_cli_args.extend(command_args)
cli_args = ["docker", "run"] + docker_run_cli_args
fancy_print_cli_args(cli_args=cli_args)
subprocess.run(cli_args, check=True, encoding="utf-8")
def main(*, args: argparse.Namespace) -> None:
run_args = parse_run_args(raw_run_args=args.run_args)
user_ids = get_user_ids()
if args.use_gpus:
print("Using NVIDIA GPUs for `docker run`")
if args.interactive:
print("Using interactive mode for `docker run`")
run_args.append("-it")
docker_run(
container_tag=args.container_tag,
command_args=args.command_args,
use_gpus=args.use_gpus,
workdir=args.workdir,
user_ids=user_ids,
extra_args=run_args,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
usage=(
f"{sys.argv[0]} --container-tag CONTAINER_TAG [--use-gpus] [--interactive] "
"[--workdir WORKDIR] [--run-args RUN_ARGS] -- COMMAND_ARG "
"[COMMAND_ARG ...]"
),
description="Run tasks inside a Docker container",
)
parser.add_argument(
"--container-tag",
type=str,
required=True,
help=(
"Container tag to identify the container, e.g. "
"492475357299.dkr.ecr.us-west-2.amazonaws.com/xgb-ci.gpu:main"
),
)
parser.add_argument(
"--use-gpus",
action="store_true",
help=(
"Grant the container access to NVIDIA GPUs; requires the NVIDIA "
"Container Toolkit."
),
)
parser.add_argument(
"--interactive",
action="store_true",
help=(
"Run the container in the interactive mode; requires an interactive shell "
"(TTY). With this flag, you can use Ctrl-C to interrupt an long-running "
"command."
),
)
parser.add_argument(
"--workdir",
type=lambda p: pathlib.Path(p).expanduser().resolve(),
default=PROJECT_ROOT_DIR,
help="Path to working directory; if unset, use the project's root",
)
parser.add_argument(
"--run-args",
type=str,
default="",
help=(
"Argument(s) to be passed to `docker run`. When passing multiple "
"arguments, use single quotes to wrap them. Example: "
"--run-args '--cap-add SYS_PTRACE --shm-size=4g'"
),
)
parser.add_argument(
"command_args",
metavar="COMMAND_ARG",
type=str,
nargs="+",
help=(
"Argument(s) for the command to execute. NOTE. Make sure to specify "
"double-dash (--) to clearly distinguish between the command and the "
"preceding parameters. Example: --run-args '--cap-add SYS_PTRACE "
"--shm-size=4g' -- ./myprog"
),
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
parsed_args = parser.parse_args()
main(args=parsed_args)
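# Example invocation (hypothetical script name and image tag):
#   python docker_run.py --container-tag xgb-ci.gpu:main --use-gpus \
#       --run-args '--shm-size=4g' -- bash -c "nvidia-smi"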
|
"""
Wrapper script to run a command inside a Docker container
"""
import argparse
import grp
import itertools
import os
import pathlib
import pwd
import subprocess
import sys
import textwrap
OPS_DIR = pathlib.Path(__file__).expanduser().resolve().parent
PROJECT_ROOT_DIR = OPS_DIR.parent
LINEWIDTH = 88
TEXT_WRAPPER = textwrap.TextWrapper(
width=LINEWIDTH,
initial_indent="",
subsequent_indent=" ",
break_long_words=False,
break_on_hyphens=False,
)
def parse_run_args(raw_run_args: str) -> list[str]:
return [x for x in raw_run_args.split() if x]
def get_user_ids() -> dict[str, str]:
uid = os.getuid()
gid = os.getgid()
return {
"CI_BUILD_UID": str(uid),
"CI_BUILD_USER": pwd.getpwuid(uid).pw_name,
"CI_BUILD_GID": str(gid),
"CI_BUILD_GROUP": grp.getgrgid(gid).gr_name,
}
def fancy_print_cli_args(cli_args: list[str]) -> None:
print(
"=" * LINEWIDTH
+ "\n"
+ " \\\n".join(TEXT_WRAPPER.wrap(" ".join(cli_args)))
+ "\n"
+ "=" * LINEWIDTH
+ "\n",
flush=True,
)
def docker_run(
container_id: str,
command_args: list[str],
*,
use_gpus: bool,
workdir: pathlib.Path,
user_ids: dict[str, str],
extra_args: list[str],
) -> None:
# Command-line arguments to be passed to `docker run`
docker_run_cli_args = ["--rm", "--pid=host"]
if use_gpus:
docker_run_cli_args.extend(["--gpus", "all"])
docker_run_cli_args.extend(["-v", f"{workdir}:/workspace", "-w", "/workspace"])
docker_run_cli_args.extend(
itertools.chain.from_iterable([["-e", f"{k}={v}"] for k, v in user_ids.items()])
)
docker_run_cli_args.extend(extra_args)
docker_run_cli_args.append(container_id)
docker_run_cli_args.extend(command_args)
cli_args = ["docker", "run"] + docker_run_cli_args
fancy_print_cli_args(cli_args)
subprocess.run(cli_args, check=True, encoding="utf-8")
def main(args: argparse.Namespace) -> None:
run_args = parse_run_args(args.run_args)
user_ids = get_user_ids()
if args.use_gpus:
print("Using NVIDIA GPUs for `docker run`")
if args.interactive:
print("Using interactive mode for `docker run`")
run_args.append("-it")
docker_run(
args.container_id,
args.command_args,
use_gpus=args.use_gpus,
workdir=args.workdir,
user_ids=user_ids,
extra_args=run_args,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
usage=(
f"{sys.argv[0]} --container-id CONTAINER_ID [--use-gpus] [--interactive] "
"[--workdir WORKDIR] [--run-args RUN_ARGS] -- COMMAND_ARG "
"[COMMAND_ARG ...]"
),
description="Run tasks inside a Docker container",
)
parser.add_argument(
"--container-id",
type=str,
required=True,
help="String ID of the container to run.",
)
parser.add_argument(
"--use-gpus",
action="store_true",
help=(
"Grant the container access to NVIDIA GPUs; requires the NVIDIA "
"Container Toolkit."
),
)
parser.add_argument(
"--interactive",
action="store_true",
help=(
"Run the container in the interactive mode; requires an interactive shell "
"(TTY). With this flag, you can use Ctrl-C to interrupt an long-running "
"command."
),
)
parser.add_argument(
"--workdir",
type=lambda p: pathlib.Path(p).expanduser().resolve(),
default=PROJECT_ROOT_DIR,
help="Path to working directory; if unset, use the project's root",
)
parser.add_argument(
"--run-args",
type=str,
default="",
help=(
"Argument(s) to be passed to `docker run`. When passing multiple "
"arguments, use single quotes to wrap them. Example: "
"--run-args '--cap-add SYS_PTRACE --shm-size=4g'"
),
)
parser.add_argument(
"command_args",
metavar="COMMAND_ARG",
type=str,
nargs="+",
help=(
"Argument(s) for the command to execute. NOTE. Make sure to specify "
"double-dash (--) to clearly distinguish between the command and the "
"preceding parameters. Example: --run-args '--cap-add SYS_PTRACE "
"--shm-size=4g' -- ./myprog"
),
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
parsed_args = parser.parse_args()
main(parsed_args)
|
from __future__ import annotations
from typing import Any, Dict, Iterator, List
from urllib.parse import urlparse
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, PrivateAttr
def _chunk(texts: List[str], size: int) -> Iterator[List[str]]:
for i in range(0, len(texts), size):
yield texts[i : i + size]
class MlflowEmbeddings(Embeddings, BaseModel):
"""Embedding LLMs in MLflow.
To use, you should have the `mlflow[genai]` python package installed.
For more information, see https://mlflow.org/docs/latest/llms/deployments.
Example:
.. code-block:: python
from langchain_community.embeddings import MlflowEmbeddings
embeddings = MlflowEmbeddings(
target_uri="http://localhost:5000",
endpoint="embeddings",
)
"""
endpoint: str
"""The endpoint to use."""
target_uri: str
"""The target URI to use."""
    _client: Any = PrivateAttr()
    query_params: Dict[str, str] = {}
    """The parameters to use for queries."""
    documents_params: Dict[str, str] = {}
    """The parameters to use for documents."""
def __init__(self, **kwargs: Any):
super().__init__(**kwargs)
self._validate_uri()
try:
from mlflow.deployments import get_deploy_client
self._client = get_deploy_client(self.target_uri)
except ImportError as e:
raise ImportError(
"Failed to create the client. "
f"Please run `pip install mlflow{self._mlflow_extras}` to install "
"required dependencies."
) from e
@property
def _mlflow_extras(self) -> str:
return "[genai]"
def _validate_uri(self) -> None:
if self.target_uri == "databricks":
return
allowed = ["http", "https", "databricks"]
if urlparse(self.target_uri).scheme not in allowed:
raise ValueError(
f"Invalid target URI: {self.target_uri}. "
f"The scheme must be one of {allowed}."
)
def embed(self, texts: List[str], params: Dict[str, str]) -> List[List[float]]:
embeddings: List[List[float]] = []
for txt in _chunk(texts, 20):
resp = self._client.predict(
endpoint=self.endpoint,
inputs={"input": txt, **params},
)
embeddings.extend(r["embedding"] for r in resp["data"])
return embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
return self.embed(texts, params=self.documents_params)
def embed_query(self, text: str) -> List[float]:
return self.embed([text], params=self.query_params)[0]
class MlflowCohereEmbeddings(MlflowEmbeddings):
"""Cohere embedding LLMs in MLflow."""
query_params: Dict[str, str] = {"input_type": "search_query"}
documents_params: Dict[str, str] = {"input_type": "search_document"}
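# A minimal usage sketch for the classes above, assuming an MLflow deployments server
# is reachable at the given target_uri and serves a "cohere-embeddings" endpoint
# (both values are illustrative):
#
# emb = MlflowCohereEmbeddings(target_uri="http://localhost:5000", endpoint="cohere-embeddings")
# doc_vectors = emb.embed_documents(["first document", "second document"])  # uses documents_params
# query_vector = emb.embed_query("what is in the first document?")  # uses query_params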
|
from __future__ import annotations
from typing import Any, Dict, Iterator, List
from urllib.parse import urlparse
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, PrivateAttr
def _chunk(texts: List[str], size: int) -> Iterator[List[str]]:
for i in range(0, len(texts), size):
yield texts[i : i + size]
class MlflowEmbeddings(Embeddings, BaseModel):
"""Embedding LLMs in MLflow.
To use, you should have the `mlflow[genai]` python package installed.
For more information, see https://mlflow.org/docs/latest/llms/deployments.
Example:
.. code-block:: python
from langchain_community.embeddings import MlflowEmbeddings
embeddings = MlflowEmbeddings(
target_uri="http://localhost:5000",
endpoint="embeddings",
)
"""
endpoint: str
"""The endpoint to use."""
target_uri: str
"""The target URI to use."""
_client: Any = PrivateAttr()
"""The parameters to use for queries."""
query_params: Dict[str, str] = {}
"""The parameters to use for documents."""
documents_params: Dict[str, str] = {}
def __init__(self, **kwargs: Any):
super().__init__(**kwargs)
self._validate_uri()
try:
from mlflow.deployments import get_deploy_client
self._client = get_deploy_client(self.target_uri)
except ImportError as e:
raise ImportError(
"Failed to create the client. "
f"Please run `pip install mlflow{self._mlflow_extras}` to install "
"required dependencies."
) from e
@property
def _mlflow_extras(self) -> str:
return "[genai]"
def _validate_uri(self) -> None:
if self.target_uri == "databricks":
return
allowed = ["http", "https", "databricks"]
if urlparse(self.target_uri).scheme not in allowed:
raise ValueError(
f"Invalid target URI: {self.target_uri}. "
f"The scheme must be one of {allowed}."
)
def embed(self, texts: List[str], params: Dict[str, str]) -> List[List[float]]:
embeddings: List[List[float]] = []
for txt in _chunk(texts, 20):
resp = self._client.predict(
endpoint=self.endpoint,
inputs={"input": txt, **params}, # type: ignore[arg-type]
)
embeddings.extend(r["embedding"] for r in resp["data"])
return embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
return self.embed(texts, params=self.documents_params)
def embed_query(self, text: str) -> List[float]:
return self.embed([text], params=self.query_params)[0]
class MlflowCohereEmbeddings(MlflowEmbeddings):
"""Cohere embedding LLMs in MLflow."""
query_params: Dict[str, str] = {"input_type": "search_query"}
documents_params: Dict[str, str] = {"input_type": "search_document"}
|
import json
from typing import Dict
import pytest
from jina.orchestrate.deployments.config.k8slib.kubernetes_tools import get_yaml
@pytest.mark.parametrize(
['template', 'params'],
[
('namespace', {'name': 'test-ns'}),
('service', {'name': 'test-svc'}),
('deployment-executor', {'name': 'test-dep'}),
(
'configmap',
{
'name': 'test-configmap-executor',
'namespace': 'test-configmap',
'data': {'k1': 'v1', 'k2': 'v2'},
},
),
],
)
def test_get(template: str, params: Dict):
config = get_yaml(template=template, params=params)
for v in params.values():
if isinstance(v, str):
assert v in json.dumps(config)
elif isinstance(v, dict):
for sub_key, sub_v in v.items():
assert config['data'][sub_key] == sub_v
@pytest.mark.parametrize('template', ['deployment-executor'])
def test_get_deployment_with_device_plugin(template, monkeypatch):
params = {
'name': 'test-name',
'namespace': 'test-namespace',
'image': 'test-image',
'replicas': 1,
'command': 'test-command',
'args': 'test-args',
'port': 1234,
'port_out': 1234,
'port_ctrl': 1234,
'pull_policy': 1234,
'device_plugins': {'hardware-vendor.example/foo': 2, 'nvidia.com/gpu:': 3},
}
config = get_yaml(template, params)
assert config['spec']['template']['spec']['containers'][0]['resources'] == {
'limits': {'hardware-vendor.example/foo': 2, 'nvidia.com/gpu:': 3}
}
|
import json
from typing import Dict
import pytest
from jina.orchestrate.deployments.config.k8slib.kubernetes_tools import get_yaml
@pytest.mark.parametrize(
['template', 'params'],
[
('namespace', {'name': 'test-ns'}),
('service', {'name': 'test-svc'}),
('deployment', {'name': 'test-dep'}),
(
'configmap',
{
'name': 'test-configmap-executor',
'namespace': 'test-configmap',
'data': {'k1': 'v1', 'k2': 'v2'},
},
),
],
)
def test_get(template: str, params: Dict):
config = get_yaml(template=template, params=params)
for v in params.values():
if isinstance(v, str):
assert v in json.dumps(config)
elif isinstance(v, dict):
for sub_key, sub_v in v.items():
assert config['data'][sub_key] == sub_v
@pytest.mark.parametrize('template', ['deployment'])
def test_get_deployment_with_device_plugin(template, monkeypatch):
params = {
'name': 'test-name',
'namespace': 'test-namespace',
'image': 'test-image',
'replicas': 1,
'command': 'test-command',
'args': 'test-args',
'port': 1234,
'port_out': 1234,
'port_ctrl': 1234,
'pull_policy': 1234,
'device_plugins': {'hardware-vendor.example/foo': 2, 'nvidia.com/gpu:': 3},
}
config = get_yaml(template, params)
assert config['spec']['template']['spec']['containers'][0]['resources'] == {
'limits': {'hardware-vendor.example/foo': 2, 'nvidia.com/gpu:': 3}
}
|
from sentence_transformers import SentenceTransformer
from . import SentenceEvaluator
from typing import Dict, Iterable
class SequentialEvaluator(SentenceEvaluator):
"""
This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
the data is passed sequentially to all sub-evaluators.
All scores are passed to 'main_score_function', which derives one final score value.
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
"""
Initializes a SequentialEvaluator object.
Args:
evaluators (Iterable[SentenceEvaluator]): A collection of SentenceEvaluator objects.
main_score_function (function, optional): A function that takes a list of scores and returns the main score.
Defaults to selecting the last score in the list.
Example:
::
evaluator1 = BinaryClassificationEvaluator(...)
evaluator2 = InformationRetrievalEvaluator(...)
evaluator3 = MSEEvaluator(...)
seq_evaluator = SequentialEvaluator([evaluator1, evaluator2, evaluator3])
"""
super().__init__()
self.evaluators = evaluators
self.main_score_function = main_score_function
def __call__(
self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
) -> Dict[str, float]:
evaluations = []
scores = []
for evaluator_idx, evaluator in enumerate(self.evaluators):
evaluation = evaluator(model, output_path, epoch, steps)
if not isinstance(evaluation, dict):
scores.append(evaluation)
evaluation = {f"evaluator_{evaluator_idx}": evaluation}
else:
if hasattr(evaluation, "primary_metric"):
scores.append(evaluation[evaluation.primary_metric])
else:
scores.append(evaluation[list(evaluation.keys())[0]])
evaluations.append(evaluation)
self.primary_metric = "sequential_score"
main_score = self.main_score_function(scores)
results = {key: value for evaluation in evaluations for key, value in evaluation.items()}
results["sequential_score"] = main_score
return results
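# A brief usage sketch, assuming `evaluator_a`, `evaluator_b`, and `model` already exist
# (names are illustrative); the custom main_score_function averages the collected scores
# instead of taking the last one:
#
# seq_evaluator = SequentialEvaluator(
#     [evaluator_a, evaluator_b],
#     main_score_function=lambda scores: sum(scores) / len(scores),
# )
# results = seq_evaluator(model)
# print(results["sequential_score"])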
|
from sentence_transformers import SentenceTransformer
from . import SentenceEvaluator
from typing import Dict, Iterable
class SequentialEvaluator(SentenceEvaluator):
"""
This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
the data is passed sequentially to all sub-evaluators.
All scores are passed to 'main_score_function', which derives one final score value.
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
super().__init__()
self.evaluators = evaluators
self.main_score_function = main_score_function
def __call__(
self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
) -> Dict[str, float]:
evaluations = []
scores = []
for evaluator_idx, evaluator in enumerate(self.evaluators):
evaluation = evaluator(model, output_path, epoch, steps)
if not isinstance(evaluation, dict):
scores.append(evaluation)
evaluation = {f"evaluator_{evaluator_idx}": evaluation}
else:
if hasattr(evaluation, "primary_metric"):
scores.append(evaluation[evaluation.primary_metric])
else:
scores.append(evaluation[list(evaluation.keys())[0]])
evaluations.append(evaluation)
self.primary_metric = "sequential_score"
main_score = self.main_score_function(scores)
results = {key: value for evaluation in evaluations for key, value in evaluation.items()}
results["sequential_score"] = main_score
return results
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoNdArray')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_ndarray')
class VideoNdArray(NdArray, VideoTensorMixin):
"""
Subclass of NdArray, to represent a video tensor.
Adds video-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import numpy as np
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.typing import VideoNdArray, VideoUrl
class MyVideoDoc(BaseDocument):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoNdArray]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=np.random.random((100, 224, 224, 3)),
)
doc_1.video_tensor.save(file_path='file_1.mp4')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.video_tensor = parse_obj_as(VideoNdArray, doc_2.url.load().video)
doc_2.video_tensor.save(file_path='file_2.mp4')
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoNdArray')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_ndarray')
class VideoNdArray(NdArray, VideoTensorMixin):
"""
Subclass of NdArray, to represent a video tensor.
Adds video-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import numpy as np
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.typing import VideoNdArray, VideoUrl
class MyVideoDoc(BaseDocument):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoNdArray]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=np.random.random((100, 224, 224, 3)),
)
doc_1.video_tensor.save(file_path='file_1.wav')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.video_tensor = parse_obj_as(VideoNdArray, doc_2.url.load())
doc_2.video_tensor.save(file_path='file_2.wav')
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities.opaqueprompts import desanitize, sanitize
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"sanitize": "langchain_community.utilities.opaqueprompts",
"desanitize": "langchain_community.utilities.opaqueprompts",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"desanitize",
"sanitize",
]
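# A sketch of how this deprecation shim behaves, assuming it lives at
# langchain.utilities.opaqueprompts (the exact warning text is an assumption):
#
# from langchain.utilities.opaqueprompts import sanitize
# # __getattr__ resolves "sanitize" through DEPRECATED_LOOKUP, emits a deprecation
# # warning, and returns langchain_community.utilities.opaqueprompts.sanitize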
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities.opaqueprompts import desanitize, sanitize
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"sanitize": "langchain_community.utilities.opaqueprompts",
"desanitize": "langchain_community.utilities.opaqueprompts",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"sanitize",
"desanitize",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import ast
import os.path as osp
import re
import warnings
from typing import Tuple
from mmengine.fileio import load
from mmengine.utils import check_file_exist
PKG2PROJECT = {
'mmcls': 'mmcls',
'mmdet': 'mmdet',
'mmdet3d': 'mmdet3d',
'mmseg': 'mmsegmentation',
'mmaction2': 'mmaction2',
'mmtrack': 'mmtrack',
'mmpose': 'mmpose',
'mmedit': 'mmedit',
'mmocr': 'mmocr',
'mmgen': 'mmgen',
'mmfewshot': 'mmfewshot',
'mmrazor': 'mmrazor',
'mmflow': 'mmflow',
'mmhuman3d': 'mmhuman3d',
'mmrotate': 'mmrotate',
'mmselfsup': 'mmselfsup',
}
def _get_cfg_metainfo(package_path: str, cfg_path: str) -> dict:
"""Get target meta information from all 'metafile.yml' defined in `mode-
index.yml` of external package.
Args:
package_path (str): Path of external package.
cfg_path (str): Name of experiment config.
Returns:
dict: Meta information of target experiment.
"""
meta_index_path = osp.join(package_path, '.mim', 'model-index.yml')
meta_index = load(meta_index_path)
cfg_dict = dict()
for meta_path in meta_index['Import']:
meta_path = osp.join(package_path, '.mim', meta_path)
cfg_meta = load(meta_path)
for model_cfg in cfg_meta['Models']:
if 'Config' not in model_cfg:
warnings.warn(f'There is no `Config` defined in {model_cfg}')
continue
cfg_name = model_cfg['Config'].partition('/')[-1]
# Some config could have multiple weights, we only pick the
# first one.
if cfg_name in cfg_dict:
continue
cfg_dict[cfg_name] = model_cfg
if cfg_path not in cfg_dict:
raise ValueError(f'Expected configs: {cfg_dict.keys()}, but got '
f'{cfg_path}')
return cfg_dict[cfg_path]
def _get_external_cfg_path(package_path: str, cfg_file: str) -> str:
"""Get config path of external package.
Args:
package_path (str): Path of external package.
cfg_file (str): Name of experiment config.
Returns:
str: Absolute config path from external package.
"""
cfg_file = cfg_file.split('.')[0]
model_cfg = _get_cfg_metainfo(package_path, cfg_file)
cfg_path = osp.join(package_path, model_cfg['Config'])
check_file_exist(cfg_path)
return cfg_path
def _get_external_cfg_base_path(package_path: str, cfg_name: str) -> str:
"""Get base config path of external package.
Args:
package_path (str): Path of external package.
cfg_name (str): External relative config path with 'package::'.
Returns:
str: Absolute config path from external package.
"""
cfg_path = osp.join(package_path, '.mim', 'configs', cfg_name)
check_file_exist(cfg_path)
return cfg_path
def _get_package_and_cfg_path(cfg_path: str) -> Tuple[str, str]:
"""Get package name and relative config path.
Args:
cfg_path (str): External relative config path with 'package::'.
Returns:
Tuple[str, str]: Package name and config path.
"""
if re.match(r'\w*::\w*/\w*', cfg_path) is None:
raise ValueError(
'`_get_package_and_cfg_path` is used to get an external package config, '
'please specify the package name and relative config path, just '
'like `mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py`')
package_cfg = cfg_path.split('::')
if len(package_cfg) > 2:
raise ValueError('`::` should only be used to separate package and '
'config name, but found multiple `::` in '
f'{cfg_path}')
package, cfg_path = package_cfg
assert package in PKG2PROJECT, 'mmengine does not support loading the ' \
f'{package} config.'
package = PKG2PROJECT[package]
return package, cfg_path
class RemoveAssignFromAST(ast.NodeTransformer):
"""Remove Assign node if the target's name match the key.
Args:
key (str): The target name of the Assign node.
"""
def __init__(self, key):
self.key = key
def visit_Assign(self, node):
if (isinstance(node.targets[0], ast.Name)
and node.targets[0].id == self.key):
return None
else:
return node
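# Two short usage sketches for the helpers above; the config path reuses the example
# from the error message, and the snippet handed to RemoveAssignFromAST is illustrative:
#
# package, cfg = _get_package_and_cfg_path('mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py')
# # -> ('mmdet', 'faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py')
#
# tree = ast.parse('lr = 0.01\nmomentum = 0.9')
# tree = RemoveAssignFromAST('lr').visit(tree)
# # the `lr = 0.01` assignment is removed; `momentum = 0.9` is kept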
|
# Copyright (c) OpenMMLab. All rights reserved.
import ast
import os.path as osp
import re
import warnings
from typing import Tuple
from mmengine.fileio import load
from mmengine.utils import check_file_exist
PKG2PROJECT = {
'mmcls': 'mmcls',
'mmdet': 'mmdet',
'mmdet3d': 'mmdet3d',
'mmseg': 'mmsegmentation',
'mmaction2': 'mmaction2',
'mmtrack': 'mmtrack',
'mmpose': 'mmpose',
'mmedit': 'mmedit',
'mmocr': 'mmocr',
'mmgen': 'mmgen',
'mmfewshot': 'mmfewshot',
'mmrazor': 'mmrazor',
'mmflow': 'mmflow',
'mmhuman3d': 'mmhuman3d',
'mmrotate': 'mmrotate',
'mmselfsup': 'mmselfsup',
}
def _get_cfg_metainfo(package_path: str, cfg_path: str) -> dict:
"""Get target meta information from all 'metafile.yml' defined in `mode-
index.yml` of external package.
Args:
package_path (str): Path of external package.
cfg_path (str): Name of experiment config.
Returns:
dict: Meta information of target experiment.
"""
meta_index_path = osp.join(package_path, '.mim', 'model-index.yml')
meta_index = load(meta_index_path)
cfg_dict = dict()
for meta_path in meta_index['Import']:
meta_path = osp.join(package_path, '.mim', meta_path)
cfg_meta = load(meta_path)
for model_cfg in cfg_meta['Models']:
if 'Config' not in model_cfg:
warnings.warn(f'There is no `Config` defined in {model_cfg}')
continue
cfg_name = model_cfg['Config'].partition('/')[-1]
# Some config could have multiple weights, we only pick the
# first one.
if cfg_name in cfg_dict:
continue
cfg_dict[cfg_name] = model_cfg
if cfg_path not in cfg_dict:
raise ValueError(f'Expected configs: {cfg_dict.keys()}, but got '
f'{cfg_path}')
return cfg_dict[cfg_path]
def _get_external_cfg_path(package_path: str, cfg_file: str) -> str:
"""Get config path of external package.
Args:
package_path (str): Path of external package.
cfg_file (str): Name of experiment config.
Returns:
str: Absolute config path from external package.
"""
cfg_file = cfg_file.split('.')[0]
model_cfg = _get_cfg_metainfo(package_path, cfg_file)
cfg_path = osp.join(package_path, model_cfg['Config'])
check_file_exist(cfg_path)
return cfg_path
def _get_external_cfg_base_path(package_path: str, cfg_name: str) -> str:
"""Get base config path of external package.
Args:
package_path (str): Path of external package.
cfg_name (str): External relative config path with 'package::'.
Returns:
str: Absolute config path from external package.
"""
cfg_path = osp.join(package_path, '.mim', 'configs', cfg_name)
check_file_exist(cfg_path)
return cfg_path
def _get_package_and_cfg_path(cfg_path: str) -> Tuple[str, str]:
"""Get package name and relative config path.
Args:
cfg_path (str): External relative config path with 'package::'.
Returns:
Tuple[str, str]: Package name and config path.
"""
if re.match(r'\w*::\w*/\w*', cfg_path) is None:
raise ValueError(
'`_get_package_and_cfg_path` is used to get an external package config, '
'please specify the package name and relative config path, just '
'like `mmdet::faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py`')
package_cfg = cfg_path.split('::')
if len(package_cfg) > 2:
raise ValueError('`::` should only be used to separate package and '
'config name, but found multiple `::` in '
f'{cfg_path}')
package, cfg_path = package_cfg
assert package in PKG2PROJECT, 'mmengine does not support loading the ' \
f'{package} config.'
package = PKG2PROJECT[package]
return package, cfg_path
class RemoveAssignFromAST(ast.NodeTransformer):
"""Remove Assign node if the target's name match the key.
Args:
key (str): The target name of the Assign node.
"""
def __init__(self, key):
self.key = key
def visit_Assign(self, node):
if (isinstance(node.targets[0], ast.Name)
and node.targets[0].id == self.key):
return None
else:
return node
|
from llama_index.core import Document
import asyncio
import pytest
from llama_index.graph_rag.cognee import CogneeGraphRAG
@pytest.mark.asyncio()
async def test_add_data(monkeypatch):
# Instantiate cognee GraphRAG
cogneeGraphRAG = CogneeGraphRAG(
llm_api_key="",
llm_provider="openai",
llm_model="gpt-4o-mini",
graph_db_provider="networkx",
vector_db_provider="lancedb",
relational_db_provider="sqlite",
relational_db_name="cognee_db",
)
async def mock_add_return(add, dataset_name):
return True
import cognee
monkeypatch.setattr(cognee, "add", mock_add_return)
# Gather documents to add to GraphRAG
documents = [
Document(
text="Jessica Miller, Experienced Sales Manager with a strong track record in driving sales growth and building high-performing teams."
),
Document(
text="David Thompson, Creative Graphic Designer with over 8 years of experience in visual design and branding."
),
]
await cogneeGraphRAG.add(documents, "test")
await cogneeGraphRAG.add(documents[0], "test")
if __name__ == "__main__":
asyncio.run(test_add_data())
|
from llama_index.core import Document
import asyncio
import pytest
from llama_index.graph_rag.cognee import CogneeGraphRAG
@pytest.mark.asyncio()
async def test_add_data(monkeypatch):
# Instantiate cognee GraphRAG
cogneeGraphRAG = CogneeGraphRAG(
llm_api_key="",
llm_provider="openai",
llm_model="gpt-4o-mini",
graph_db_provider="networkx",
vector_db_provider="lancedb",
relational_db_provider="sqlite",
db_name="cognee_db",
)
# Mock logging to graphistry
async def mock_add_return(add, dataset_name):
return True
import cognee
monkeypatch.setattr(cognee, "add", mock_add_return)
# Gather documents to add to GraphRAG
documents = [
Document(
text="Jessica Miller, Experienced Sales Manager with a strong track record in driving sales growth and building high-performing teams."
),
Document(
text="David Thompson, Creative Graphic Designer with over 8 years of experience in visual design and branding."
),
]
await cogneeGraphRAG.add(documents, "test")
await cogneeGraphRAG.add(documents[0], "test")
if __name__ == "__main__":
asyncio.run(test_add_data())
|
"""
This script trains sentence transformers with a triplet loss function.
As the corpus, we use the Wikipedia Sections dataset described by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks.
"""
import traceback
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import TripletEvaluator
from sentence_transformers.losses import TripletLoss
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from datetime import datetime
from datasets import load_dataset
import logging
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = "distilbert-base-uncased"
batch_size = 16
num_train_epochs = 1
output_dir = "output/training-wikipedia-sections-" + model_name + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
# 2. Load the Wikipedia-Sections dataset: https://huggingface.co/datasets/sentence-transformers/wikipedia-sections
train_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="train").select(
range(10_000)
)
eval_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="validation").select(
range(1000)
)
test_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="test").select(range(1000))
logging.info(train_dataset)
# 3. Define our training loss
# TripletLoss (https://sbert.net/docs/package_reference/losses.html#tripletloss) needs three text columns
train_loss = TripletLoss(model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = TripletEvaluator(
anchors=eval_dataset[:1000]["anchor"],
positives=eval_dataset[:1000]["positive"],
negatives=eval_dataset[:1000]["negative"],
name="wikipedia-sections-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="wikipedia-sections-triplet", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the Wikipedia Sections test dataset
test_evaluator = TripletEvaluator(
anchors=test_dataset["anchor"],
positives=test_dataset["positive"],
negatives=test_dataset["negative"],
name="wikipedia-sections-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-wikipedia-sections-triplet")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-wikipedia-sections-triplet')`."
)
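# After training, the saved model can be reloaded and used directly; a short sketch
# (the query sentence is illustrative):
#
# loaded = SentenceTransformer(final_output_dir)
# embedding = loaded.encode("A sentence from a Wikipedia section")
# print(embedding.shape)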
|
"""
This script trains sentence transformers with a triplet loss function.
As the corpus, we use the Wikipedia Sections dataset described by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks.
"""
from sentence_transformers import SentenceTransformer, InputExample, LoggingHandler, losses, models, util
from torch.utils.data import DataLoader
from sentence_transformers.evaluation import TripletEvaluator
from datetime import datetime
from zipfile import ZipFile
import csv
import logging
import os
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = "distilbert-base-uncased"
dataset_path = "datasets/wikipedia-sections"
if not os.path.exists(dataset_path):
os.makedirs(dataset_path, exist_ok=True)
filepath = os.path.join(dataset_path, "wikipedia-sections-triplets.zip")
util.http_get("https://sbert.net/datasets/wikipedia-sections-triplets.zip", filepath)
with ZipFile(filepath, "r") as zip:
zip.extractall(dataset_path)
### Create a torch.DataLoader that passes training batch instances to our model
train_batch_size = 16
output_path = "output/training-wikipedia-sections-" + model_name + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
num_epochs = 1
### Configure sentence transformers for training and train on the provided dataset
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
logger.info("Read Triplet train dataset")
train_examples = []
with open(os.path.join(dataset_path, "train.csv"), encoding="utf-8") as fIn:
reader = csv.DictReader(fIn, delimiter=",", quoting=csv.QUOTE_MINIMAL)
for row in reader:
train_examples.append(InputExample(texts=[row["Sentence1"], row["Sentence2"], row["Sentence3"]], label=0))
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.TripletLoss(model=model)
logger.info("Read Wikipedia Triplet dev dataset")
dev_examples = []
with open(os.path.join(dataset_path, "validation.csv"), encoding="utf-8") as fIn:
reader = csv.DictReader(fIn, delimiter=",", quoting=csv.QUOTE_MINIMAL)
for row in reader:
dev_examples.append(InputExample(texts=[row["Sentence1"], row["Sentence2"], row["Sentence3"]]))
if len(dev_examples) >= 1000:
break
evaluator = TripletEvaluator.from_input_examples(dev_examples, name="dev")
warmup_steps = int(len(train_dataloader) * num_epochs * 0.1) # 10% of train data
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=output_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on the Wikipedia triplet test dataset
#
##############################################################################
logger.info("Read test examples")
test_examples = []
with open(os.path.join(dataset_path, "test.csv"), encoding="utf-8") as fIn:
reader = csv.DictReader(fIn, delimiter=",", quoting=csv.QUOTE_MINIMAL)
for row in reader:
test_examples.append(InputExample(texts=[row["Sentence1"], row["Sentence2"], row["Sentence3"]]))
model = SentenceTransformer(output_path)
test_evaluator = TripletEvaluator.from_input_examples(test_examples, name="test")
test_evaluator(model, output_path=output_path)
|
# DO NOT EDIT. Generated by api_gen.sh
from keras.api import DTypePolicy
from keras.api import FloatDTypePolicy
from keras.api import Function
from keras.api import Initializer
from keras.api import Input
from keras.api import InputSpec
from keras.api import KerasTensor
from keras.api import Layer
from keras.api import Loss
from keras.api import Metric
from keras.api import Model
from keras.api import Operation
from keras.api import Optimizer
from keras.api import Quantizer
from keras.api import Regularizer
from keras.api import Sequential
from keras.api import StatelessScope
from keras.api import SymbolicScope
from keras.api import Variable
from keras.api import __version__
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import device
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import name_scope
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.api import version
from keras.api import visualization
from keras.api import wrappers
# END DO NOT EDIT.
import os # isort: skip
# Add everything in /api/ to the module search path.
__path__.append(os.path.join(os.path.dirname(__file__), "api")) # noqa: F405
# Don't pollute namespace.
del os
# Never autocomplete `.src` or `.api` on an imported keras object.
def __dir__():
keys = dict.fromkeys((globals().keys()))
keys.pop("src")
keys.pop("api")
return list(keys)
# Don't import `.src` or `.api` during `from keras import *`.
__all__ = [
name
for name in globals().keys()
if not (name.startswith("_") or name in ("src", "api"))
]
|
# DO NOT EDIT. Generated by api_gen.sh
from keras.api import DTypePolicy
from keras.api import FloatDTypePolicy
from keras.api import Function
from keras.api import Initializer
from keras.api import Input
from keras.api import InputSpec
from keras.api import KerasTensor
from keras.api import Layer
from keras.api import Loss
from keras.api import Metric
from keras.api import Model
from keras.api import Operation
from keras.api import Optimizer
from keras.api import Quantizer
from keras.api import Regularizer
from keras.api import Sequential
from keras.api import StatelessScope
from keras.api import SymbolicScope
from keras.api import Variable
from keras.api import __version__
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import device
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import name_scope
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.api import version
from keras.api import visualization
# END DO NOT EDIT.
import os # isort: skip
# Add everything in /api/ to the module search path.
__path__.append(os.path.join(os.path.dirname(__file__), "api")) # noqa: F405
# Don't pollute namespace.
del os
# Never autocomplete `.src` or `.api` on an imported keras object.
def __dir__():
keys = dict.fromkeys((globals().keys()))
keys.pop("src")
keys.pop("api")
return list(keys)
# Don't import `.src` or `.api` during `from keras import *`.
__all__ = [
name
for name in globals().keys()
if not (name.startswith("_") or name in ("src", "api"))
]
|
from typing import NamedTuple, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
T = TypeVar('T', bound='Mesh3DUrl')
class Mesh3DLoadResult(NamedTuple):
vertices: NdArray
faces: NdArray
@_register_proto(proto_type_name='mesh_url')
class Mesh3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing 3D mesh information.
Can be remote (web) URL, or a local file path.
"""
def load(self: T) -> Mesh3DLoadResult:
"""
Load the data from the url into a named tuple of two NdArrays containing
vertices and faces information.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
import numpy as np
from docarray.typing import Mesh3DUrl
class MyDoc(BaseDocument):
mesh_url: Mesh3DUrl
doc = MyDoc(mesh_url="toydata/tetrahedron.obj")
vertices, faces = doc.mesh_url.load()
assert isinstance(vertices, np.ndarray)
assert isinstance(faces, np.ndarray)
:return: named tuple of two NdArrays representing the mesh's vertices and faces
"""
mesh = self._load_trimesh_instance(force='mesh')
vertices = parse_obj_as(NdArray, mesh.vertices.view(np.ndarray))
faces = parse_obj_as(NdArray, mesh.faces.view(np.ndarray))
return Mesh3DLoadResult(vertices=vertices, faces=faces)
|
from typing import NamedTuple, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing import NdArray
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.url_3d.url_3d import Url3D
T = TypeVar('T', bound='Mesh3DUrl')
class Mesh3DLoadResult(NamedTuple):
vertices: NdArray
faces: NdArray
@_register_proto(proto_type_name='mesh_url')
class Mesh3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing 3D mesh information.
Can be remote (web) URL, or a local file path.
"""
def load(self: T) -> Mesh3DLoadResult:
"""
Load the data from the url into a named tuple of two NdArrays containing
vertices and faces information.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
import numpy as np
from docarray.typing import Mesh3DUrl
class MyDoc(BaseDocument):
mesh_url: Mesh3DUrl
doc = MyDoc(mesh_url="toydata/tetrahedron.obj")
vertices, faces = doc.mesh_url.load()
assert isinstance(vertices, np.ndarray)
assert isinstance(faces, np.ndarray)
:return: named tuple of two NdArrays representing the mesh's vertices and faces
"""
mesh = self._load_trimesh_instance(force='mesh')
vertices = parse_obj_as(NdArray, mesh.vertices.view(np.ndarray))
faces = parse_obj_as(NdArray, mesh.faces.view(np.ndarray))
return Mesh3DLoadResult(vertices=vertices, faces=faces)
|
from typing import List, Sequence
from llama_index.core.agent.workflow.base_agent import BaseWorkflowAgent
from llama_index.core.agent.workflow.workflow_events import (
AgentInput,
AgentOutput,
AgentStream,
ToolCallResult,
)
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.llms import ChatMessage
from llama_index.core.memory import BaseMemory
from llama_index.core.tools import AsyncBaseTool
from llama_index.core.workflow import Context
class FunctionAgent(BaseWorkflowAgent):
"""Function calling agent implementation."""
scratchpad_key: str = "scratchpad"
async def take_step(
self,
ctx: Context,
llm_input: List[ChatMessage],
tools: Sequence[AsyncBaseTool],
memory: BaseMemory,
) -> AgentOutput:
"""Take a single step with the function calling agent."""
if not self.llm.metadata.is_function_calling_model:
raise ValueError("LLM must be a FunctionCallingLLM")
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
current_llm_input = [*llm_input, *scratchpad]
ctx.write_event_to_stream(
AgentInput(input=current_llm_input, current_agent_name=self.name)
)
response = await self.llm.astream_chat_with_tools( # type: ignore
tools, chat_history=current_llm_input, allow_parallel_tool_calls=True
)
async for r in response:
tool_calls = self.llm.get_tool_calls_from_response( # type: ignore
r, error_on_no_tool_call=False
)
raw = r.raw.model_dump() if isinstance(r.raw, BaseModel) else r.raw
ctx.write_event_to_stream(
AgentStream(
delta=r.delta or "",
response=r.message.content or "",
tool_calls=tool_calls or [],
raw=raw,
current_agent_name=self.name,
)
)
tool_calls = self.llm.get_tool_calls_from_response( # type: ignore
r, error_on_no_tool_call=False
)
# only add to scratchpad if we didn't select the handoff tool
scratchpad.append(r.message)
await ctx.set(self.scratchpad_key, scratchpad)
raw = r.raw.model_dump() if isinstance(r.raw, BaseModel) else r.raw
return AgentOutput(
response=r.message,
tool_calls=tool_calls or [],
raw=raw,
current_agent_name=self.name,
)
async def handle_tool_call_results(
self, ctx: Context, results: List[ToolCallResult], memory: BaseMemory
) -> None:
"""Handle tool call results for function calling agent."""
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
for tool_call_result in results:
scratchpad.append(
ChatMessage(
role="tool",
content=str(tool_call_result.tool_output.content),
additional_kwargs={"tool_call_id": tool_call_result.tool_id},
)
)
if (
tool_call_result.return_direct
and tool_call_result.tool_name != "handoff"
):
scratchpad.append(
ChatMessage(
role="assistant",
content=str(tool_call_result.tool_output.content),
additional_kwargs={"tool_call_id": tool_call_result.tool_id},
)
)
break
await ctx.set(self.scratchpad_key, scratchpad)
async def finalize(
self, ctx: Context, output: AgentOutput, memory: BaseMemory
) -> AgentOutput:
"""Finalize the function calling agent.
Adds all in-progress messages to memory.
"""
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
for msg in scratchpad:
await memory.aput(msg)
# reset scratchpad
await ctx.set(self.scratchpad_key, [])
return output
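# A minimal usage sketch, assuming a function-calling LLM instance `llm` is available;
# the tool and prompt are illustrative, and construction details may differ between
# llama-index versions:
#
# from llama_index.core.tools import FunctionTool
#
# def multiply(a: int, b: int) -> int:
#     """Multiply two integers."""
#     return a * b
#
# agent = FunctionAgent(tools=[FunctionTool.from_defaults(fn=multiply)], llm=llm)
# response = await agent.run("What is 6 times 7?")  # inside an async context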
|
from typing import List, Sequence
from llama_index.core.agent.workflow.base_agent import BaseWorkflowAgent
from llama_index.core.agent.workflow.workflow_events import (
AgentInput,
AgentOutput,
AgentStream,
ToolCallResult,
)
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.llms import ChatMessage
from llama_index.core.memory import BaseMemory
from llama_index.core.tools import AsyncBaseTool
from llama_index.core.workflow import Context
class FunctionAgent(BaseWorkflowAgent):
"""Function calling agent implementation."""
scratchpad_key: str = "scratchpad"
async def take_step(
self,
ctx: Context,
llm_input: List[ChatMessage],
tools: Sequence[AsyncBaseTool],
memory: BaseMemory,
) -> AgentOutput:
"""Take a single step with the function calling agent."""
if not self.llm.metadata.is_function_calling_model:
raise ValueError("LLM must be a FunctionCallingLLM")
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
current_llm_input = [*llm_input, *scratchpad]
ctx.write_event_to_stream(
AgentInput(input=current_llm_input, current_agent_name=self.name)
)
response = await self.llm.astream_chat_with_tools( # type: ignore
tools, chat_history=current_llm_input, allow_parallel_tool_calls=True
)
async for r in response:
tool_calls = self.llm.get_tool_calls_from_response( # type: ignore
r, error_on_no_tool_call=False
)
raw = r.raw.model_dump() if isinstance(r.raw, BaseModel) else r.raw
ctx.write_event_to_stream(
AgentStream(
delta=r.delta or "",
response=r.message.content or "",
tool_calls=tool_calls or [],
raw=raw,
current_agent_name=self.name,
)
)
tool_calls = self.llm.get_tool_calls_from_response( # type: ignore
r, error_on_no_tool_call=False
)
# only add to scratchpad if we didn't select the handoff tool
scratchpad.append(r.message)
await ctx.set(self.scratchpad_key, scratchpad)
raw = r.raw.model_dump() if isinstance(r.raw, BaseModel) else r.raw
return AgentOutput(
response=r.message,
tool_calls=tool_calls or [],
raw=raw,
current_agent_name=self.name,
)
async def handle_tool_call_results(
self, ctx: Context, results: List[ToolCallResult], memory: BaseMemory
) -> None:
"""Handle tool call results for function calling agent."""
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
for tool_call_result in results:
scratchpad.append(
ChatMessage(
role="tool",
content=str(tool_call_result.tool_output.content),
additional_kwargs={"tool_call_id": tool_call_result.tool_id},
)
)
if tool_call_result.return_direct:
scratchpad.append(
ChatMessage(
role="assistant",
content=str(tool_call_result.tool_output.content),
additional_kwargs={"tool_call_id": tool_call_result.tool_id},
)
)
break
await ctx.set(self.scratchpad_key, scratchpad)
async def finalize(
self, ctx: Context, output: AgentOutput, memory: BaseMemory
) -> AgentOutput:
"""Finalize the function calling agent.
Adds all in-progress messages to memory.
"""
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
for msg in scratchpad:
await memory.aput(msg)
# reset scratchpad
await ctx.set(self.scratchpad_key, [])
return output
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.resnet_v2 import ResNet50V2 as ResNet50V2
from keras.src.applications.resnet_v2 import ResNet101V2 as ResNet101V2
from keras.src.applications.resnet_v2 import ResNet152V2 as ResNet152V2
from keras.src.applications.resnet_v2 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.resnet_v2 import (
preprocess_input as preprocess_input,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.resnet_v2 import ResNet50V2
from keras.src.applications.resnet_v2 import ResNet101V2
from keras.src.applications.resnet_v2 import ResNet152V2
from keras.src.applications.resnet_v2 import decode_predictions
from keras.src.applications.resnet_v2 import preprocess_input
|
import numpy as np
import pytest
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.computation.tensorflow_backend import TensorFlowCompBackend
from docarray.typing import TensorFlowTensor
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'shape,result',
[
((5), 1),
((1, 5), 2),
((5, 5), 2),
((), 0),
],
)
def test_n_dim(shape, result):
array = TensorFlowTensor(tf.zeros(shape))
assert TensorFlowCompBackend.n_dim(array) == result
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'shape,result',
[
((10,), (10,)),
((5, 5), (5, 5)),
((), ()),
],
)
def test_shape(shape, result):
array = TensorFlowTensor(tf.zeros(shape))
shape = TensorFlowCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
@pytest.mark.tensorflow
def test_to_device():
array = TensorFlowTensor(tf.constant([1, 2, 3]))
array = TensorFlowCompBackend.to_device(array, 'CPU:0')
assert array.tensor.device.endswith('CPU:0')
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'dtype,result_type',
[
('int64', 'int64'),
('float64', 'float64'),
('int8', 'int8'),
('double', 'float64'),
],
)
def test_dtype(dtype, result_type):
array = TensorFlowTensor(tf.constant([1, 2, 3], dtype=getattr(tf, dtype)))
assert TensorFlowCompBackend.dtype(array) == result_type
@pytest.mark.tensorflow
def test_empty():
array = TensorFlowCompBackend.empty((10, 3))
assert array.tensor.shape == (10, 3)
@pytest.mark.tensorflow
def test_empty_dtype():
tf_tensor = TensorFlowCompBackend.empty((10, 3), dtype=tf.int32)
assert tf_tensor.tensor.shape == (10, 3)
assert tf_tensor.tensor.dtype == tf.int32
@pytest.mark.tensorflow
def test_empty_device():
tensor = TensorFlowCompBackend.empty((10, 3), device='CPU:0')
assert tensor.tensor.shape == (10, 3)
assert tensor.tensor.device.endswith('CPU:0')
@pytest.mark.tensorflow
def test_squeeze():
tensor = TensorFlowTensor(tf.zeros(shape=(1, 1, 3, 1)))
squeezed = TensorFlowCompBackend.squeeze(tensor)
assert squeezed.tensor.shape == (3,)
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'data_input,t_range,x_range,data_result',
[
(
[0, 1, 2, 3, 4, 5],
(0, 10),
None,
[0, 2, 4, 6, 8, 10],
),
(
[0, 1, 2, 3, 4, 5],
(0, 10),
(0, 10),
[0, 1, 2, 3, 4, 5],
),
(
[[0.0, 1.0], [0.0, 1.0]],
(0, 10),
None,
[[0.0, 10.0], [0.0, 10.0]],
),
],
)
def test_minmax_normalize(data_input, t_range, x_range, data_result):
array = TensorFlowTensor(tf.constant(data_input))
output = TensorFlowCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert np.allclose(output.tensor, tf.constant(data_result))
@pytest.mark.tensorflow
def test_reshape():
tensor = TensorFlowTensor(tf.zeros((3, 224, 224)))
reshaped = TensorFlowCompBackend.reshape(tensor, (224, 224, 3))
assert reshaped.tensor.shape == (224, 224, 3)
@pytest.mark.tensorflow
def test_stack():
t0 = TensorFlowTensor(tf.zeros((3, 224, 224)))
t1 = TensorFlowTensor(tf.ones((3, 224, 224)))
stacked1 = TensorFlowCompBackend.stack([t0, t1], dim=0)
assert isinstance(stacked1, TensorFlowTensor)
assert stacked1.tensor.shape == (2, 3, 224, 224)
stacked2 = TensorFlowCompBackend.stack([t0, t1], dim=-1)
assert isinstance(stacked2, TensorFlowTensor)
assert stacked2.tensor.shape == (3, 224, 224, 2)
|
import numpy as np
import pytest
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.computation.tensorflow_backend import TensorFlowCompBackend
from docarray.typing import TensorFlowTensor
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'shape,result',
[
((5), 1),
((1, 5), 2),
((5, 5), 2),
((), 0),
],
)
def test_n_dim(shape, result):
array = TensorFlowTensor(tf.zeros(shape))
assert TensorFlowCompBackend.n_dim(array) == result
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'shape,result',
[
((10,), (10,)),
((5, 5), (5, 5)),
((), ()),
],
)
def test_shape(shape, result):
array = TensorFlowTensor(tf.zeros(shape))
shape = TensorFlowCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
@pytest.mark.tensorflow
def test_to_device():
array = TensorFlowTensor(tf.constant([1, 2, 3]))
array = TensorFlowCompBackend.to_device(array, 'CPU:0')
assert array.tensor.device.endswith('CPU:0')
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'dtype,result_type',
[
('int64', 'int64'),
('float64', 'float64'),
('int8', 'int8'),
('double', 'float64'),
],
)
def test_dtype(dtype, result_type):
array = TensorFlowTensor(tf.constant([1, 2, 3], dtype=getattr(tf, dtype)))
assert TensorFlowCompBackend.dtype(array) == result_type
@pytest.mark.tensorflow
def test_empty():
array = TensorFlowCompBackend.empty((10, 3))
assert array.tensor.shape == (10, 3)
@pytest.mark.tensorflow
def test_empty_dtype():
tf_tensor = TensorFlowCompBackend.empty((10, 3), dtype=tf.int32)
assert tf_tensor.tensor.shape == (10, 3)
assert tf_tensor.tensor.dtype == tf.int32
@pytest.mark.tensorflow
def test_empty_device():
tensor = TensorFlowCompBackend.empty((10, 3), device='CPU:0')
assert tensor.tensor.shape == (10, 3)
assert tensor.tensor.device.endswith('CPU:0')
@pytest.mark.tensorflow
def test_squeeze():
tensor = TensorFlowTensor(tf.zeros(shape=(1, 1, 3, 1)))
squeezed = TensorFlowCompBackend.squeeze(tensor)
assert squeezed.tensor.shape == (3,)
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'data_input,t_range,x_range,data_result',
[
(
[0, 1, 2, 3, 4, 5],
(0, 10),
None,
[0, 2, 4, 6, 8, 10],
),
(
[0, 1, 2, 3, 4, 5],
(0, 10),
(0, 10),
[0, 1, 2, 3, 4, 5],
),
(
[[0.0, 1.0], [0.0, 1.0]],
(0, 10),
None,
[[0.0, 10.0], [0.0, 10.0]],
),
],
)
def test_minmax_normalize(data_input, t_range, x_range, data_result):
array = TensorFlowTensor(tf.constant(data_input))
output = TensorFlowCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert np.allclose(output.tensor, tf.constant(data_result))
@pytest.mark.tensorflow
def test_reshape():
tensor = TensorFlowTensor(tf.zeros((3, 224, 224)))
reshaped = TensorFlowCompBackend.reshape(tensor, (224, 224, 3))
assert reshaped.tensor.shape == (224, 224, 3)
@pytest.mark.tensorflow
def test_stack():
t0 = TensorFlowTensor(tf.zeros((3, 224, 224)))
t1 = TensorFlowTensor(tf.ones((3, 224, 224)))
stacked1 = TensorFlowCompBackend.stack([t0, t1], dim=0)
assert isinstance(stacked1, TensorFlowTensor)
assert stacked1.tensor.shape == (2, 3, 224, 224)
stacked2 = TensorFlowCompBackend.stack([t0, t1], dim=-1)
assert isinstance(stacked2, TensorFlowTensor)
assert stacked2.tensor.shape == (3, 224, 224, 2)
|