input
stringlengths 33
5k
| output
stringlengths 32
5k
|
---|---|
import os
import urllib
import numpy as np
import PIL
import pytest
from PIL import Image
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'image-data')
IMAGE_PATHS = {
'png': os.path.join(PATH_TO_IMAGE_DATA, 'so_good.png'),
'jpg': os.path.join(PATH_TO_IMAGE_DATA, '05984.jpg'),
'jpeg': os.path.join(PATH_TO_IMAGE_DATA, '05984-2.jpeg'),
}
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.proto
def test_proto_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
uri._to_node_protobuf()
def test_json_schema():
schema_json_of(ImageUrl)
def test_dump_json():
url = parse_obj_as(ImageUrl, 'http://jina.ai/img.png')
orjson_dumps(url)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
def test_load(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
def test_load_pil(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
img = url.load_pil()
assert isinstance(img, PIL.Image.Image)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize('width,height', [(224, None), (None, 224), (224, 224)])
def test_load_width_height(image_format, path_to_img, width, height):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(width=width, height=height)
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
if width:
assert shape[1] == width
if height:
assert shape[0] == height
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize(
'axis_layout',
[
('H', 'W', 'C'),
('H', 'C', 'W'),
('C', 'H', 'W'),
('C', 'W', 'H'),
('W', 'C', 'H'),
('W', 'H', 'C'),
],
)
def test_load_channel_axis(image_format, path_to_img, axis_layout):
sizes = {'H': 100, 'W': 200, 'C': 3}
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(axis_layout=axis_layout, height=sizes['H'], width=sizes['W'])
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
for axis, axis_name in enumerate(axis_layout):
assert shape[axis] == sizes[axis_name]
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(ImageUrl, REMOTE_JPG)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
],
)
def test_load_to_bytes(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
_bytes = url.load_bytes()
assert isinstance(_bytes, bytes)
img = Image.frombytes(mode='1', size=(224, 224), data=_bytes)
assert isinstance(img, Image.Image)
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
],
)
def test_validation(image_format, path_to_img):
if image_format == 'illegal':
with pytest.raises(ValueError):
parse_obj_as(ImageUrl, path_to_img)
else:
url = parse_obj_as(ImageUrl, path_to_img)
assert isinstance(url, ImageUrl)
assert isinstance(url, str)
|
import os
import urllib
import numpy as np
import pytest
from PIL import Image
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'image-data')
IMAGE_PATHS = {
'png': os.path.join(PATH_TO_IMAGE_DATA, 'so_good.png'),
'jpg': os.path.join(PATH_TO_IMAGE_DATA, '05984.jpg'),
'jpeg': os.path.join(PATH_TO_IMAGE_DATA, '05984-2.jpeg'),
}
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.proto
def test_proto_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
uri._to_node_protobuf()
def test_json_schema():
schema_json_of(ImageUrl)
def test_dump_json():
url = parse_obj_as(ImageUrl, 'http://jina.ai/img.png')
orjson_dumps(url)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
def test_load(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize('width,height', [(224, None), (None, 224), (224, 224)])
def test_load_width_height(image_format, path_to_img, width, height):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(width=width, height=height)
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
if width:
assert shape[1] == width
if height:
assert shape[0] == height
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize(
'axis_layout',
[
('H', 'W', 'C'),
('H', 'C', 'W'),
('C', 'H', 'W'),
('C', 'W', 'H'),
('W', 'C', 'H'),
('W', 'H', 'C'),
],
)
def test_load_channel_axis(image_format, path_to_img, axis_layout):
sizes = {'H': 100, 'W': 200, 'C': 3}
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(axis_layout=axis_layout, height=sizes['H'], width=sizes['W'])
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
for axis, axis_name in enumerate(axis_layout):
assert shape[axis] == sizes[axis_name]
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(ImageUrl, REMOTE_JPG)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
],
)
def test_load_to_bytes(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
_bytes = url.load_bytes()
assert isinstance(_bytes, bytes)
img = Image.frombytes(mode='1', size=(224, 224), data=_bytes)
assert isinstance(img, Image.Image)
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
],
)
def test_validation(image_format, path_to_img):
if image_format == 'illegal':
with pytest.raises(ValueError):
parse_obj_as(ImageUrl, path_to_img)
else:
url = parse_obj_as(ImageUrl, path_to_img)
assert isinstance(url, ImageUrl)
assert isinstance(url, str)
|
import warnings
from abc import ABC
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.chat_history import (
BaseChatMessageHistory,
InMemoryChatMessageHistory,
)
from langchain_core.memory import BaseMemory
from langchain_core.messages import AIMessage, HumanMessage
from pydantic import Field
from langchain.memory.utils import get_prompt_input_key
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
class BaseChatMemory(BaseMemory, ABC):
"""Abstract base class for chat memory.
**ATTENTION** This abstraction was created prior to when chat models had
native tool calling capabilities.
It does **NOT** support native tool calling capabilities for chat models and
will fail SILENTLY if used with a chat model that has native tool calling.
DO NOT USE THIS ABSTRACTION FOR NEW CODE.
"""
chat_memory: BaseChatMessageHistory = Field(
default_factory=InMemoryChatMessageHistory,
)
output_key: Optional[str] = None
input_key: Optional[str] = None
return_messages: bool = False
def _get_input_output(
self,
inputs: dict[str, Any],
outputs: dict[str, str],
) -> tuple[str, str]:
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) == 1:
output_key = next(iter(outputs.keys()))
elif "output" in outputs:
output_key = "output"
warnings.warn(
f"'{self.__class__.__name__}' got multiple output keys:"
f" {outputs.keys()}. The default 'output' key is being used."
f" If this is not desired, please manually set 'output_key'.",
stacklevel=2,
)
else:
msg = (
f"Got multiple output keys: {outputs.keys()}, cannot "
f"determine which to store in memory. Please set the "
f"'output_key' explicitly."
)
raise ValueError(msg)
else:
output_key = self.output_key
return inputs[prompt_input_key], outputs[output_key]
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
],
)
async def asave_context(
self,
inputs: dict[str, Any],
outputs: dict[str, str],
) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
await self.chat_memory.aadd_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
],
)
def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
async def aclear(self) -> None:
"""Clear memory contents."""
await self.chat_memory.aclear()
|
import warnings
from abc import ABC
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.chat_history import (
BaseChatMessageHistory,
InMemoryChatMessageHistory,
)
from langchain_core.memory import BaseMemory
from langchain_core.messages import AIMessage, HumanMessage
from pydantic import Field
from langchain.memory.utils import get_prompt_input_key
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
class BaseChatMemory(BaseMemory, ABC):
"""Abstract base class for chat memory.
**ATTENTION** This abstraction was created prior to when chat models had
native tool calling capabilities.
It does **NOT** support native tool calling capabilities for chat models and
will fail SILENTLY if used with a chat model that has native tool calling.
DO NOT USE THIS ABSTRACTION FOR NEW CODE.
"""
chat_memory: BaseChatMessageHistory = Field(
default_factory=InMemoryChatMessageHistory,
)
output_key: Optional[str] = None
input_key: Optional[str] = None
return_messages: bool = False
def _get_input_output(
self,
inputs: dict[str, Any],
outputs: dict[str, str],
) -> tuple[str, str]:
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) == 1:
output_key = next(iter(outputs.keys()))
elif "output" in outputs:
output_key = "output"
warnings.warn(
f"'{self.__class__.__name__}' got multiple output keys:"
f" {outputs.keys()}. The default 'output' key is being used."
f" If this is not desired, please manually set 'output_key'.",
stacklevel=3,
)
else:
msg = (
f"Got multiple output keys: {outputs.keys()}, cannot "
f"determine which to store in memory. Please set the "
f"'output_key' explicitly."
)
raise ValueError(msg)
else:
output_key = self.output_key
return inputs[prompt_input_key], outputs[output_key]
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
],
)
async def asave_context(
self,
inputs: dict[str, Any],
outputs: dict[str, str],
) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
await self.chat_memory.aadd_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
],
)
def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
async def aclear(self) -> None:
"""Clear memory contents."""
await self.chat_memory.aclear()
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_document import BaseDocument
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='TextDoc')
class TextDoc(BaseDocument):
"""
Document for handling text.
It can contain a TextUrl (`TextDoc.url`), a str (`TextDoc.text`),
and an AnyEmbedding (`TextDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import TextDoc
# use it directly
txt_doc = Text(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
You can initialize directly from a string:
.. code-block:: python
from docarray.documents import TextDoc
txt_doc = Text('hello world')
You can extend this Document:
.. code-block:: python
from docarray.documents import TextDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyText(Text):
second_embedding: Optional[AnyEmbedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
txt_doc.second_embedding = model(txt_doc.text)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDocument):
image_doc: Image
text_doc: Text
mmdoc = MultiModalDoc(
image_doc=Image(url="http://www.jina.ai/image.jpg"),
text_doc=Text(text="hello world, how are you doing?"),
)
mmdoc.text_doc.text = mmdoc.text_doc.url.load()
# or
mmdoc.text_doc.bytes = mmdoc.text_doc.url.load_bytes()
This Document can be compared against another Document of the same type or a string.
When compared against another object of the same type, the pydantic BaseModel
equality check will apply which checks the equality of every attribute,
including `id`. When compared against a str, it will check the equality
of the `text` attribute against the given string.
.. code-block:: python
from docarray.documents import TextDoc
doc = Text(text='This is the main text', url='exampleurl.com')
doc2 = Text(text='This is the main text', url='exampleurl.com')
doc == 'This is the main text' # True
doc == doc2 # False, their ids are not equivalent
"""
text: Optional[str] = None
url: Optional[TextUrl] = None
embedding: Optional[AnyEmbedding] = None
bytes: Optional[bytes] = None
def __init__(self, text: Optional[str] = None, **kwargs):
if 'text' not in kwargs:
kwargs['text'] = text
super().__init__(**kwargs)
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(text=value)
return super().validate(value)
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return self.text == other
else:
# BaseModel has a default equality
return super().__eq__(other)
def __contains__(self, item: str) -> bool:
"""
This method makes `Text` behave the same as an `str`.
.. code-block:: python
from docarray.documents import Text
t = Text(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
:param item: A string to be checked if is a substring of `text` attribute
:return: A boolean determining the presence of `item` as a substring in `text`
"""
if self.text is not None:
return self.text.__contains__(item)
else:
return False
def _get_string_for_regex_filter(self):
return self.text
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_document import BaseDocument
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='Text')
class Text(BaseDocument):
"""
Document for handling text.
It can contain a TextUrl (`Text.url`), a str (`Text.text`),
and an AnyEmbedding (`Text.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Text
# use it directly
txt_doc = Text(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
You can initialize directly from a string:
.. code-block:: python
from docarray.documents import Text
txt_doc = Text('hello world')
You can extend this Document:
.. code-block:: python
from docarray.documents import Text
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyText(Text):
second_embedding: Optional[AnyEmbedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
txt_doc.second_embedding = model(txt_doc.text)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Image, Text
# compose it
class MultiModalDoc(BaseDocument):
image_doc: Image
text_doc: Text
mmdoc = MultiModalDoc(
image_doc=Image(url="http://www.jina.ai/image.jpg"),
text_doc=Text(text="hello world, how are you doing?"),
)
mmdoc.text_doc.text = mmdoc.text_doc.url.load()
# or
mmdoc.text_doc.bytes = mmdoc.text_doc.url.load_bytes()
This Document can be compared against another Document of the same type or a string.
When compared against another object of the same type, the pydantic BaseModel
equality check will apply which checks the equality of every attribute,
including `id`. When compared against a str, it will check the equality
of the `text` attribute against the given string.
.. code-block:: python
from docarray.documents import Text
doc = Text(text='This is the main text', url='exampleurl.com')
doc2 = Text(text='This is the main text', url='exampleurl.com')
doc == 'This is the main text' # True
doc == doc2 # False, their ids are not equivalent
"""
text: Optional[str] = None
url: Optional[TextUrl] = None
embedding: Optional[AnyEmbedding] = None
bytes: Optional[bytes] = None
def __init__(self, text: Optional[str] = None, **kwargs):
if 'text' not in kwargs:
kwargs['text'] = text
super().__init__(**kwargs)
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(text=value)
return super().validate(value)
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return self.text == other
else:
# BaseModel has a default equality
return super().__eq__(other)
def __contains__(self, item: str) -> bool:
"""
This method makes `Text` behave the same as an `str`.
.. code-block:: python
from docarray.documents import Text
t = Text(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
:param item: A string to be checked if is a substring of `text` attribute
:return: A boolean determining the presence of `item` as a substring in `text`
"""
if self.text is not None:
return self.text.__contains__(item)
else:
return False
def _get_string_for_regex_filter(self):
return self.text
|
_base_ = '../htc/htc_x101-64x4d_fpn_16xb1-20e_coco.py'
# learning policy
max_epochs = 28
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[24, 27],
gamma=0.1)
]
|
_base_ = '../htc/htc_x101_64x4d_fpn_16x1_20e_coco.py'
# learning policy
max_epochs = 28
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[24, 27],
gamma=0.1)
]
|
_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
to_rgb=False,
pad_size_divisor=32)
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
|
_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
to_rgb=False)
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
from pathlib import Path
import pytest
from langchain_community.document_loaders import CSVLoader, DirectoryLoader, TextLoader
from langchain_community.document_loaders.helpers import detect_file_encodings
@pytest.mark.requires("chardet")
def test_loader_detect_encoding_text() -> None:
"""Test text loader."""
path = Path(__file__).parent.parent / "examples"
files = path.glob("**/*.txt")
loader = DirectoryLoader(str(path), glob="**/*.txt", loader_cls=TextLoader)
loader_detect_encoding = DirectoryLoader(
str(path),
glob="**/*.txt",
loader_kwargs={"autodetect_encoding": True},
loader_cls=TextLoader,
)
with pytest.raises((UnicodeDecodeError, RuntimeError)):
loader.load()
docs = loader_detect_encoding.load()
assert len(docs) == len(list(files))
@pytest.mark.requires("chardet")
def test_loader_detect_encoding_csv() -> None:
"""Test csv loader."""
path = Path(__file__).parent.parent / "examples"
files = path.glob("**/*.csv")
# Count the number of lines.
row_count = 0
for file in files:
encodings = detect_file_encodings(str(file))
for encoding in encodings:
try:
row_count += sum(1 for line in open(file, encoding=encoding.encoding))
break
except UnicodeDecodeError:
continue
# CSVLoader uses DictReader, and one line per file is a header,
# so subtract the number of files.
row_count -= 1
loader = DirectoryLoader(
str(path),
glob="**/*.csv",
loader_cls=CSVLoader,
)
loader_detect_encoding = DirectoryLoader(
str(path),
glob="**/*.csv",
loader_kwargs={"autodetect_encoding": True},
loader_cls=CSVLoader,
)
with pytest.raises((UnicodeDecodeError, RuntimeError)):
loader.load()
docs = loader_detect_encoding.load()
assert len(docs) == row_count
@pytest.mark.skip(reason="slow test")
@pytest.mark.requires("chardet")
def test_loader_detect_encoding_timeout(tmpdir: str) -> None:
path = Path(tmpdir)
file_path = str(path / "blob.txt")
# 2mb binary blob
with open(file_path, "wb") as f:
f.write(b"\x00" * 2_000_000)
with pytest.raises(TimeoutError):
detect_file_encodings(file_path, timeout=1)
detect_file_encodings(file_path, timeout=10)
|
from pathlib import Path
import pytest
from langchain_community.document_loaders import CSVLoader, DirectoryLoader, TextLoader
from langchain_community.document_loaders.helpers import detect_file_encodings
@pytest.mark.requires("chardet")
def test_loader_detect_encoding_text() -> None:
"""Test text loader."""
path = Path(__file__).parent.parent / "examples"
files = path.glob("**/*.txt")
loader = DirectoryLoader(str(path), glob="**/*.txt", loader_cls=TextLoader)
loader_detect_encoding = DirectoryLoader(
str(path),
glob="**/*.txt",
loader_kwargs={"autodetect_encoding": True},
loader_cls=TextLoader, # type: ignore
)
with pytest.raises((UnicodeDecodeError, RuntimeError)):
loader.load()
docs = loader_detect_encoding.load()
assert len(docs) == len(list(files))
@pytest.mark.requires("chardet")
def test_loader_detect_encoding_csv() -> None:
"""Test csv loader."""
path = Path(__file__).parent.parent / "examples"
files = path.glob("**/*.csv")
# Count the number of lines.
row_count = 0
for file in files:
encodings = detect_file_encodings(str(file))
for encoding in encodings:
try:
row_count += sum(1 for line in open(file, encoding=encoding.encoding))
break
except UnicodeDecodeError:
continue
# CSVLoader uses DictReader, and one line per file is a header,
# so subtract the number of files.
row_count -= 1
loader = DirectoryLoader(
str(path),
glob="**/*.csv",
loader_cls=CSVLoader, # type: ignore
)
loader_detect_encoding = DirectoryLoader(
str(path),
glob="**/*.csv",
loader_kwargs={"autodetect_encoding": True},
loader_cls=CSVLoader, # type: ignore
)
with pytest.raises((UnicodeDecodeError, RuntimeError)):
loader.load()
docs = loader_detect_encoding.load()
assert len(docs) == row_count
@pytest.mark.skip(reason="slow test")
@pytest.mark.requires("chardet")
def test_loader_detect_encoding_timeout(tmpdir: str) -> None:
path = Path(tmpdir)
file_path = str(path / "blob.txt")
# 2mb binary blob
with open(file_path, "wb") as f:
f.write(b"\x00" * 2_000_000)
with pytest.raises(TimeoutError):
detect_file_encodings(file_path, timeout=1)
detect_file_encodings(file_path, timeout=10)
|
# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadPanopticAnnotations', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadPanopticAnnotations', backend_args=backend_args),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_train2017.json',
data_prefix=dict(
img='train2017/', seg='annotations/panoptic_train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_val2017.json',
data_prefix=dict(img='val2017/', seg='annotations/panoptic_val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoPanopticMetric',
ann_file=data_root + 'annotations/panoptic_val2017.json',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
backend_args=backend_args)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=1,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/panoptic_image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoPanopticMetric',
# format_only=True,
# ann_file=data_root + 'annotations/panoptic_image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_panoptic/test')
|
# dataset settings
dataset_type = 'CocoPanopticDataset'
# data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadPanopticAnnotations', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadPanopticAnnotations', backend_args=backend_args),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_train2017.json',
data_prefix=dict(
img='train2017/', seg='annotations/panoptic_train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_val2017.json',
data_prefix=dict(img='val2017/', seg='annotations/panoptic_val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoPanopticMetric',
ann_file=data_root + 'annotations/panoptic_val2017.json',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
backend_args=backend_args)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=1,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/panoptic_image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoPanopticMetric',
# format_only=True,
# ann_file=data_root + 'annotations/panoptic_image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_panoptic/test')
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(type='AmpOptimWrapper')
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
# fp16 settings
fp16 = dict(loss_scale=512.)
|
"""Google Calendar reader."""
import datetime
import os
from typing import Any, List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
SCOPES = ["https://www.googleapis.com/auth/calendar.readonly"]
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class GoogleCalendarReader(BaseReader):
"""
Google Calendar reader.
Reads events from Google Calendar
"""
def load_data(
self,
number_of_results: Optional[int] = 100,
start_date: Optional[Union[str, datetime.date]] = None,
) -> List[Document]:
"""
Load data from user's calendar.
Args:
number_of_results (Optional[int]): the number of events to return. Defaults to 100.
start_date (Optional[Union[str, datetime.date]]): the start date to return events from. Defaults to today.
"""
from googleapiclient.discovery import build
credentials = self._get_credentials()
service = build("calendar", "v3", credentials=credentials)
if start_date is None:
start_date = datetime.date.today()
elif isinstance(start_date, str):
start_date = datetime.date.fromisoformat(start_date)
start_datetime = datetime.datetime.combine(start_date, datetime.time.min)
start_datetime_utc = start_datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
events_result = (
service.events()
.list(
calendarId="primary",
timeMin=start_datetime_utc,
maxResults=number_of_results,
singleEvents=True,
orderBy="startTime",
)
.execute()
)
events = events_result.get("items", [])
if not events:
return []
results = []
for event in events:
if "dateTime" in event["start"]:
start_time = event["start"]["dateTime"]
else:
start_time = event["start"]["date"]
if "dateTime" in event["end"]:
end_time = event["end"]["dateTime"]
else:
end_time = event["end"]["date"]
event_string = f"Status: {event['status']}, "
event_string += f"Summary: {event['summary']}, "
event_string += f"Start time: {start_time}, "
event_string += f"End time: {end_time}, "
organizer = event.get("organizer", {})
display_name = organizer.get("displayName", "N/A")
email = organizer.get("email", "N/A")
if display_name != "N/A":
event_string += f"Organizer: {display_name} ({email})"
else:
event_string += f"Organizer: {email}"
results.append(Document(text=event_string))
return results
def _get_credentials(self) -> Any:
"""
Get valid user credentials from storage.
The file token.json stores the user's access and refresh tokens, and is
created automatically when the authorization flow completes for the first
time.
Returns:
Credentials, the obtained credential.
"""
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
creds = None
if os.path.exists("token.json"):
creds = Credentials.from_authorized_user_file("token.json", SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
"credentials.json", SCOPES
)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open("token.json", "w") as token:
token.write(creds.to_json())
return creds
if __name__ == "__main__":
reader = GoogleCalendarReader()
print(reader.load_data())
|
"""Google Calendar reader."""
import datetime
import os
from typing import Any, List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
SCOPES = ["https://www.googleapis.com/auth/calendar.readonly"]
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class GoogleCalendarReader(BaseReader):
"""Google Calendar reader.
Reads events from Google Calendar
"""
def load_data(
self,
number_of_results: Optional[int] = 100,
start_date: Optional[Union[str, datetime.date]] = None,
) -> List[Document]:
"""Load data from user's calendar.
Args:
number_of_results (Optional[int]): the number of events to return. Defaults to 100.
start_date (Optional[Union[str, datetime.date]]): the start date to return events from. Defaults to today.
"""
from googleapiclient.discovery import build
credentials = self._get_credentials()
service = build("calendar", "v3", credentials=credentials)
if start_date is None:
start_date = datetime.date.today()
elif isinstance(start_date, str):
start_date = datetime.date.fromisoformat(start_date)
start_datetime = datetime.datetime.combine(start_date, datetime.time.min)
start_datetime_utc = start_datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
events_result = (
service.events()
.list(
calendarId="primary",
timeMin=start_datetime_utc,
maxResults=number_of_results,
singleEvents=True,
orderBy="startTime",
)
.execute()
)
events = events_result.get("items", [])
if not events:
return []
results = []
for event in events:
if "dateTime" in event["start"]:
start_time = event["start"]["dateTime"]
else:
start_time = event["start"]["date"]
if "dateTime" in event["end"]:
end_time = event["end"]["dateTime"]
else:
end_time = event["end"]["date"]
event_string = f"Status: {event['status']}, "
event_string += f"Summary: {event['summary']}, "
event_string += f"Start time: {start_time}, "
event_string += f"End time: {end_time}, "
organizer = event.get("organizer", {})
display_name = organizer.get("displayName", "N/A")
email = organizer.get("email", "N/A")
if display_name != "N/A":
event_string += f"Organizer: {display_name} ({email})"
else:
event_string += f"Organizer: {email}"
results.append(Document(text=event_string))
return results
def _get_credentials(self) -> Any:
"""Get valid user credentials from storage.
The file token.json stores the user's access and refresh tokens, and is
created automatically when the authorization flow completes for the first
time.
Returns:
Credentials, the obtained credential.
"""
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
creds = None
if os.path.exists("token.json"):
creds = Credentials.from_authorized_user_file("token.json", SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
"credentials.json", SCOPES
)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open("token.json", "w") as token:
token.write(creds.to_json())
return creds
if __name__ == "__main__":
reader = GoogleCalendarReader()
print(reader.load_data())
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.RandomGrayscale")
class RandomGrayscale(BaseImagePreprocessingLayer):
"""Preprocessing layer for random conversion of RGB images to grayscale.
This layer randomly converts input images to grayscale with a specified
factor. When applied, it maintains the original number of channels
but sets all channels to the same grayscale value. This can be useful
for data augmentation and training models to be robust to color
variations.
The conversion preserves the perceived luminance of the original color
image using standard RGB to grayscale conversion coefficients. Images
that are not selected for conversion remain unchanged.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
factor: Float between 0 and 1, specifying the factor of
converting each image to grayscale. Defaults to 0.5. A value of
1.0 means all images will be converted, while 0.0 means no images
will be converted.
data_format: String, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch, channels, height, width)`.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format,
or `(..., channels, height, width)`, in `"channels_first"` format.
Output shape:
Same as input shape. The output maintains the same number of channels
as the input, even for grayscale-converted images where all channels
will have the same value.
"""
def __init__(self, factor=0.5, data_format=None, seed=None, **kwargs):
super().__init__(**kwargs)
if factor < 0 or factor > 1:
raise ValueError(
"`factor` should be between 0 and 1. "
f"Received: factor={factor}"
)
self.factor = factor
self.data_format = backend.standardize_data_format(data_format)
self.seed = seed
self.generator = self.backend.random.SeedGenerator(seed)
def get_random_transformation(self, images, training=True, seed=None):
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
random_values = self.backend.random.uniform(
shape=(self.backend.core.shape(images)[0],),
minval=0,
maxval=1,
seed=seed,
)
should_apply = self.backend.numpy.expand_dims(
random_values < self.factor, axis=[1, 2, 3]
)
return should_apply
def transform_images(self, images, transformation, training=True):
if training:
should_apply = (
transformation
if transformation is not None
else self.get_random_transformation(images)
)
grayscale_images = self.backend.image.rgb_to_grayscale(
images, data_format=self.data_format
)
return self.backend.numpy.where(
should_apply, grayscale_images, images
)
return images
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs, **kwargs):
return inputs
def transform_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def transform_labels(self, labels, transformations=None, **kwargs):
return labels
def transform_segmentation_masks(
self, segmentation_masks, transformations=None, **kwargs
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"factor": self.factor})
return config
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.RandomGrayscale")
class RandomGrayscale(BaseImagePreprocessingLayer):
"""Preprocessing layer for random conversion of RGB images to grayscale.
This layer randomly converts input images to grayscale with a specified
factor. When applied, it maintains the original number of channels
but sets all channels to the same grayscale value. This can be useful
for data augmentation and training models to be robust to color
variations.
The conversion preserves the perceived luminance of the original color
image using standard RGB to grayscale conversion coefficients. Images
that are not selected for conversion remain unchanged.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
factor: Float between 0 and 1, specifying the factor of
converting each image to grayscale. Defaults to 0.5. A value of
1.0 means all images will be converted, while 0.0 means no images
will be converted.
data_format: String, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch, channels, height, width)`.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format,
or `(..., channels, height, width)`, in `"channels_first"` format.
Output shape:
Same as input shape. The output maintains the same number of channels
as the input, even for grayscale-converted images where all channels
will have the same value.
"""
def __init__(self, factor=0.5, data_format=None, seed=None, **kwargs):
super().__init__(**kwargs)
if factor < 0 or factor > 1:
raise ValueError(
"`factor` should be between 0 and 1. "
f"Received: factor={factor}"
)
self.factor = factor
self.data_format = backend.standardize_data_format(data_format)
self.seed = seed
self.generator = self.backend.random.SeedGenerator(seed)
def get_random_transformation(self, images, training=True, seed=None):
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
random_values = self.backend.random.uniform(
shape=(self.backend.core.shape(images)[0],),
minval=0,
maxval=1,
seed=seed,
)
should_apply = self.backend.numpy.expand_dims(
random_values < self.factor, axis=[1, 2, 3]
)
return should_apply
def transform_images(self, images, transformations=None, **kwargs):
should_apply = (
transformations
if transformations is not None
else self.get_random_transformation(images)
)
grayscale_images = self.backend.image.rgb_to_grayscale(
images, data_format=self.data_format
)
return self.backend.numpy.where(should_apply, grayscale_images, images)
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs, **kwargs):
return inputs
def transform_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def transform_labels(self, labels, transformations=None, **kwargs):
return labels
def transform_segmentation_masks(
self, segmentation_masks, transformations=None, **kwargs
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"factor": self.factor})
return config
|
import json
from typing import Union, Sequence, Dict, Any, Callable
from tenacity import (
retry,
stop_after_attempt,
wait_exponential,
retry_if_exception_type,
)
from asyncio import iscoroutinefunction
from requests.exceptions import Timeout, ConnectionError
from llama_index.core.base.llms.types import ChatMessage, ChatResponse, MessageRole
def maybe_decode_sse_data(data: bytes) -> Union[dict, None]:
"""
Decode data from the streaming response.
Checks whether the incoming data is an actual
SSE data message.
Args:
data (bytes): The incoming data.
Returns:
Union[dict, None]: The decoded data or None.
"""
if data and data.startswith(b"data: "):
data = data.decode("utf-8").strip("data: ")
try:
return json.loads(data)
except json.JSONDecodeError:
return None
else:
return None
def maybe_extract_from_json(data: dict, key: str = "text") -> Union[str, None]:
"""
Extract text from a JSON response.
Args:
data (dict): The JSON response.
Returns:
Union[str, None]: The extracted text or None.
"""
if "choices" in data:
if len(data["choices"]) > 0 and key in data["choices"][0]:
return data["choices"][0][key]
else:
return None
else:
return None
def chat_messages_to_list(messages: Sequence[ChatMessage]) -> Sequence[Dict[str, Any]]:
"""
Convert a sequence of chat messages to a list of dictionaries.
Args:
messages (Sequence[ChatMessage]): A sequence of chat messages.
Returns:
Sequence[Dict[str, Any]]: A list of dictionaries.
"""
chat_messages = []
for message in messages:
if message.role in [
MessageRole.USER,
MessageRole.ASSISTANT,
MessageRole.SYSTEM,
MessageRole.TOOL,
]:
chat_messages.append(
{
"role": message.role,
"content": message.content,
**message.additional_kwargs,
}
)
return chat_messages
def create_retry_decorator(retry_limit: int) -> Callable[[Any], Any]:
"""
Create a retry decorator with the given retry limit.
Args:
retry_limit (int): The retry limit.
Returns:
Callable[[Any], Any]: The retry decorator.
"""
initial_delay = 4
max_delay = 10
return retry(
reraise=True,
stop=stop_after_attempt(retry_limit),
wait=wait_exponential(multiplier=1, min=initial_delay, max=max_delay),
retry=(
retry_if_exception_type(Timeout) | retry_if_exception_type(ConnectionError)
),
)
def retry_request(
request_func: Callable[..., Any], max_retries: int = 10, *args: Any, **kwargs: Any
) -> Any:
"""
Retry a request function.
Args:
request_func (Callable[..., Any]): The request function.
max_retries (int): The maximum number of retries.
*args (Any): The positional arguments.
**kwargs (Any): The keyword arguments.
Returns:
Any: The response.
"""
retry_func = create_retry_decorator(max_retries)
@retry_func
def retry_func(request_func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
return request_func(*args, **kwargs)
return retry_func(request_func, *args, **kwargs)
async def aretry_request(
request_func: Callable[..., Any], max_retries: int = 10, *args: Any, **kwargs: Any
) -> Any:
"""
Retry a request function asynchronously.
Args:
request_func (Callable[..., Any]): The request function.
max_retries (int): The maximum number of retries.
*args (Any): The positional arguments.
**kwargs (Any): The keyword arguments.
Returns:
Any: The response.
"""
retry_decorator = create_retry_decorator(max_retries)
@retry_decorator
async def retry_func(
request_func: Callable[..., Any], *args: Any, **kwargs: Any
) -> Any:
if iscoroutinefunction(request_func):
return await request_func(*args, **kwargs)
else:
return request_func(*args, **kwargs)
return await retry_func(request_func, *args, **kwargs)
def force_single_tool_call(response: ChatResponse) -> None:
"""
Force a single tool call in the response.
Overrides the tool calls in the response message.
"""
tool_calls = response.message.additional_kwargs.get("tool_calls", [])
if len(tool_calls) > 1:
response.message.additional_kwargs["tool_calls"] = [tool_calls[0]]
|
import json
from typing import Union, Sequence, Dict, Any, Callable
from tenacity import (
retry,
stop_after_attempt,
wait_exponential,
retry_if_exception_type,
)
from asyncio import iscoroutinefunction
from requests.exceptions import Timeout, ConnectionError
from llama_index.core.base.llms.types import ChatMessage, ChatResponse, MessageRole
def maybe_decode_sse_data(data: bytes) -> Union[dict, None]:
"""
Decode data from the streaming response.
Checks whether the incoming data is an actual
SSE data message.
Args:
data (bytes): The incoming data.
Returns:
Union[dict, None]: The decoded data or None.
"""
if data and data.startswith(b"data: "):
data = data.decode("utf-8").strip("data: ")
try:
return json.loads(data)
except json.JSONDecodeError:
return None
else:
return None
def maybe_extract_from_json(data: dict, key: str = "text") -> Union[str, None]:
"""
Extract text from a JSON response.
Args:
data (dict): The JSON response.
Returns:
Union[str, None]: The extracted text or None.
"""
if "choices" in data:
if len(data["choices"]) > 0 and key in data["choices"][0]:
return data["choices"][0][key]
else:
return None
else:
return None
def chat_messages_to_list(messages: Sequence[ChatMessage]) -> Sequence[Dict[str, Any]]:
"""
Convert a sequence of chat messages to a list of dictionaries.
Args:
messages (Sequence[ChatMessage]): A sequence of chat messages.
Returns:
Sequence[Dict[str, Any]]: A list of dictionaries.
"""
chat_messages = []
for message in messages:
if message.role in [
MessageRole.USER,
MessageRole.ASSISTANT,
MessageRole.SYSTEM,
MessageRole.TOOL,
]:
chat_messages.append(
{
"role": message.role,
"content": message.content,
**message.additional_kwargs,
}
)
return chat_messages
def create_retry_decorator(retry_limit: int) -> Callable[[Any], Any]:
"""
Create a retry decorator with the given retry limit.
Args:
retry_limit (int): The retry limit.
Returns:
Callable[[Any], Any]: The retry decorator.
"""
initial_delay = 4
max_delay = 10
return retry(
reraise=True,
stop=stop_after_attempt(retry_limit),
wait=wait_exponential(multiplier=1, min=initial_delay, max=max_delay),
retry=(
retry_if_exception_type(Timeout) | retry_if_exception_type(ConnectionError)
),
)
def retry_request(
request_func: Callable[..., Any], max_retries: int = 10, *args: Any, **kwargs: Any
) -> Any:
"""
Retry a request function.
Args:
request_func (Callable[..., Any]): The request function.
max_retries (int): The maximum number of retries.
*args (Any): The positional arguments.
**kwargs (Any): The keyword arguments.
Returns:
Any: The response.
"""
retry_func = create_retry_decorator(max_retries)
@retry_func
def retry_func(request_func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
return request_func(*args, **kwargs)
return retry_func(request_func, *args, **kwargs)
async def aretry_request(
request_func: Callable[..., Any], max_retries: int = 10, *args: Any, **kwargs: Any
) -> Any:
"""
Retry a request function asynchronously.
Args:
request_func (Callable[..., Any]): The request function.
max_retries (int): The maximum number of retries.
*args (Any): The positional arguments.
**kwargs (Any): The keyword arguments.
Returns:
Any: The response.
"""
retry_decorator = create_retry_decorator(max_retries)
@retry_decorator
async def retry_func(
request_func: Callable[..., Any], *args: Any, **kwargs: Any
) -> Any:
if iscoroutinefunction(request_func):
return await request_func(*args, **kwargs)
else:
return request_func(*args, **kwargs)
return await retry_func(request_func, *args, **kwargs)
def force_single_tool_call(response: ChatResponse) -> None:
"""
Force a single tool call in the response.
Overrides the tool calls in the response message.
"""
tool_calls = response.message.additional_kwargs.get("tool_calls", [])
if len(tool_calls) > 1:
response.message.additional_kwargs["tool_calls"] = [tool_calls[0]]
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
"""Interface to files in a Hugging face repository"""
root_marker = ""
protocol = "hf-legacy" # "hf://"" is reserved for hffs
def __init__(
self,
repo_info: Optional[DatasetInfo] = None,
token: Optional[str] = None,
**kwargs,
):
"""
The file system can be instantiated using a huggingface_hub.hf_api.DatasetInfo object,
and can be used to list and open files from a Hugging Face dataset repository with fsspec.
Args:
repo_info (:obj:``DatasetInfo``, `optional`):
Dataset repository info from huggingface_hub.HfApi().dataset_info(...)
token (:obj:``str``, `optional`):
Hugging Face token. Will default to the locally saved token if not provided.
"""
super().__init__(self, **kwargs)
self.repo_info = repo_info
self.token = token
self.dir_cache = None
def _get_dirs(self):
if self.dir_cache is None:
self.dir_cache = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
self.dir_cache[hf_file.rfilename] = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(d): {"name": str(d), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
}
)
def _open(
self,
path: str,
mode: str = "rb",
**kwargs,
):
if not isinstance(self.repo_info, DatasetInfo):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
return fsspec.open(
url,
mode=mode,
headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
).open()
def info(self, path, **kwargs):
self._get_dirs()
path = self._strip_protocol(path)
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(path)
def ls(self, path, detail=False, **kwargs):
self._get_dirs()
path = PurePosixPath(path.strip("/"))
paths = {}
for p, f in self.dir_cache.items():
p = PurePosixPath(p.strip("/"))
root = p.parent
if root == path:
paths[str(p)] = f
out = list(paths.values())
if detail:
return out
else:
return list(sorted(f["name"] for f in out))
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url, hf_hub_url
class HfFileSystem(AbstractFileSystem):
"""Interface to files in a Hugging face repository"""
root_marker = ""
protocol = "hf-legacy" # "hf://"" is reserved for hffs
def __init__(
self,
repo_info: Optional[DatasetInfo] = None,
token: Optional[str] = None,
**kwargs,
):
"""
The file system can be instantiated using a huggingface_hub.hf_api.DatasetInfo object,
and can be used to list and open files from a Hugging Face dataset repository with fsspec.
Args:
repo_info (:obj:``DatasetInfo``, `optional`):
Dataset repository info from huggingface_hub.HfApi().dataset_info(...)
token (:obj:``str``, `optional`):
Hugging Face token. Will default to the locally saved token if not provided.
"""
super().__init__(self, **kwargs)
self.repo_info = repo_info
self.token = token
self.dir_cache = None
def _get_dirs(self):
if self.dir_cache is None:
self.dir_cache = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
self.dir_cache[hf_file.rfilename] = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(d): {"name": str(d), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
}
)
def _open(
self,
path: str,
mode: str = "rb",
**kwargs,
):
if not isinstance(self.repo_info, DatasetInfo):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
return fsspec.open(
url,
mode=mode,
headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
).open()
def info(self, path, **kwargs):
self._get_dirs()
path = self._strip_protocol(path)
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(path)
def ls(self, path, detail=False, **kwargs):
self._get_dirs()
path = PurePosixPath(path.strip("/"))
paths = {}
for p, f in self.dir_cache.items():
p = PurePosixPath(p.strip("/"))
root = p.parent
if root == path:
paths[str(p)] = f
out = list(paths.values())
if detail:
return out
else:
return list(sorted(f["name"] for f in out))
|
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_usearch
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
num_queries = 1_000
queries = corpus[:num_queries]
# 2. Load the model
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
# 3. Encode the corpus
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
# 4. Encode the queries using the full precision
query_embeddings = model.encode(queries, normalize_embeddings=True)
for exact in (True, False):
for corpus_precision in ("float32", "int8", "binary"):
corpus_embeddings = quantize_embeddings(full_corpus_embeddings, precision=corpus_precision)
# NOTE: We can also pass "precision=..." to the encode method to quantize the embeddings directly,
# but we want to keep the full precision embeddings to act as a calibration dataset for quantizing
# the query embeddings. This is important only if you are using uint8 or int8 precision
# 5. Perform semantic search using usearch
rescore_multiplier = 4
results, search_time = semantic_search_usearch(
query_embeddings,
corpus_embeddings=corpus_embeddings,
corpus_precision=corpus_precision,
top_k=10,
calibration_embeddings=full_corpus_embeddings,
rescore=corpus_precision != "float32",
rescore_multiplier=rescore_multiplier,
exact=exact,
)
print(
f"{'Exact' if exact else 'Approximate'} search time using {corpus_precision} corpus: {search_time:.6f} seconds"
+ (f" (rescore_multiplier: {rescore_multiplier})" if corpus_precision != "float32" else "")
)
|
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_usearch
from datasets import load_dataset
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
num_queries = 1_000
queries = corpus[:num_queries]
# 2. Load the model
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
# 3. Encode the corpus
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
# 4. Encode the queries using the full precision
query_embeddings = model.encode(queries, normalize_embeddings=True)
for exact in (True, False):
for corpus_precision in ("float32", "int8", "binary"):
corpus_embeddings = quantize_embeddings(full_corpus_embeddings, precision=corpus_precision)
# NOTE: We can also pass "precision=..." to the encode method to quantize the embeddings directly,
# but we want to keep the full precision embeddings to act as a calibration dataset for quantizing
# the query embeddings. This is important only if you are using uint8 or int8 precision
# 5. Perform semantic search using usearch
rescore_multiplier = 4
results, search_time = semantic_search_usearch(
query_embeddings,
corpus_embeddings=corpus_embeddings,
corpus_precision=corpus_precision,
top_k=10,
calibration_embeddings=full_corpus_embeddings,
rescore=corpus_precision != "float32",
rescore_multiplier=rescore_multiplier,
exact=exact,
)
print(
f"{'Exact' if exact else 'Approximate'} search time using {corpus_precision} corpus: {search_time:.6f} seconds"
+ (f" (rescore_multiplier: {rescore_multiplier})" if corpus_precision != "float32" else "")
)
|
# pylint: disable=invalid-name,unused-import
"""For compatibility and optional dependencies."""
import importlib.util
import logging
import sys
import types
from typing import Any, Sequence, cast
import numpy as np
from ._typing import _T
assert sys.version_info[0] == 3, "Python 2 is no longer supported."
def py_str(x: bytes | None) -> str:
"""convert c string back to python string"""
assert x is not None # ctypes might return None
return x.decode("utf-8") # type: ignore
def lazy_isinstance(instance: Any, module: str, name: str) -> bool:
"""Use string representation to identify a type."""
# Notice, we use .__class__ as opposed to type() in order
# to support object proxies such as weakref.proxy
cls = instance.__class__
is_same_module = cls.__module__ == module
has_same_name = cls.__name__ == name
return is_same_module and has_same_name
# pandas
try:
from pandas import DataFrame, MultiIndex, Series
from pandas import concat as pandas_concat
PANDAS_INSTALLED = True
except ImportError:
MultiIndex = object
DataFrame = object
Series = object
pandas_concat = None
PANDAS_INSTALLED = False
# sklearn
try:
from sklearn.base import BaseEstimator as XGBModelBase
from sklearn.base import ClassifierMixin as XGBClassifierBase
from sklearn.base import RegressorMixin as XGBRegressorBase
from sklearn.preprocessing import LabelEncoder
try:
from sklearn.model_selection import KFold as XGBKFold
from sklearn.model_selection import StratifiedKFold as XGBStratifiedKFold
except ImportError:
from sklearn.cross_validation import KFold as XGBKFold
from sklearn.cross_validation import StratifiedKFold as XGBStratifiedKFold
SKLEARN_INSTALLED = True
except ImportError:
SKLEARN_INSTALLED = False
# used for compatibility without sklearn
XGBModelBase = object
XGBClassifierBase = object
XGBRegressorBase = object
LabelEncoder = object
XGBKFold = None
XGBStratifiedKFold = None
_logger = logging.getLogger(__name__)
def is_cudf_available() -> bool:
"""Check cuDF package available or not"""
if importlib.util.find_spec("cudf") is None:
return False
try:
import cudf
return True
except ImportError:
_logger.exception("Importing cuDF failed, use DMatrix instead of QDM")
return False
def is_cupy_available() -> bool:
"""Check cupy package available or not"""
if importlib.util.find_spec("cupy") is None:
return False
try:
import cupy
return True
except ImportError:
return False
def import_cupy() -> types.ModuleType:
"""Import cupy."""
if not is_cupy_available():
raise ImportError("`cupy` is required for handling CUDA buffer.")
import cupy # pylint: disable=import-error
return cupy
try:
import scipy.sparse as scipy_sparse
from scipy.sparse import csr_matrix as scipy_csr
except ImportError:
scipy_sparse = False
scipy_csr = object
def concat(value: Sequence[_T]) -> _T: # pylint: disable=too-many-return-statements
"""Concatenate row-wise."""
if isinstance(value[0], np.ndarray):
value_arr = cast(Sequence[np.ndarray], value)
return np.concatenate(value_arr, axis=0)
if scipy_sparse and isinstance(value[0], scipy_sparse.csr_matrix):
return scipy_sparse.vstack(value, format="csr")
if scipy_sparse and isinstance(value[0], scipy_sparse.csc_matrix):
return scipy_sparse.vstack(value, format="csc")
if scipy_sparse and isinstance(value[0], scipy_sparse.spmatrix):
# other sparse format will be converted to CSR.
return scipy_sparse.vstack(value, format="csr")
if PANDAS_INSTALLED and isinstance(value[0], (DataFrame, Series)):
return pandas_concat(value, axis=0)
if lazy_isinstance(value[0], "cudf.core.dataframe", "DataFrame") or lazy_isinstance(
value[0], "cudf.core.series", "Series"
):
from cudf import concat as CUDF_concat # pylint: disable=import-error
return CUDF_concat(value, axis=0)
from .data import _is_cupy_alike
if _is_cupy_alike(value[0]):
import cupy # pylint: disable=import-error
# pylint: disable=c-extension-no-member,no-member
d = cupy.cuda.runtime.getDevice()
for v in value:
arr = cast(cupy.ndarray, v)
d_v = arr.device.id
assert d_v == d, "Concatenating arrays on different devices."
return cupy.concatenate(value, axis=0)
raise TypeError("Unknown type.")
|
# pylint: disable= invalid-name, unused-import
"""For compatibility and optional dependencies."""
import importlib.util
import logging
import sys
import types
from typing import Any, Dict, List, Optional, Sequence, cast
import numpy as np
from ._typing import _T
assert sys.version_info[0] == 3, "Python 2 is no longer supported."
def py_str(x: bytes) -> str:
"""convert c string back to python string"""
return x.decode("utf-8") # type: ignore
def lazy_isinstance(instance: Any, module: str, name: str) -> bool:
"""Use string representation to identify a type."""
# Notice, we use .__class__ as opposed to type() in order
# to support object proxies such as weakref.proxy
cls = instance.__class__
is_same_module = cls.__module__ == module
has_same_name = cls.__name__ == name
return is_same_module and has_same_name
# pandas
try:
from pandas import DataFrame, MultiIndex, Series
from pandas import concat as pandas_concat
PANDAS_INSTALLED = True
except ImportError:
MultiIndex = object
DataFrame = object
Series = object
pandas_concat = None
PANDAS_INSTALLED = False
# sklearn
try:
from sklearn.base import BaseEstimator as XGBModelBase
from sklearn.base import ClassifierMixin as XGBClassifierBase
from sklearn.base import RegressorMixin as XGBRegressorBase
from sklearn.preprocessing import LabelEncoder
try:
from sklearn.model_selection import KFold as XGBKFold
from sklearn.model_selection import StratifiedKFold as XGBStratifiedKFold
except ImportError:
from sklearn.cross_validation import KFold as XGBKFold
from sklearn.cross_validation import StratifiedKFold as XGBStratifiedKFold
SKLEARN_INSTALLED = True
except ImportError:
SKLEARN_INSTALLED = False
# used for compatibility without sklearn
XGBModelBase = object
XGBClassifierBase = object
XGBRegressorBase = object
LabelEncoder = object
XGBKFold = None
XGBStratifiedKFold = None
_logger = logging.getLogger(__name__)
def is_cudf_available() -> bool:
"""Check cuDF package available or not"""
if importlib.util.find_spec("cudf") is None:
return False
try:
import cudf
return True
except ImportError:
_logger.exception("Importing cuDF failed, use DMatrix instead of QDM")
return False
def is_cupy_available() -> bool:
"""Check cupy package available or not"""
if importlib.util.find_spec("cupy") is None:
return False
try:
import cupy
return True
except ImportError:
return False
def import_cupy() -> types.ModuleType:
"""Import cupy."""
if not is_cupy_available():
raise ImportError("`cupy` is required for handling CUDA buffer.")
import cupy # pylint: disable=import-error
return cupy
try:
import scipy.sparse as scipy_sparse
from scipy.sparse import csr_matrix as scipy_csr
except ImportError:
scipy_sparse = False
scipy_csr = object
def concat(value: Sequence[_T]) -> _T: # pylint: disable=too-many-return-statements
"""Concatenate row-wise."""
if isinstance(value[0], np.ndarray):
value_arr = cast(Sequence[np.ndarray], value)
return np.concatenate(value_arr, axis=0)
if scipy_sparse and isinstance(value[0], scipy_sparse.csr_matrix):
return scipy_sparse.vstack(value, format="csr")
if scipy_sparse and isinstance(value[0], scipy_sparse.csc_matrix):
return scipy_sparse.vstack(value, format="csc")
if scipy_sparse and isinstance(value[0], scipy_sparse.spmatrix):
# other sparse format will be converted to CSR.
return scipy_sparse.vstack(value, format="csr")
if PANDAS_INSTALLED and isinstance(value[0], (DataFrame, Series)):
return pandas_concat(value, axis=0)
if lazy_isinstance(value[0], "cudf.core.dataframe", "DataFrame") or lazy_isinstance(
value[0], "cudf.core.series", "Series"
):
from cudf import concat as CUDF_concat # pylint: disable=import-error
return CUDF_concat(value, axis=0)
from .data import _is_cupy_alike
if _is_cupy_alike(value[0]):
import cupy # pylint: disable=import-error
# pylint: disable=c-extension-no-member,no-member
d = cupy.cuda.runtime.getDevice()
for v in value:
arr = cast(cupy.ndarray, v)
d_v = arr.device.id
assert d_v == d, "Concatenating arrays on different devices."
return cupy.concatenate(value, axis=0)
raise TypeError("Unknown type.")
|
from __future__ import annotations
import sys
from .BoW import BoW
from .CLIPModel import CLIPModel
from .CNN import CNN
from .Dense import Dense
from .Dropout import Dropout
from .InputModule import InputModule
from .LayerNorm import LayerNorm
from .LSTM import LSTM
from .Module import Module
from .Normalize import Normalize
from .Pooling import Pooling
from .Router import Asym, Router
from .StaticEmbedding import StaticEmbedding
from .Transformer import Transformer
from .WeightedLayerPooling import WeightedLayerPooling
from .WordEmbeddings import WordEmbeddings
from .WordWeights import WordWeights
sys.modules["sentence_transformers.models.Asym"] = sys.modules["sentence_transformers.models.Router"]
__all__ = [
"Transformer",
"StaticEmbedding",
"Asym",
"BoW",
"CNN",
"Dense",
"Dropout",
"LayerNorm",
"LSTM",
"Normalize",
"Pooling",
"WeightedLayerPooling",
"WordEmbeddings",
"WordWeights",
"CLIPModel",
"Module",
"InputModule",
"Router",
]
|
from __future__ import annotations
from .Asym import Asym
from .BoW import BoW
from .CLIPModel import CLIPModel
from .CNN import CNN
from .Dense import Dense
from .Dropout import Dropout
from .InputModule import InputModule
from .LayerNorm import LayerNorm
from .LSTM import LSTM
from .Module import Module
from .Normalize import Normalize
from .Pooling import Pooling
from .StaticEmbedding import StaticEmbedding
from .Transformer import Transformer
from .WeightedLayerPooling import WeightedLayerPooling
from .WordEmbeddings import WordEmbeddings
from .WordWeights import WordWeights
__all__ = [
"Transformer",
"StaticEmbedding",
"Asym",
"BoW",
"CNN",
"Dense",
"Dropout",
"LayerNorm",
"LSTM",
"Normalize",
"Pooling",
"WeightedLayerPooling",
"WordEmbeddings",
"WordWeights",
"CLIPModel",
"Module",
"InputModule",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@MODELS.register_module()
class CondInst(SingleStageInstanceSegmentor):
"""Implementation of `CondInst <https://arxiv.org/abs/2003.05664>`_"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
mask_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils.typing import ConfigType, OptConfigType, OptMultiConfig
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@MODELS.register_module()
class CondInst(SingleStageInstanceSegmentor):
"""Implementation of `CondInst <https://arxiv.org/abs/2003.05664>`_"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
mask_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from .torchscript_consistency_impl import Transforms, TransformsFloat32Only
@skipIfNoCuda
class TestTransformsFloat32(Transforms, TransformsFloat32Only, PytorchTestCase):
dtype = torch.float32
device = torch.device("cuda")
@skipIfNoCuda
class TestTransformsFloat64(Transforms, PytorchTestCase):
dtype = torch.float64
device = torch.device("cuda")
|
import torch
from torchaudio_unittest.common_utils import skipIfNoCuda, PytorchTestCase
from .torchscript_consistency_impl import Transforms, TransformsFloat32Only
@skipIfNoCuda
class TestTransformsFloat32(Transforms, TransformsFloat32Only, PytorchTestCase):
dtype = torch.float32
device = torch.device("cuda")
@skipIfNoCuda
class TestTransformsFloat64(Transforms, PytorchTestCase):
dtype = torch.float64
device = torch.device("cuda")
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoNdArray')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_ndarray')
class VideoNdArray(NdArray, VideoTensorMixin):
"""
Subclass of NdArray, to represent a video tensor.
Adds video-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import numpy as np
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.typing import VideoNdArray, VideoUrl
class MyVideoDoc(BaseDocument):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoNdArray]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=np.random.random((100, 224, 224, 3)),
)
doc_1.video_tensor.save(file_path='file_1.wav')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.video_tensor = parse_obj_as(VideoNdArray, doc_2.url.load())
doc_2.video_tensor.save(file_path='file_2.wav')
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoNdArray')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_ndarray')
class VideoNdArray(NdArray, VideoTensorMixin):
"""
Subclass of NdArray, to represent a video tensor.
Adds video-specific features to the tensor.
EXAMPLE USAGE
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
backbone=dict(
embed_dims=64,
num_layers=[3, 8, 27, 3],
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b4.pth')),
neck=dict(in_channels=[64, 128, 320, 512]))
# optimizer
optim_wrapper = dict(
optimizer=dict(
_delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001))
# dataset settings
train_dataloader = dict(batch_size=1, num_workers=1)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
auto_scale_lr = dict(base_batch_size=8)
|
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
backbone=dict(
embed_dims=64,
num_layers=[3, 8, 27, 3],
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b4.pth')),
neck=dict(in_channels=[64, 128, 320, 512]))
# optimizer
optimizer = dict(
_delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001)
# dataset settings
data = dict(samples_per_gpu=1, workers_per_gpu=1)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
auto_scale_lr = dict(base_batch_size=8)
|
"""News article reader using Newspaper."""
import logging
from importlib.util import find_spec
from typing import Any, Generator, List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
class NewsArticleReader(BaseReader):
"""
Simple news article reader.
Reads news articles from the web and parses them using the `newspaper` library.
Args:
text_mode (bool): Whether to load a text version or HTML version of the content (default=True).
use_nlp (bool): Whether to use NLP to extract additional summary and keywords (default=True).
newspaper_kwargs: Additional keyword arguments to pass to newspaper.Article. See
https://newspaper.readthedocs.io/en/latest/user_guide/quickstart.html#article
"""
def __init__(
self, text_mode: bool = True, use_nlp: bool = True, **newspaper_kwargs: Any
) -> None:
"""Initialize with parameters."""
if find_spec("newspaper") is None:
raise ImportError(
"`newspaper` package not found, please run `pip install newspaper3k`"
)
self.load_text = text_mode
self.use_nlp = use_nlp
self.newspaper_kwargs = newspaper_kwargs
def load_data(self, urls: List[str]) -> List[Document]:
"""
Load data from the list of news article urls.
Args:
urls (List[str]): List of URLs to load news articles.
Returns:
List[Document]: List of documents.
"""
if not isinstance(urls, list) and not isinstance(urls, Generator):
raise ValueError("urls must be a list or generator.")
documents = []
for url in urls:
from newspaper import Article
try:
article = Article(url, **self.newspaper_kwargs)
article.download()
article.parse()
if self.use_nlp:
article.nlp()
except Exception as e:
logger.error(f"Error fetching or processing {url}, exception: {e}")
continue
metadata = {
"title": getattr(article, "title", ""),
"link": getattr(article, "url", getattr(article, "canonical_link", "")),
"authors": getattr(article, "authors", []),
"language": getattr(article, "meta_lang", ""),
"description": getattr(article, "meta_description", ""),
"publish_date": getattr(article, "publish_date", ""),
}
if self.load_text:
content = article.text
else:
content = article.html
if self.use_nlp:
metadata["keywords"] = getattr(article, "keywords", [])
metadata["summary"] = getattr(article, "summary", "")
documents.append(Document(text=content, metadata=metadata))
return documents
if __name__ == "__main__":
reader = NewsArticleReader()
article = reader.load_data(["https://www.bbc.com/news/world-us-canada-56797998"])
print(article)
|
"""News article reader using Newspaper."""
import logging
from importlib.util import find_spec
from typing import Any, Generator, List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
class NewsArticleReader(BaseReader):
"""
Simple news article reader.
Reads news articles from the web and parses them using the `newspaper` library.
Args:
text_mode (bool): Whether to load a text version or HTML version of the content (default=True).
use_nlp (bool): Whether to use NLP to extract additional summary and keywords (default=True).
newspaper_kwargs: Additional keyword arguments to pass to newspaper.Article. See
https://newspaper.readthedocs.io/en/latest/user_guide/quickstart.html#article
"""
def __init__(
self, text_mode: bool = True, use_nlp: bool = True, **newspaper_kwargs: Any
) -> None:
"""Initialize with parameters."""
if find_spec("newspaper") is None:
raise ImportError(
"`newspaper` package not found, please run `pip install newspaper3k`"
)
self.load_text = text_mode
self.use_nlp = use_nlp
self.newspaper_kwargs = newspaper_kwargs
def load_data(self, urls: List[str]) -> List[Document]:
"""
Load data from the list of news article urls.
Args:
urls (List[str]): List of URLs to load news articles.
Returns:
List[Document]: List of documents.
"""
if not isinstance(urls, list) and not isinstance(urls, Generator):
raise ValueError("urls must be a list or generator.")
documents = []
for url in urls:
from newspaper import Article
try:
article = Article(url, **self.newspaper_kwargs)
article.download()
article.parse()
if self.use_nlp:
article.nlp()
except Exception as e:
logger.error(f"Error fetching or processing {url}, exception: {e}")
continue
metadata = {
"title": getattr(article, "title", ""),
"link": getattr(article, "url", getattr(article, "canonical_link", "")),
"authors": getattr(article, "authors", []),
"language": getattr(article, "meta_lang", ""),
"description": getattr(article, "meta_description", ""),
"publish_date": getattr(article, "publish_date", ""),
}
if self.load_text:
content = article.text
else:
content = article.html
if self.use_nlp:
metadata["keywords"] = getattr(article, "keywords", [])
metadata["summary"] = getattr(article, "summary", "")
documents.append(Document(text=content, metadata=metadata))
return documents
if __name__ == "__main__":
reader = NewsArticleReader()
article = reader.load_data(["https://www.bbc.com/news/world-us-canada-56797998"])
print(article)
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.ndarray import NdArray
MAX_INT_16 = 2**15
@_register_proto(proto_type_name='image_ndarray')
class ImageNdArray(AbstractImageTensor, NdArray):
"""
Subclass of NdArray, to represent an image tensor.
Adds image-specific features to the tensor.
For instance the ability convert the tensor back to image bytes which are
optimized to send over the wire
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageBytes, ImageNdArray, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageNdArray]
url: Optional[ImageUrl]
bytes: Optional[ImageBytes]
# from url
doc = MyImageDoc(
title='my_second_audio_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
```
---
"""
...
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.ndarray import NdArray
MAX_INT_16 = 2**15
@_register_proto(proto_type_name='image_ndarray')
class ImageNdArray(AbstractImageTensor, NdArray):
"""
Subclass of NdArray, to represent an image tensor.
Adds image-specific features to the tensor.
For instance the ability convert the tensor back to image bytes which are
optimized to send over the wire
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageNdArray, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageNdArray]
url: Optional[ImageUrl]
bytes: Optional[bytes]
# from url
doc = MyImageDoc(
title='my_second_audio_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
```
---
"""
...
|
import logging
import sentry_sdk
from sentry_sdk.integrations.anthropic import AnthropicIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from backend.util.settings import Settings
def sentry_init():
sentry_dsn = Settings().secrets.sentry_dsn
sentry_sdk.init(
dsn=sentry_dsn,
traces_sample_rate=1.0,
profiles_sample_rate=1.0,
environment=f"app:{Settings().config.app_env.value}-behave:{Settings().config.behave_as.value}",
_experiments={"enable_logs": True},
integrations=[
LoggingIntegration(sentry_logs_level=logging.INFO),
AnthropicIntegration(
include_prompts=False,
),
],
)
def sentry_alert(error: Exception):
sentry_sdk.capture_exception(error)
sentry_sdk.flush()
|
import logging
import sentry_sdk
from sentry_sdk.integrations.anthropic import AnthropicIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from backend.util.settings import Settings
def sentry_init():
sentry_dsn = Settings().secrets.sentry_dsn
sentry_sdk.init(
dsn=sentry_dsn,
traces_sample_rate=1.0,
profiles_sample_rate=1.0,
environment=f"app:{Settings().config.app_env.value}-behave:{Settings().config.behave_as.value}",
_experiments={"enable_logs": True},
integrations=[
LoggingIntegration(sentry_logs_level=logging.INFO),
AnthropicIntegration(
include_prompts=False,
),
],
)
|
"""
Experimental Object Oriented Distributed API - torch.distributed._dist2
=======================================================================
This is an experimental new API for PyTorch Distributed. This is actively in development and subject to change or deletion entirely.
This is intended as a proving ground for more flexible and object oriented distributed APIs.
"""
from collections.abc import Generator
from contextlib import contextmanager
from datetime import timedelta
from typing import Protocol, Union
import torch
from torch._C._distributed_c10d import (
_current_process_group,
_set_process_group,
Backend,
ProcessGroup,
ReduceOp,
Store,
)
from torch.distributed.rendezvous import rendezvous
_BACKENDS: dict[str, "ProcessGroupFactory"] = {}
__all__ = [
"ProcessGroup",
"ReduceOp",
"ProcessGroupFactory",
"register_backend",
"new_group",
"current_process_group",
"process_group",
]
class ProcessGroupFactory(Protocol):
"""Protocol for process group factories."""
def __call__(
self,
store: Store,
rank: int,
world_size: int,
timeout: timedelta,
device: torch.device,
pg_options: Backend.Options,
) -> ProcessGroup: ...
def register_backend(name: str, func: ProcessGroupFactory) -> None:
"""
Register a new process group backend.
Args:
name: The name of the backend.
func: The function to create the process group.
"""
if name in _BACKENDS:
raise ValueError(f"Backend {name} already registered")
_BACKENDS[name] = func
def _gloo_factory(
store: Store,
rank: int,
world_size: int,
timeout: timedelta,
device: torch.device,
pg_options: Backend.Options,
) -> ProcessGroup:
from torch.distributed import ProcessGroupGloo
assert pg_options is None, "Gloo backend does not support options"
backend_class = ProcessGroupGloo(store, rank, world_size, timeout)
backend_class._set_sequence_number_for_group()
pg = ProcessGroup(store, rank, world_size)
pg._set_default_backend(ProcessGroup.BackendType.GLOO)
# register devices
pg._register_backend(device, ProcessGroup.BackendType.GLOO, backend_class)
pg._register_backend(
torch.device("cpu"), ProcessGroup.BackendType.GLOO, backend_class
)
if torch.cuda.is_available():
pg._register_backend(
torch.device("cuda"), ProcessGroup.BackendType.GLOO, backend_class
)
return pg
def _nccl_factory(
store: Store,
rank: int,
world_size: int,
timeout: timedelta,
device: torch.device,
pg_options: Backend.Options,
) -> ProcessGroup:
from torch.distributed import ProcessGroupNCCL
assert isinstance(pg_options, ProcessGroupNCCL.Options)
pg_options._timeout = timeout
backend_class = ProcessGroupNCCL(store, rank, world_size, pg_options)
backend_class._set_sequence_number_for_group()
backend_class.eager_connect_single_device(device)
pg = ProcessGroup(store, rank, world_size)
pg._set_default_backend(ProcessGroup.BackendType.NCCL)
pg._register_backend(device, ProcessGroup.BackendType.NCCL, backend_class)
return pg
register_backend("gloo", _gloo_factory)
register_backend("nccl", _nccl_factory)
def new_group(
backend: str,
timeout: timedelta,
device: Union[str, torch.device],
pg_options: Backend.Options,
) -> ProcessGroup:
"""
Create a new process group with the given backend and options. This group is
independent and will not be globally registered and thus not usable via the
standard torch.distributed.* APIs.
Args:
backend: The backend to use for the process group.
timeout: The timeout for collective operations.
device: The device to use for the process group.
pg_options: The options to use for the process group.
Returns:
A new process group.
"""
if backend not in _BACKENDS:
raise ValueError(f"Backend {backend} not registered")
device = torch.device(device)
store, rank, world_size = next(iter(rendezvous("env://")))
store.set_timeout(timeout)
return _BACKENDS[backend](store, rank, world_size, timeout, device, pg_options)
def current_process_group() -> ProcessGroup:
"""
Get the current process group. Thread local method.
Returns:
The current process group.
"""
return _current_process_group()
@contextmanager
def process_group(pg: ProcessGroup) -> Generator[None, None, None]:
"""
Context manager for process groups. Thread local method.
Args:
pg: The process group to use.
"""
prev_pg = current_process_group()
_set_process_group(pg)
try:
yield
finally:
_set_process_group(prev_pg)
|
"""
Experimental Object Oriented Distributed API - torch.distributed._dist2
=======================================================================
This is an experimental new API for PyTorch Distributed. This is actively in development and subject to change or deletion entirely.
This is intended as a proving ground for more flexible and object oriented distributed APIs.
"""
from collections.abc import Generator
from contextlib import contextmanager
from datetime import timedelta
from typing import Protocol, Union
import torch
from torch._C._distributed_c10d import (
_current_process_group,
_set_process_group,
Backend,
ProcessGroup,
Store,
)
from torch.distributed.rendezvous import rendezvous
_BACKENDS: dict[str, "ProcessGroupFactory"] = {}
class ProcessGroupFactory(Protocol):
"""Protocol for process group factories."""
def __call__(
self,
store: Store,
rank: int,
world_size: int,
timeout: timedelta,
device: torch.device,
pg_options: Backend.Options,
) -> ProcessGroup: ...
def register_backend(name: str, func: ProcessGroupFactory) -> None:
"""
Register a new process group backend.
Args:
name: The name of the backend.
func: The function to create the process group.
"""
if name in _BACKENDS:
raise ValueError(f"Backend {name} already registered")
_BACKENDS[name] = func
def _gloo_factory(
store: Store,
rank: int,
world_size: int,
timeout: timedelta,
device: torch.device,
pg_options: Backend.Options,
) -> ProcessGroup:
from torch.distributed import ProcessGroupGloo
assert pg_options is None, "Gloo backend does not support options"
backend_class = ProcessGroupGloo(store, rank, world_size, timeout)
backend_class._set_sequence_number_for_group()
pg = ProcessGroup(store, rank, world_size)
pg._set_default_backend(ProcessGroup.BackendType.GLOO)
# register devices
pg._register_backend(device, ProcessGroup.BackendType.GLOO, backend_class)
pg._register_backend(
torch.device("cpu"), ProcessGroup.BackendType.GLOO, backend_class
)
if torch.cuda.is_available():
pg._register_backend(
torch.device("cuda"), ProcessGroup.BackendType.GLOO, backend_class
)
return pg
def _nccl_factory(
store: Store,
rank: int,
world_size: int,
timeout: timedelta,
device: torch.device,
pg_options: Backend.Options,
) -> ProcessGroup:
from torch.distributed import ProcessGroupNCCL
assert isinstance(pg_options, ProcessGroupNCCL.Options)
pg_options._timeout = timeout
backend_class = ProcessGroupNCCL(store, rank, world_size, pg_options)
backend_class._set_sequence_number_for_group()
backend_class.eager_connect_single_device(device)
pg = ProcessGroup(store, rank, world_size)
pg._set_default_backend(ProcessGroup.BackendType.NCCL)
pg._register_backend(device, ProcessGroup.BackendType.NCCL, backend_class)
return pg
register_backend("gloo", _gloo_factory)
register_backend("nccl", _nccl_factory)
def new_group(
backend: str,
timeout: timedelta,
device: Union[str, torch.device],
pg_options: Backend.Options,
) -> ProcessGroup:
"""
Create a new process group with the given backend and options. This group is
independent and will not be globally registered and thus not usable via the
standard torch.distributed.* APIs.
Args:
backend: The backend to use for the process group.
timeout: The timeout for collective operations.
device: The device to use for the process group.
pg_options: The options to use for the process group.
Returns:
A new process group.
"""
if backend not in _BACKENDS:
raise ValueError(f"Backend {backend} not registered")
device = torch.device(device)
store, rank, world_size = next(iter(rendezvous("env://")))
store.set_timeout(timeout)
return _BACKENDS[backend](store, rank, world_size, timeout, device, pg_options)
def current_process_group() -> ProcessGroup:
"""
Get the current process group. Thread local method.
Returns:
The current process group.
"""
return _current_process_group()
@contextmanager
def process_group(pg: ProcessGroup) -> Generator[None, None, None]:
"""
Context manager for process groups. Thread local method.
Args:
pg: The process group to use.
"""
prev_pg = current_process_group()
_set_process_group(pg)
try:
yield
finally:
_set_process_group(prev_pg)
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
model.similarity_fn_name = "cosine" # even though the model is trained with dot, we need to set it to cosine for evaluation as the score in the dataset is cosine similarity
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
"""
Model Sparsity Stats: num_rows: 1500, num_cols: 30522, row_non_zero_mean: 80.34333038330078, row_sparsity_mean: 0.9973676204681396
Model Sparsity Stats: num_rows: 1500, num_cols: 30522, row_non_zero_mean: 81.78266906738281, row_sparsity_mean: 0.9973204731941223
EmbeddingSimilarityEvaluator: Evaluating the model on the sts_dev dataset:
Cosine-Similarity : Pearson: 0.8430 Spearman: 0.8368
"""
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
# => Primary metric: sts_dev_spearman_cosine
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8368
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
model.similarity_fn_name = "cosine" # even though the model is trained with dot, we need to set it to cosine for evaluation as the score in the dataset is cosine similarity
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
"""
EmbeddingSimilarityEvaluator: Evaluating the model on the sts_dev dataset:
Cosine-Similarity : Pearson: 0.8430 Spearman: 0.8368
"""
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
# => Primary metric: sts_dev_spearman_cosine
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8368
|
import logging
from autogpt_libs.auth.middleware import auth_middleware
from fastapi import APIRouter, Depends, HTTPException
from backend.server.utils import get_user_id
from .models import ApiResponse, ChatRequest
from .service import OttoService
logger = logging.getLogger(__name__)
router = APIRouter()
@router.post(
"/ask",
response_model=ApiResponse,
dependencies=[Depends(auth_middleware)],
summary="Proxy Otto Chat Request",
)
async def proxy_otto_request(
request: ChatRequest, user_id: str = Depends(get_user_id)
) -> ApiResponse:
"""
Proxy requests to Otto API while adding necessary security headers and logging.
Requires an authenticated user.
"""
logger.debug("Forwarding request to Otto for user %s", user_id)
try:
return await OttoService.ask(request, user_id)
except Exception as e:
logger.exception("Otto request failed for user %s: %s", user_id, e)
raise HTTPException(
status_code=502,
detail={"message": str(e), "hint": "Check Otto service status."},
)
|
import logging
from autogpt_libs.auth.middleware import auth_middleware
from fastapi import APIRouter, Depends, HTTPException
from backend.server.utils import get_user_id
from .models import ApiResponse, ChatRequest
from .service import OttoService
logger = logging.getLogger(__name__)
router = APIRouter()
@router.post(
"/ask", response_model=ApiResponse, dependencies=[Depends(auth_middleware)]
)
async def proxy_otto_request(
request: ChatRequest, user_id: str = Depends(get_user_id)
) -> ApiResponse:
"""
Proxy requests to Otto API while adding necessary security headers and logging.
Requires an authenticated user.
"""
logger.debug("Forwarding request to Otto for user %s", user_id)
try:
return await OttoService.ask(request, user_id)
except Exception as e:
logger.exception("Otto request failed for user %s: %s", user_id, e)
raise HTTPException(
status_code=502,
detail={"message": str(e), "hint": "Check Otto service status."},
)
|
"""Tool for the Google search API."""
from typing import Optional
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.google_search import GoogleSearchAPIWrapper
@deprecated(
since="0.0.33",
removal="1.0",
alternative_import="langchain_google_community.GoogleSearchRun",
)
class GoogleSearchRun(BaseTool):
"""Tool that queries the Google search API."""
name: str = "google_search"
description: str = (
"A wrapper around Google Search. "
"Useful for when you need to answer questions about current events. "
"Input should be a search query."
)
api_wrapper: GoogleSearchAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_wrapper.run(query)
@deprecated(
since="0.0.33",
removal="1.0",
alternative_import="langchain_google_community.GoogleSearchResults",
)
class GoogleSearchResults(BaseTool):
"""Tool that queries the Google Search API and gets back json."""
name: str = "google_search_results_json"
description: str = (
"A wrapper around Google Search. "
"Useful for when you need to answer questions about current events. "
"Input should be a search query. Output is a JSON array of the query results"
)
num_results: int = 4
api_wrapper: GoogleSearchAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return str(self.api_wrapper.results(query, self.num_results))
|
"""Tool for the Google search API."""
from typing import Optional
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.google_search import GoogleSearchAPIWrapper
@deprecated(
since="0.0.33",
removal="1.0",
alternative_import="langchain_google_community.GoogleSearchRun",
)
class GoogleSearchRun(BaseTool): # type: ignore[override]
"""Tool that queries the Google search API."""
name: str = "google_search"
description: str = (
"A wrapper around Google Search. "
"Useful for when you need to answer questions about current events. "
"Input should be a search query."
)
api_wrapper: GoogleSearchAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_wrapper.run(query)
@deprecated(
since="0.0.33",
removal="1.0",
alternative_import="langchain_google_community.GoogleSearchResults",
)
class GoogleSearchResults(BaseTool): # type: ignore[override]
"""Tool that queries the Google Search API and gets back json."""
name: str = "google_search_results_json"
description: str = (
"A wrapper around Google Search. "
"Useful for when you need to answer questions about current events. "
"Input should be a search query. Output is a JSON array of the query results"
)
num_results: int = 4
api_wrapper: GoogleSearchAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return str(self.api_wrapper.results(query, self.num_results))
|
import glob
import os
import pytest
from jina import Document, Flow
from jina.constants import __uptime__, __windows__
from jina.enums import LogVerbosity
from jina.helper import colored
from jina.logging.logger import JinaLogger
cur_dir = os.path.dirname(os.path.abspath(__file__))
def log(logger: JinaLogger):
logger.debug('this is test debug message')
logger.info('this is test info message')
logger.success('this is test success message')
logger.warning('this is test warning message')
logger.error('this is test error message')
logger.critical('this is test critical message')
def test_color_log():
with JinaLogger('test_logger') as logger:
logger.debug('this is test debug message')
logger.info('this is test info message')
logger.info(f'this is test {colored("color", "red")} message')
logger.success('this is test success message')
logger.warning('this is test warning message')
logger.error('this is test error message')
logger.critical('this is test critical message')
def test_logging_syslog():
with JinaLogger(
'test_logger', log_config=os.path.join(cur_dir, 'yaml/syslog.yml')
) as logger:
log(logger)
assert len(logger.handlers) == 0 if __windows__ else 1
def test_logging_default():
import logging
import sys
with JinaLogger('test_logger') as logger:
log(logger)
assert len(logger.handlers) == 1
# test whether suppress root handlers
logging.root.handlers.append(logging.StreamHandler(sys.stdout))
with JinaLogger('test_logger', suppress_root_logging=False) as logger:
log(logger)
assert len(logging.root.handlers) > 0
def test_logging_level_yaml(monkeypatch):
monkeypatch.delenv('JINA_LOG_LEVEL', raising=True) # ignore global env
fn = os.path.join(cur_dir, f'jina-{__uptime__}.log')
with JinaLogger(
'test_file_logger', log_config=os.path.join(cur_dir, 'yaml/file.yml')
) as file_logger:
if os.path.exists(fn):
os.remove(fn)
log(file_logger)
assert file_logger.logger.level == LogVerbosity.from_string('INFO')
for f in glob.glob(cur_dir + '/*.log'):
os.remove(f)
def test_logging_file(monkeypatch):
monkeypatch.delenv('JINA_LOG_LEVEL', raising=True) # ignore global env
uptime = __uptime__.replace(':', '.') if __windows__ else __uptime__
fn = os.path.join(cur_dir, f'jina-{uptime}.log')
with JinaLogger(
'test_file_logger', log_config=os.path.join(cur_dir, 'yaml/file.yml')
) as file_logger:
log(file_logger)
assert os.path.exists(fn)
with open(fn) as fp:
assert len(fp.readlines()) == 5
for f in glob.glob(cur_dir + '/*.log'):
os.remove(f)
@pytest.mark.slow
def test_logging_quiet(caplog):
# no way to capture logs in multiprocessing
# see discussion here: https://github.com/pytest-dev/pytest/issues/3037#issuecomment-745050393
f = Flow().add(quiet=True).add()
with f:
f.index(Document())
|
import glob
import os
import pytest
from jina import Document, Flow
from jina.constants import __uptime__, __windows__
from jina.enums import LogVerbosity
from jina.helper import colored
from jina.logging.logger import JinaLogger
cur_dir = os.path.dirname(os.path.abspath(__file__))
def log(logger: JinaLogger):
logger.debug('this is test debug message')
logger.info('this is test info message')
logger.success('this is test success message')
logger.warning('this is test warning message')
logger.error('this is test error message')
logger.critical('this is test critical message')
def test_color_log():
with JinaLogger('test_logger') as logger:
logger.debug('this is test debug message')
logger.info('this is test info message')
logger.info(f'this is test {colored("color", "red")} message')
logger.success('this is test success message')
logger.warning('this is test warning message')
logger.error('this is test error message')
logger.critical('this is test critical message')
def test_logging_syslog():
with JinaLogger(
'test_logger', log_config=os.path.join(cur_dir, 'yaml/syslog.yml')
) as logger:
log(logger)
assert len(logger.handlers) == 0 if __windows__ else 1
def test_logging_default():
import logging
import sys
with JinaLogger('test_logger') as logger:
log(logger)
assert len(logger.handlers) == 1
# test whether suppress root handlers
logging.root.handlers.append(logging.StreamHandler(sys.stdout))
with JinaLogger('test_logger', suppress_root_logging=False) as logger:
log(logger)
assert len(logging.root.handlers) > 0
with JinaLogger('test_logger') as logger:
log(logger)
assert len(logging.root.handlers) == 0
def test_logging_level_yaml(monkeypatch):
monkeypatch.delenv('JINA_LOG_LEVEL', raising=True) # ignore global env
fn = os.path.join(cur_dir, f'jina-{__uptime__}.log')
with JinaLogger(
'test_file_logger', log_config=os.path.join(cur_dir, 'yaml/file.yml')
) as file_logger:
if os.path.exists(fn):
os.remove(fn)
log(file_logger)
assert file_logger.logger.level == LogVerbosity.from_string('INFO')
for f in glob.glob(cur_dir + '/*.log'):
os.remove(f)
def test_logging_file(monkeypatch):
monkeypatch.delenv('JINA_LOG_LEVEL', raising=True) # ignore global env
uptime = __uptime__.replace(':', '.') if __windows__ else __uptime__
fn = os.path.join(cur_dir, f'jina-{uptime}.log')
with JinaLogger(
'test_file_logger', log_config=os.path.join(cur_dir, 'yaml/file.yml')
) as file_logger:
log(file_logger)
assert os.path.exists(fn)
with open(fn) as fp:
assert len(fp.readlines()) == 5
for f in glob.glob(cur_dir + '/*.log'):
os.remove(f)
@pytest.mark.slow
def test_logging_quiet(caplog):
# no way to capture logs in multiprocessing
# see discussion here: https://github.com/pytest-dev/pytest/issues/3037#issuecomment-745050393
f = Flow().add(quiet=True).add()
with f:
f.index(Document())
|
from typing import Optional, Union, Callable, Tuple, TYPE_CHECKING, Dict
if TYPE_CHECKING:
import numpy as np
from docarray.typing import ArrayType
from docarray import DocumentArray
class MatchMixin:
"""A mixin that provides match functionality to DocumentArrays"""
def match(
self,
darray: 'DocumentArray',
metric: Union[
str, Callable[['ArrayType', 'ArrayType'], 'np.ndarray']
] = 'cosine',
limit: Optional[Union[int, float]] = 20,
normalization: Optional[Tuple[float, float]] = None,
metric_name: Optional[str] = None,
batch_size: Optional[int] = None,
exclude_self: bool = False,
filter: Optional[Dict] = None,
only_id: bool = False,
use_scipy: bool = False,
device: str = 'cpu',
num_worker: Optional[int] = 1,
on: Optional[str] = None,
**kwargs,
) -> None:
"""Compute embedding based nearest neighbour in `another` for each Document in `self`,
and store results in `matches`.
.. note::
'cosine', 'euclidean', 'sqeuclidean' are supported natively without extra dependency.
You can use other distance metric provided by ``scipy``, such as `braycurtis`, `canberra`, `chebyshev`,
`cityblock`, `correlation`, `cosine`, `dice`, `euclidean`, `hamming`, `jaccard`, `jensenshannon`,
`kulsinski`, `mahalanobis`, `matching`, `minkowski`, `rogerstanimoto`, `russellrao`, `seuclidean`,
`sokalmichener`, `sokalsneath`, `sqeuclidean`, `wminkowski`, `yule`.
To use scipy metric, please set ``use_scipy=True``.
- To make all matches values in [0, 1], use ``dA.match(dB, normalization=(0, 1))``
- To invert the distance as score and make all values in range [0, 1],
use ``dA.match(dB, normalization=(1, 0))``. Note, how ``normalization`` differs from the previous.
- If a custom metric distance is provided. Make sure that it returns scores as distances and not similarity, meaning the smaller the better.
:param darray: the other DocumentArray to match against
:param metric: the distance metric
:param limit: the maximum number of matches, when not given defaults to 20.
:param normalization: a tuple [a, b] to be used with min-max normalization,
the min distance will be rescaled to `a`, the max distance will be rescaled to `b`
all values will be rescaled into range `[a, b]`.
:param metric_name: if provided, then match result will be marked with this string.
:param batch_size: if provided, then ``darray`` is loaded in batches, where each of them is at most ``batch_size``
elements. When `darray` is big, this can significantly speedup the computation.
:param exclude_self: if set, Documents in ``darray`` with same ``id`` as the left-hand values will not be
considered as matches.
:param filter: filter query used for pre-filtering
:param only_id: if set, then returning matches will only contain ``id``
:param use_scipy: if set, use ``scipy`` as the computation backend. Note, ``scipy`` does not support distance
on sparse matrix.
:param device: the computational device for ``.match()``, can be either `cpu` or `cuda`.
:param num_worker: the number of parallel workers. If not given, then the number of CPUs in the system will be used.
.. note::
This argument is only effective when ``batch_size`` is set.
:param on: specifies a subindex to search on. If set, the returned DocumentArray will be retrieved from the given subindex.
:param kwargs: other kwargs.
"""
if not (self and darray):
return
for d in self:
d.matches.clear()
match_docs = darray.find(
self,
metric=metric,
limit=limit,
normalization=normalization,
metric_name=metric_name,
batch_size=batch_size,
exclude_self=exclude_self,
filter=filter,
only_id=only_id,
use_scipy=use_scipy,
device=device,
num_worker=num_worker,
on=on,
**kwargs,
)
if not isinstance(match_docs, list):
match_docs = [match_docs]
for m, d in zip(match_docs, self):
d.matches = m
|
from typing import Optional, Union, Callable, Tuple, TYPE_CHECKING, Dict
if TYPE_CHECKING:
import numpy as np
from docarray.typing import ArrayType
from docarray import DocumentArray
class MatchMixin:
"""A mixin that provides match functionality to DocumentArrays"""
def match(
self,
darray: 'DocumentArray',
metric: Union[
str, Callable[['ArrayType', 'ArrayType'], 'np.ndarray']
] = 'cosine',
limit: Optional[Union[int, float]] = 20,
normalization: Optional[Tuple[float, float]] = None,
metric_name: Optional[str] = None,
batch_size: Optional[int] = None,
exclude_self: bool = False,
filter: Optional[Dict] = None,
only_id: bool = False,
use_scipy: bool = False,
device: str = 'cpu',
num_worker: Optional[int] = 1,
**kwargs,
) -> None:
"""Compute embedding based nearest neighbour in `another` for each Document in `self`,
and store results in `matches`.
.. note::
'cosine', 'euclidean', 'sqeuclidean' are supported natively without extra dependency.
You can use other distance metric provided by ``scipy``, such as `braycurtis`, `canberra`, `chebyshev`,
`cityblock`, `correlation`, `cosine`, `dice`, `euclidean`, `hamming`, `jaccard`, `jensenshannon`,
`kulsinski`, `mahalanobis`, `matching`, `minkowski`, `rogerstanimoto`, `russellrao`, `seuclidean`,
`sokalmichener`, `sokalsneath`, `sqeuclidean`, `wminkowski`, `yule`.
To use scipy metric, please set ``use_scipy=True``.
- To make all matches values in [0, 1], use ``dA.match(dB, normalization=(0, 1))``
- To invert the distance as score and make all values in range [0, 1],
use ``dA.match(dB, normalization=(1, 0))``. Note, how ``normalization`` differs from the previous.
- If a custom metric distance is provided. Make sure that it returns scores as distances and not similarity, meaning the smaller the better.
:param darray: the other DocumentArray to match against
:param metric: the distance metric
:param limit: the maximum number of matches, when not given defaults to 20.
:param normalization: a tuple [a, b] to be used with min-max normalization,
the min distance will be rescaled to `a`, the max distance will be rescaled to `b`
all values will be rescaled into range `[a, b]`.
:param metric_name: if provided, then match result will be marked with this string.
:param batch_size: if provided, then ``darray`` is loaded in batches, where each of them is at most ``batch_size``
elements. When `darray` is big, this can significantly speedup the computation.
:param exclude_self: if set, Documents in ``darray`` with same ``id`` as the left-hand values will not be
considered as matches.
:param filter: filter query used for pre-filtering
:param only_id: if set, then returning matches will only contain ``id``
:param use_scipy: if set, use ``scipy`` as the computation backend. Note, ``scipy`` does not support distance
on sparse matrix.
:param device: the computational device for ``.match()``, can be either `cpu` or `cuda`.
:param num_worker: the number of parallel workers. If not given, then the number of CPUs in the system will be used.
.. note::
This argument is only effective when ``batch_size`` is set.
:param kwargs: other kwargs.
"""
if not (self and darray):
return
for d in self:
d.matches.clear()
match_docs = darray.find(
self,
metric=metric,
limit=limit,
normalization=normalization,
metric_name=metric_name,
batch_size=batch_size,
exclude_self=exclude_self,
filter=filter,
only_id=only_id,
use_scipy=use_scipy,
device=device,
num_worker=num_worker,
)
if not isinstance(match_docs, list):
match_docs = [match_docs]
for m, d in zip(match_docs, self):
d.matches = m
|
import numpy as np
import pytest
import torch
from docarray.base_document import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import AnyUrl, NdArray, TorchTensor
@pytest.fixture()
def doc_and_class():
class Mmdoc(BaseDocument):
img: NdArray
url: AnyUrl
txt: str
torch_tensor: TorchTensor
bytes_: bytes
doc = Mmdoc(
img=np.zeros((10)),
url='http://doccaray.io',
txt='hello',
torch_tensor=torch.zeros(10),
bytes_=b'hello',
)
return doc, Mmdoc
def test_to_json(doc_and_class):
doc, _ = doc_and_class
doc.json()
def test_from_json(doc_and_class):
doc, Mmdoc = doc_and_class
new_doc = Mmdoc.parse_raw(doc.json())
for (field, field2) in zip(doc.dict().keys(), new_doc.dict().keys()):
if field in ['torch_tensor', 'img']:
assert (getattr(doc, field) == getattr(doc, field2)).all()
else:
assert getattr(doc, field) == getattr(doc, field2)
def test_to_dict_to_json(doc_and_class):
doc, Mmdoc = doc_and_class
new_doc = Mmdoc.parse_raw(orjson_dumps(doc.dict()))
for (field, field2) in zip(doc.dict().keys(), new_doc.dict().keys()):
if field in ['torch_tensor', 'img']:
assert (getattr(doc, field) == getattr(doc, field2)).all()
else:
assert getattr(doc, field) == getattr(doc, field2)
|
import numpy as np
import pytest
import torch
from docarray.base_document import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import AnyUrl, NdArray, TorchTensor
@pytest.fixture()
def doc_and_class():
class Mmdoc(BaseDocument):
img: NdArray
url: AnyUrl
txt: str
torch_tensor: TorchTensor
doc = Mmdoc(
img=np.zeros((10)),
url='http://doccaray.io',
txt='hello',
torch_tensor=torch.zeros(10),
)
return doc, Mmdoc
def test_to_json(doc_and_class):
doc, _ = doc_and_class
doc.json()
def test_from_json(doc_and_class):
doc, Mmdoc = doc_and_class
new_doc = Mmdoc.parse_raw(doc.json())
for (field, field2) in zip(doc.dict().keys(), new_doc.dict().keys()):
if field in ['torch_tensor', 'img']:
assert (getattr(doc, field) == getattr(doc, field2)).all()
else:
assert getattr(doc, field) == getattr(doc, field2)
def test_to_dict_to_json(doc_and_class):
doc, Mmdoc = doc_and_class
new_doc = Mmdoc.parse_raw(orjson_dumps(doc.dict()))
for (field, field2) in zip(doc.dict().keys(), new_doc.dict().keys()):
if field in ['torch_tensor', 'img']:
assert (getattr(doc, field) == getattr(doc, field2)).all()
else:
assert getattr(doc, field) == getattr(doc, field2)
|
import pathlib
from collections.abc import Iterator
from typing import Any, BinaryIO, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
read_mat,
)
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import BoundingBoxes
from .._api import register_dataset, register_info
class StanfordCarsLabelReader(IterDataPipe[tuple[int, int, int, int, int, str]]):
def __init__(self, datapipe: IterDataPipe[dict[str, Any]]) -> None:
self.datapipe = datapipe
def __iter__(self) -> Iterator[tuple[int, int, int, int, int, str]]:
for _, file in self.datapipe:
data = read_mat(file, squeeze_me=True)
for ann in data["annotations"]:
yield tuple(ann) # type: ignore[misc]
NAME = "stanford-cars"
@register_info(NAME)
def _info() -> dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class StanfordCars(Dataset):
"""Stanford Cars dataset.
homepage="https://ai.stanford.edu/~jkrause/cars/car_dataset.html",
dependencies=scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_URL_ROOT = "https://ai.stanford.edu/~jkrause/"
_URLS = {
"train": f"{_URL_ROOT}car196/cars_train.tgz",
"test": f"{_URL_ROOT}car196/cars_test.tgz",
"cars_test_annos_withlabels": f"{_URL_ROOT}car196/cars_test_annos_withlabels.mat",
"car_devkit": f"{_URL_ROOT}cars/car_devkit.tgz",
}
_CHECKSUM = {
"train": "b97deb463af7d58b6bfaa18b2a4de9829f0f79e8ce663dfa9261bf7810e9accd",
"test": "bffea656d6f425cba3c91c6d83336e4c5f86c6cffd8975b0f375d3a10da8e243",
"cars_test_annos_withlabels": "790f75be8ea34eeded134cc559332baf23e30e91367e9ddca97d26ed9b895f05",
"car_devkit": "512b227b30e2f0a8aab9e09485786ab4479582073a144998da74d64b801fd288",
}
def _resources(self) -> list[OnlineResource]:
resources: list[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUM[self._split])]
if self._split == "train":
resources.append(HttpResource(url=self._URLS["car_devkit"], sha256=self._CHECKSUM["car_devkit"]))
else:
resources.append(
HttpResource(
self._URLS["cars_test_annos_withlabels"], sha256=self._CHECKSUM["cars_test_annos_withlabels"]
)
)
return resources
def _prepare_sample(self, data: tuple[tuple[str, BinaryIO], tuple[int, int, int, int, int, str]]) -> dict[str, Any]:
image, target = data
path, buffer = image
image = EncodedImage.from_file(buffer)
return dict(
path=path,
image=image,
label=Label(target[4] - 1, categories=self._categories),
bounding_boxes=BoundingBoxes(target[:4], format="xyxy", spatial_size=image.spatial_size),
)
def _datapipe(self, resource_dps: list[IterDataPipe]) -> IterDataPipe[dict[str, Any]]:
images_dp, targets_dp = resource_dps
if self._split == "train":
targets_dp = Filter(targets_dp, path_comparator("name", "cars_train_annos.mat"))
targets_dp = StanfordCarsLabelReader(targets_dp)
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self) -> list[str]:
resources = self._resources()
devkit_dp = resources[1].load(self._root)
meta_dp = Filter(devkit_dp, path_comparator("name", "cars_meta.mat"))
_, meta_file = next(iter(meta_dp))
return list(read_mat(meta_file, squeeze_me=True)["class_names"])
def __len__(self) -> int:
return 8_144 if self._split == "train" else 8_041
|
import pathlib
from typing import Any, BinaryIO, Dict, Iterator, List, Tuple, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
read_mat,
)
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import BoundingBoxes
from .._api import register_dataset, register_info
class StanfordCarsLabelReader(IterDataPipe[Tuple[int, int, int, int, int, str]]):
def __init__(self, datapipe: IterDataPipe[Dict[str, Any]]) -> None:
self.datapipe = datapipe
def __iter__(self) -> Iterator[Tuple[int, int, int, int, int, str]]:
for _, file in self.datapipe:
data = read_mat(file, squeeze_me=True)
for ann in data["annotations"]:
yield tuple(ann) # type: ignore[misc]
NAME = "stanford-cars"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class StanfordCars(Dataset):
"""Stanford Cars dataset.
homepage="https://ai.stanford.edu/~jkrause/cars/car_dataset.html",
dependencies=scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_URL_ROOT = "https://ai.stanford.edu/~jkrause/"
_URLS = {
"train": f"{_URL_ROOT}car196/cars_train.tgz",
"test": f"{_URL_ROOT}car196/cars_test.tgz",
"cars_test_annos_withlabels": f"{_URL_ROOT}car196/cars_test_annos_withlabels.mat",
"car_devkit": f"{_URL_ROOT}cars/car_devkit.tgz",
}
_CHECKSUM = {
"train": "b97deb463af7d58b6bfaa18b2a4de9829f0f79e8ce663dfa9261bf7810e9accd",
"test": "bffea656d6f425cba3c91c6d83336e4c5f86c6cffd8975b0f375d3a10da8e243",
"cars_test_annos_withlabels": "790f75be8ea34eeded134cc559332baf23e30e91367e9ddca97d26ed9b895f05",
"car_devkit": "512b227b30e2f0a8aab9e09485786ab4479582073a144998da74d64b801fd288",
}
def _resources(self) -> List[OnlineResource]:
resources: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUM[self._split])]
if self._split == "train":
resources.append(HttpResource(url=self._URLS["car_devkit"], sha256=self._CHECKSUM["car_devkit"]))
else:
resources.append(
HttpResource(
self._URLS["cars_test_annos_withlabels"], sha256=self._CHECKSUM["cars_test_annos_withlabels"]
)
)
return resources
def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Tuple[int, int, int, int, int, str]]) -> Dict[str, Any]:
image, target = data
path, buffer = image
image = EncodedImage.from_file(buffer)
return dict(
path=path,
image=image,
label=Label(target[4] - 1, categories=self._categories),
bounding_boxes=BoundingBoxes(target[:4], format="xyxy", spatial_size=image.spatial_size),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, targets_dp = resource_dps
if self._split == "train":
targets_dp = Filter(targets_dp, path_comparator("name", "cars_train_annos.mat"))
targets_dp = StanfordCarsLabelReader(targets_dp)
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self) -> List[str]:
resources = self._resources()
devkit_dp = resources[1].load(self._root)
meta_dp = Filter(devkit_dp, path_comparator("name", "cars_meta.mat"))
_, meta_file = next(iter(meta_dp))
return list(read_mat(meta_file, squeeze_me=True)["class_names"])
def __len__(self) -> int:
return 8_144 if self._split == "train" else 8_041
|
import numpy as np
import pytest
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src import testing
class RandomGrayscaleTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.RandomGrayscale,
init_kwargs={
"factor": 0.5,
"data_format": "channels_last",
},
input_shape=(1, 2, 2, 3),
supports_masking=False,
expected_output_shape=(1, 2, 2, 3),
)
self.run_layer_test(
layers.RandomGrayscale,
init_kwargs={
"factor": 0.5,
"data_format": "channels_first",
},
input_shape=(1, 3, 2, 2),
supports_masking=False,
expected_output_shape=(1, 3, 2, 2),
)
@parameterized.named_parameters(
("channels_last", "channels_last"), ("channels_first", "channels_first")
)
def test_grayscale_conversion(self, data_format):
if data_format == "channels_last":
xs = np.random.uniform(0, 255, size=(2, 4, 4, 3)).astype(np.float32)
layer = layers.RandomGrayscale(factor=1.0, data_format=data_format)
transformed = ops.convert_to_numpy(layer(xs))
self.assertEqual(transformed.shape[-1], 3)
for img in transformed:
r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
self.assertTrue(np.allclose(r, g) and np.allclose(g, b))
else:
xs = np.random.uniform(0, 255, size=(2, 3, 4, 4)).astype(np.float32)
layer = layers.RandomGrayscale(factor=1.0, data_format=data_format)
transformed = ops.convert_to_numpy(layer(xs))
self.assertEqual(transformed.shape[1], 3)
for img in transformed:
r, g, b = img[0], img[1], img[2]
self.assertTrue(np.allclose(r, g) and np.allclose(g, b))
def test_invalid_factor(self):
with self.assertRaises(ValueError):
layers.RandomGrayscale(factor=-0.1)
with self.assertRaises(ValueError):
layers.RandomGrayscale(factor=1.1)
def test_tf_data_compatibility(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
input_data = np.random.random((2, 8, 8, 3)) * 255
else:
input_data = np.random.random((2, 3, 8, 8)) * 255
layer = layers.RandomGrayscale(factor=0.5, data_format=data_format)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output_array = output.numpy()
self.assertEqual(output_array.shape, input_data.shape)
def test_grayscale_with_single_color_image(self):
test_cases = [
# batched inputs
(np.full((1, 4, 4, 3), 128, dtype=np.float32), "channels_last"),
(np.full((1, 3, 4, 4), 128, dtype=np.float32), "channels_first"),
# unbatched inputs
(np.full((4, 4, 3), 128, dtype=np.float32), "channels_last"),
(np.full((3, 4, 4), 128, dtype=np.float32), "channels_first"),
]
for xs, data_format in test_cases:
layer = layers.RandomGrayscale(factor=1.0, data_format=data_format)
transformed = ops.convert_to_numpy(layer(xs))
# Determine if the input was batched
is_batched = len(xs.shape) == 4
# If batched, select the first image from the batch for inspection.
# Otherwise, use the transformed image directly.
# `image_to_inspect` will always be a 3D tensor.
if is_batched:
image_to_inspect = transformed[0]
else:
image_to_inspect = transformed
if data_format == "channels_last":
# image_to_inspect has shape (H, W, C),
# get the first channel [:, :, 0]
channel_data = image_to_inspect[:, :, 0]
else: # data_format == "channels_first"
# image_to_inspect has shape (C, H, W),
# get the first channel [0, :, :]
channel_data = image_to_inspect[0, :, :]
unique_vals = np.unique(channel_data)
self.assertEqual(len(unique_vals), 1)
|
import numpy as np
import pytest
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src import testing
class RandomGrayscaleTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.RandomGrayscale,
init_kwargs={
"factor": 0.5,
"data_format": "channels_last",
},
input_shape=(1, 2, 2, 3),
supports_masking=False,
expected_output_shape=(1, 2, 2, 3),
)
self.run_layer_test(
layers.RandomGrayscale,
init_kwargs={
"factor": 0.5,
"data_format": "channels_first",
},
input_shape=(1, 3, 2, 2),
supports_masking=False,
expected_output_shape=(1, 3, 2, 2),
)
@parameterized.named_parameters(
("channels_last", "channels_last"), ("channels_first", "channels_first")
)
def test_grayscale_conversion(self, data_format):
if data_format == "channels_last":
xs = np.random.uniform(0, 255, size=(2, 4, 4, 3)).astype(np.float32)
layer = layers.RandomGrayscale(factor=1.0, data_format=data_format)
transformed = ops.convert_to_numpy(layer(xs))
self.assertEqual(transformed.shape[-1], 3)
for img in transformed:
r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
self.assertTrue(np.allclose(r, g) and np.allclose(g, b))
else:
xs = np.random.uniform(0, 255, size=(2, 3, 4, 4)).astype(np.float32)
layer = layers.RandomGrayscale(factor=1.0, data_format=data_format)
transformed = ops.convert_to_numpy(layer(xs))
self.assertEqual(transformed.shape[1], 3)
for img in transformed:
r, g, b = img[0], img[1], img[2]
self.assertTrue(np.allclose(r, g) and np.allclose(g, b))
def test_invalid_factor(self):
with self.assertRaises(ValueError):
layers.RandomGrayscale(factor=-0.1)
with self.assertRaises(ValueError):
layers.RandomGrayscale(factor=1.1)
def test_tf_data_compatibility(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
input_data = np.random.random((2, 8, 8, 3)) * 255
else:
input_data = np.random.random((2, 3, 8, 8)) * 255
layer = layers.RandomGrayscale(factor=0.5, data_format=data_format)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output_array = output.numpy()
self.assertEqual(output_array.shape, input_data.shape)
def test_grayscale_with_single_color_image(self):
test_cases = [
(np.full((1, 4, 4, 3), 128, dtype=np.float32), "channels_last"),
(np.full((1, 3, 4, 4), 128, dtype=np.float32), "channels_first"),
]
for xs, data_format in test_cases:
layer = layers.RandomGrayscale(factor=1.0, data_format=data_format)
transformed = ops.convert_to_numpy(layer(xs))
if data_format == "channels_last":
unique_vals = np.unique(transformed[0, :, :, 0])
self.assertEqual(len(unique_vals), 1)
else:
unique_vals = np.unique(transformed[0, 0, :, :])
self.assertEqual(len(unique_vals), 1)
|
from ._source_separation_pipeline import CONVTASNET_BASE_LIBRI2MIX, SourceSeparationBundle
from ._tts import (
TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH,
TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH,
TACOTRON2_WAVERNN_CHAR_LJSPEECH,
TACOTRON2_WAVERNN_PHONE_LJSPEECH,
Tacotron2TTSBundle,
)
from ._wav2vec2.impl import (
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,
WAV2VEC2_ASR_BASE_100H,
WAV2VEC2_ASR_BASE_10M,
WAV2VEC2_ASR_BASE_960H,
WAV2VEC2_ASR_LARGE_100H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_960H,
WAV2VEC2_ASR_LARGE_LV60K_100H,
WAV2VEC2_ASR_LARGE_LV60K_10M,
WAV2VEC2_ASR_LARGE_LV60K_960H,
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
Wav2Vec2ASRBundle,
Wav2Vec2Bundle,
)
from .rnnt_pipeline import EMFORMER_RNNT_BASE_LIBRISPEECH, RNNTBundle
__all__ = [
"Wav2Vec2Bundle",
"Wav2Vec2ASRBundle",
"WAV2VEC2_BASE",
"WAV2VEC2_LARGE",
"WAV2VEC2_LARGE_LV60K",
"WAV2VEC2_ASR_BASE_10M",
"WAV2VEC2_ASR_BASE_100H",
"WAV2VEC2_ASR_BASE_960H",
"WAV2VEC2_ASR_LARGE_10M",
"WAV2VEC2_ASR_LARGE_100H",
"WAV2VEC2_ASR_LARGE_960H",
"WAV2VEC2_ASR_LARGE_LV60K_10M",
"WAV2VEC2_ASR_LARGE_LV60K_100H",
"WAV2VEC2_ASR_LARGE_LV60K_960H",
"WAV2VEC2_XLSR53",
"VOXPOPULI_ASR_BASE_10K_EN",
"VOXPOPULI_ASR_BASE_10K_ES",
"VOXPOPULI_ASR_BASE_10K_DE",
"VOXPOPULI_ASR_BASE_10K_FR",
"VOXPOPULI_ASR_BASE_10K_IT",
"HUBERT_BASE",
"HUBERT_LARGE",
"HUBERT_XLARGE",
"HUBERT_ASR_LARGE",
"HUBERT_ASR_XLARGE",
"Tacotron2TTSBundle",
"TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH",
"TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH",
"TACOTRON2_WAVERNN_CHAR_LJSPEECH",
"TACOTRON2_WAVERNN_PHONE_LJSPEECH",
"RNNTBundle",
"EMFORMER_RNNT_BASE_LIBRISPEECH",
"SourceSeparationBundle",
"CONVTASNET_BASE_LIBRI2MIX",
]
|
from ._tts import (
TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH,
TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH,
TACOTRON2_WAVERNN_CHAR_LJSPEECH,
TACOTRON2_WAVERNN_PHONE_LJSPEECH,
Tacotron2TTSBundle,
)
from ._wav2vec2.impl import (
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,
WAV2VEC2_ASR_BASE_100H,
WAV2VEC2_ASR_BASE_10M,
WAV2VEC2_ASR_BASE_960H,
WAV2VEC2_ASR_LARGE_100H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_960H,
WAV2VEC2_ASR_LARGE_LV60K_100H,
WAV2VEC2_ASR_LARGE_LV60K_10M,
WAV2VEC2_ASR_LARGE_LV60K_960H,
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
Wav2Vec2ASRBundle,
Wav2Vec2Bundle,
)
from .rnnt_pipeline import EMFORMER_RNNT_BASE_LIBRISPEECH, RNNTBundle
__all__ = [
"Wav2Vec2Bundle",
"Wav2Vec2ASRBundle",
"WAV2VEC2_BASE",
"WAV2VEC2_LARGE",
"WAV2VEC2_LARGE_LV60K",
"WAV2VEC2_ASR_BASE_10M",
"WAV2VEC2_ASR_BASE_100H",
"WAV2VEC2_ASR_BASE_960H",
"WAV2VEC2_ASR_LARGE_10M",
"WAV2VEC2_ASR_LARGE_100H",
"WAV2VEC2_ASR_LARGE_960H",
"WAV2VEC2_ASR_LARGE_LV60K_10M",
"WAV2VEC2_ASR_LARGE_LV60K_100H",
"WAV2VEC2_ASR_LARGE_LV60K_960H",
"WAV2VEC2_XLSR53",
"VOXPOPULI_ASR_BASE_10K_EN",
"VOXPOPULI_ASR_BASE_10K_ES",
"VOXPOPULI_ASR_BASE_10K_DE",
"VOXPOPULI_ASR_BASE_10K_FR",
"VOXPOPULI_ASR_BASE_10K_IT",
"HUBERT_BASE",
"HUBERT_LARGE",
"HUBERT_XLARGE",
"HUBERT_ASR_LARGE",
"HUBERT_ASR_XLARGE",
"Tacotron2TTSBundle",
"TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH",
"TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH",
"TACOTRON2_WAVERNN_CHAR_LJSPEECH",
"TACOTRON2_WAVERNN_PHONE_LJSPEECH",
"RNNTBundle",
"EMFORMER_RNNT_BASE_LIBRISPEECH",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .boxinst import BoxInst
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .ddod import DDOD
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
'RTMDet', 'Detectron2Wrapper', 'RTMDet', 'CrowdDet', 'CondInst', 'BoxInst'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .ddod import DDOD
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
'RTMDet', 'Detectron2Wrapper', 'RTMDet', 'CrowdDet', 'CondInst'
]
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.host = self.runtime_args.host
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for MobileNetV1."""
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
DefaultFastImageProcessorKwargs,
Unpack,
)
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, PILImageResampling
from ...utils import auto_docstring
@auto_docstring
class MobileNetV1ImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"shortest_edge": 256}
default_to_square = False
crop_size = {"height": 224, "width": 224}
do_resize = True
do_center_crop = True
do_rescale = True
do_normalize = True
def __init__(self, **kwargs: Unpack[DefaultFastImageProcessorKwargs]) -> None:
super().__init__(**kwargs)
__all__ = ["MobileNetV1ImageProcessorFast"]
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for MobileNetV1."""
from ...image_processing_utils_fast import (
BASE_IMAGE_PROCESSOR_FAST_DOCSTRING,
BaseImageProcessorFast,
DefaultFastImageProcessorKwargs,
Unpack,
)
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, PILImageResampling
from ...utils import add_start_docstrings
@add_start_docstrings(
"Constructs a fast MobileNetV1 image processor.",
BASE_IMAGE_PROCESSOR_FAST_DOCSTRING,
)
class MobileNetV1ImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"shortest_edge": 256}
default_to_square = False
crop_size = {"height": 224, "width": 224}
do_resize = True
do_center_crop = True
do_rescale = True
do_normalize = True
def __init__(self, **kwargs: Unpack[DefaultFastImageProcessorKwargs]) -> None:
super().__init__(**kwargs)
__all__ = ["MobileNetV1ImageProcessorFast"]
|
# mypy: allow-untyped-defs
import functools
from collections.abc import Hashable
from dataclasses import dataclass, fields
from typing import TypeVar
from typing_extensions import dataclass_transform
T = TypeVar("T", bound="_Union")
class _UnionTag(str):
__slots__ = ("_cls",)
_cls: Hashable
@staticmethod
def create(t, cls):
tag = _UnionTag(t)
assert not hasattr(tag, "_cls")
tag._cls = cls
return tag
def __eq__(self, cmp) -> bool:
assert isinstance(cmp, str)
other = str(cmp)
assert other in _get_field_names(self._cls), (
f"{other} is not a valid tag for {self._cls}. Available tags: {_get_field_names(self._cls)}"
)
return str(self) == other
def __hash__(self):
return hash(str(self))
@functools.cache
def _get_field_names(cls) -> set[str]:
return {f.name for f in fields(cls)}
# If you turn a schema class that inherits from union into a dataclass, please use
# this decorator to configure it. It's safe, faster and allows code sharing.
#
# For example, _union_dataclass customizes the __eq__ method to only check the type
# and value property instead of default implementation of dataclass which goes
# through every field in the dataclass.
@dataclass_transform(eq_default=False)
def _union_dataclass(cls: type[T]) -> type[T]:
assert issubclass(cls, _Union), f"{cls} must inheirt from {_Union}."
return dataclass(repr=False, eq=False)(cls)
class _Union:
_type: _UnionTag
@classmethod
def create(cls, **kwargs):
assert len(kwargs) == 1
obj = cls(**{**{f.name: None for f in fields(cls)}, **kwargs}) # type: ignore[arg-type]
obj._type = _UnionTag.create(next(iter(kwargs.keys())), cls)
return obj
def __post_init__(self):
assert not any(
f.name in ("type", "_type", "create", "value")
for f in fields(self) # type: ignore[arg-type, misc]
)
@property
def type(self) -> str:
try:
return self._type
except AttributeError as e:
raise RuntimeError(
f"Please use {type(self).__name__}.create to instantiate the union type."
) from e
@property
def value(self):
return getattr(self, self.type)
def __getattribute__(self, name):
attr = super().__getattribute__(name)
if attr is None and name in _get_field_names(type(self)) and name != self.type: # type: ignore[arg-type]
raise AttributeError(f"Field {name} is not set.")
return attr
def __eq__(self, other: object) -> bool:
if not isinstance(other, _Union):
return False
return self.type == other.type and self.value == other.value
def __str__(self):
return self.__repr__()
def __repr__(self):
return f"{type(self).__name__}({self.type}={getattr(self, self.type)})"
|
# mypy: allow-untyped-defs
import functools
from collections.abc import Hashable
from dataclasses import dataclass, fields
from typing import TypeVar
from typing_extensions import dataclass_transform
T = TypeVar("T", bound="_Union")
class _UnionTag(str):
__slots__ = ("_cls",)
_cls: Hashable
@staticmethod
def create(t, cls):
tag = _UnionTag(t)
assert not hasattr(tag, "_cls")
tag._cls = cls
return tag
def __eq__(self, cmp) -> bool:
assert isinstance(cmp, str)
other = str(cmp)
assert other in _get_field_names(self._cls), (
f"{other} is not a valid tag for {self._cls}. Available tags: {_get_field_names(self._cls)}"
)
return str(self) == other
def __hash__(self):
return hash(str(self))
@functools.cache
def _get_field_names(cls) -> set[str]:
return {f.name for f in fields(cls)}
# If you turn a schema class that inherits from union into a dataclass, please use
# this decorator to configure it. It's safe, faster and allows code sharing.
#
# For example, _union_dataclass customizes the __eq__ method to only check the type
# and value property instead of default implmentation of dataclass which goes
# through every field in the dataclass.
@dataclass_transform(eq_default=False)
def _union_dataclass(cls: type[T]) -> type[T]:
assert issubclass(cls, _Union), f"{cls} must inheirt from {_Union}."
return dataclass(repr=False, eq=False)(cls)
class _Union:
_type: _UnionTag
@classmethod
def create(cls, **kwargs):
assert len(kwargs) == 1
obj = cls(**{**{f.name: None for f in fields(cls)}, **kwargs}) # type: ignore[arg-type]
obj._type = _UnionTag.create(next(iter(kwargs.keys())), cls)
return obj
def __post_init__(self):
assert not any(
f.name in ("type", "_type", "create", "value")
for f in fields(self) # type: ignore[arg-type, misc]
)
@property
def type(self) -> str:
try:
return self._type
except AttributeError as e:
raise RuntimeError(
f"Please use {type(self).__name__}.create to instantiate the union type."
) from e
@property
def value(self):
return getattr(self, self.type)
def __getattribute__(self, name):
attr = super().__getattribute__(name)
if attr is None and name in _get_field_names(type(self)) and name != self.type: # type: ignore[arg-type]
raise AttributeError(f"Field {name} is not set.")
return attr
def __eq__(self, other: object) -> bool:
if not isinstance(other, _Union):
return False
return self.type == other.type and self.value == other.value
def __str__(self):
return self.__repr__()
def __repr__(self):
return f"{type(self).__name__}({self.type}={getattr(self, self.type)})"
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import pickle
from inspect import signature
import pytest
from sklearn.utils.deprecation import _is_deprecated, deprecated
@deprecated("qwerty")
class MockClass1:
pass
class MockClass2:
@deprecated("mockclass2_method")
def method(self):
pass
@deprecated("n_features_ is deprecated") # type: ignore[prop-decorator]
@property
def n_features_(self):
"""Number of input features."""
return 10
class MockClass3:
@deprecated()
def __init__(self):
pass
class MockClass4:
pass
class MockClass5(MockClass1):
"""Inherit from deprecated class but does not call super().__init__."""
def __init__(self, a):
self.a = a
@deprecated("a message")
class MockClass6:
"""A deprecated class that overrides __new__."""
def __new__(cls, *args, **kwargs):
assert len(args) > 0
return super().__new__(cls)
@deprecated()
def mock_function():
return 10
def test_deprecated():
with pytest.warns(FutureWarning, match="qwerty"):
MockClass1()
with pytest.warns(FutureWarning, match="mockclass2_method"):
MockClass2().method()
with pytest.warns(FutureWarning, match="deprecated"):
MockClass3()
with pytest.warns(FutureWarning, match="qwerty"):
MockClass5(42)
with pytest.warns(FutureWarning, match="a message"):
MockClass6(42)
with pytest.warns(FutureWarning, match="deprecated"):
val = mock_function()
assert val == 10
def test_is_deprecated():
# Test if _is_deprecated helper identifies wrapping via deprecated
# NOTE it works only for class methods and functions
assert _is_deprecated(MockClass1.__new__)
assert _is_deprecated(MockClass2().method)
assert _is_deprecated(MockClass3.__init__)
assert not _is_deprecated(MockClass4.__init__)
assert _is_deprecated(MockClass5.__new__)
assert _is_deprecated(mock_function)
def test_pickle():
pickle.loads(pickle.dumps(mock_function))
def test_deprecated_class_signature():
@deprecated()
class MockClass:
def __init__(self, a, b=1, c=2):
pass
assert list(signature(MockClass).parameters.keys()) == ["a", "b", "c"]
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import pickle
from inspect import signature
import pytest
from sklearn.utils.deprecation import _is_deprecated, deprecated
@deprecated("qwerty")
class MockClass1:
pass
class MockClass2:
@deprecated("mockclass2_method")
def method(self):
pass
@deprecated("n_features_ is deprecated") # type: ignore
@property
def n_features_(self):
"""Number of input features."""
return 10
class MockClass3:
@deprecated()
def __init__(self):
pass
class MockClass4:
pass
class MockClass5(MockClass1):
"""Inherit from deprecated class but does not call super().__init__."""
def __init__(self, a):
self.a = a
@deprecated("a message")
class MockClass6:
"""A deprecated class that overrides __new__."""
def __new__(cls, *args, **kwargs):
assert len(args) > 0
return super().__new__(cls)
@deprecated()
def mock_function():
return 10
def test_deprecated():
with pytest.warns(FutureWarning, match="qwerty"):
MockClass1()
with pytest.warns(FutureWarning, match="mockclass2_method"):
MockClass2().method()
with pytest.warns(FutureWarning, match="deprecated"):
MockClass3()
with pytest.warns(FutureWarning, match="qwerty"):
MockClass5(42)
with pytest.warns(FutureWarning, match="a message"):
MockClass6(42)
with pytest.warns(FutureWarning, match="deprecated"):
val = mock_function()
assert val == 10
def test_is_deprecated():
# Test if _is_deprecated helper identifies wrapping via deprecated
# NOTE it works only for class methods and functions
assert _is_deprecated(MockClass1.__new__)
assert _is_deprecated(MockClass2().method)
assert _is_deprecated(MockClass3.__init__)
assert not _is_deprecated(MockClass4.__init__)
assert _is_deprecated(MockClass5.__new__)
assert _is_deprecated(mock_function)
def test_pickle():
pickle.loads(pickle.dumps(mock_function))
def test_deprecated_class_signature():
@deprecated()
class MockClass:
def __init__(self, a, b=1, c=2):
pass
assert list(signature(MockClass).parameters.keys()) == ["a", "b", "c"]
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
neck=dict(
type='PAFPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5))
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
neck=dict(
type='PAFPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5))
|
"""
The system trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) on the SNLI + MultiNLI (AllNLI) dataset
with softmax loss function. At every 1000 training steps, the model is evaluated on the
STS benchmark dataset
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_name
"""
import traceback
from datasets import load_dataset
from sentence_transformers import losses
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
import sys
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Hugging Face pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
train_batch_size = 16
output_dir = "output/training_nli_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
# We'll start with 10k training samples, but you can increase this to get a stronger model
logging.info("Read AllNLI train dataset")
train_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="train").select(range(10000))
eval_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="dev").select(range(1000))
logging.info(train_dataset)
# 3. Define our training loss: https://sbert.net/docs/package_reference/losses.html#softmaxloss
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=3,
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
logging.info("Evaluation before training:")
dev_evaluator(model)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=1,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="nli-v1", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-nli-v1")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-nli-v1')`."
)
|
"""
The system trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) on the SNLI + MultiNLI (AllNLI) dataset
with softmax loss function. At every 1000 training steps, the model is evaluated on the
STS benchmark dataset
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_name
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses
from sentence_transformers import LoggingHandler, SentenceTransformer, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
import sys
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
nli_dataset_path = "data/AllNLI.tsv.gz"
sts_dataset_path = "data/stsbenchmark.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
# Read the dataset
train_batch_size = 16
model_save_path = (
"output/training_nli_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# Read the AllNLI.tsv.gz file and create the training dataset
logging.info("Read AllNLI train dataset")
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
train_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "train":
label_id = label2int[row["label"]]
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.SoftmaxLoss(
model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=len(label2int)
)
# Read STSbenchmark dataset and use it as development set
logging.info("Read STSbenchmark dev dataset")
dev_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "dev":
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
dev_samples, batch_size=train_batch_size, name="sts-dev"
)
# Configure the training
num_epochs = 1
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "test":
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
test_samples, batch_size=train_batch_size, name="sts-test"
)
test_evaluator(model, output_path=model_save_path)
|
from __future__ import annotations
import pytest
from sentence_transformers.cross_encoder import CrossEncoder
@pytest.mark.parametrize(
"model_name, expected_score",
[
("cross-encoder/ms-marco-MiniLM-L-6-v2", [8.12545108795166, -3.045016050338745, -3.1524128913879395]),
("cross-encoder/ms-marco-TinyBERT-L-2-v2", [8.142767906188965, 1.2057735919952393, -2.7283530235290527]),
("cross-encoder/stsb-distilroberta-base", [0.4977430999279022, 0.255491703748703, 0.28261035680770874]),
("mixedbread-ai/mxbai-rerank-xsmall-v1", [0.9224735498428345, 0.04793589934706688, 0.03315146267414093]),
],
)
def test_pretrained_model(model_name: str, expected_score: list[float]) -> None:
# Ensure that pretrained models are not accidentally changed
model = CrossEncoder(model_name)
query = "is toprol xl the same as metoprolol?"
answers = [
"Metoprolol succinate is also known by the brand name Toprol XL. It is the extended-release form of metoprolol. Metoprolol succinate is approved to treat high blood pressure, chronic chest pain, and congestive heart failure.",
"Pill with imprint 1 is White, Round and has been identified as Metoprolol Tartrate 25 mg.",
"Interactions between your drugs No interactions were found between Allergy Relief and metoprolol. This does not necessarily mean no interactions exist. Always consult your healthcare provider.",
]
scores = model.predict([(query, answer) for answer in answers])
assert scores.tolist() == pytest.approx(expected_score, rel=1e-4)
|
from __future__ import annotations
import pytest
from sentence_transformers.cross_encoder import CrossEncoder
@pytest.mark.parametrize(
"model_name, expected_score",
[
("cross-encoder/ms-marco-MiniLM-L-6-v2", [8.12545108795166, -3.045016050338745, -3.1524128913879395]),
("cross-encoder/ms-marco-TinyBERT-L-2-v2", [8.142767906188965, 1.2057735919952393, -2.7283530235290527]),
("cross-encoder/stsb-distilroberta-base", [0.4977430999279022, 0.255491703748703, 0.28261035680770874]),
("mixedbread-ai/mxbai-rerank-xsmall-v1", [0.9224735498428345, 0.04793589934706688, 0.03315146267414093]),
],
)
def test_pretrained_model(model_name: str, expected_score: list[float]) -> None:
# Ensure that pretrained models are not accidentally changed
model = CrossEncoder(model_name)
query = "is toprol xl the same as metoprolol?"
answers = [
"Metoprolol succinate is also known by the brand name Toprol XL. It is the extended-release form of metoprolol. Metoprolol succinate is approved to treat high blood pressure, chronic chest pain, and congestive heart failure.",
"Pill with imprint 1 is White, Round and has been identified as Metoprolol Tartrate 25 mg.",
"Interactions between your drugs No interactions were found between Allergy Relief and metoprolol. This does not necessarily mean no interactions exist. Always consult your healthcare provider.",
]
scores = model.predict([(query, answer) for answer in answers])
assert scores.tolist() == pytest.approx(expected_score, rel=1e-6, abs=1e-12)
|
r"""Utility classes & functions for data loading. Code in this folder is mostly used by ../dataloder.py.
A lot of multiprocessing is used in data loading, which only supports running
functions defined in global environment (py2 can't serialize static methods).
Therefore, for code tidiness we put these functions into different files in this
folder.
"""
import atexit
import sys
# old private location of the ExceptionWrapper that some users rely on:
from torch._utils import ExceptionWrapper
IS_WINDOWS = sys.platform == "win32"
MP_STATUS_CHECK_INTERVAL = 5.0
r"""Interval (in seconds) to check status of processes to avoid hanging in
multiprocessing data loading. This is mainly used in getting data from
another process, in which case we need to periodically check whether the
sender is alive to prevent hanging."""
python_exit_status = False
r"""Whether Python is shutting down. This flag is guaranteed to be set before
the Python core library resources are freed, but Python may already be exiting
for some time when this is set.
Hook to set this flag is `_set_python_exit_flag`, and is inspired by a similar
hook in Python 3.7 multiprocessing library:
https://github.com/python/cpython/blob/d4d60134b29290049e28df54f23493de4f1824b6/Lib/multiprocessing/util.py#L277-L327
"""
try:
import numpy
HAS_NUMPY = True
except ModuleNotFoundError:
HAS_NUMPY = False
def _set_python_exit_flag() -> None:
global python_exit_status
python_exit_status = True
atexit.register(_set_python_exit_flag)
from . import collate, fetch, pin_memory, signal_handling, worker
|
# mypy: allow-untyped-defs
r"""Utility classes & functions for data loading. Code in this folder is mostly used by ../dataloder.py.
A lot of multiprocessing is used in data loading, which only supports running
functions defined in global environment (py2 can't serialize static methods).
Therefore, for code tidiness we put these functions into different files in this
folder.
"""
import atexit
import sys
# old private location of the ExceptionWrapper that some users rely on:
from torch._utils import ExceptionWrapper
IS_WINDOWS = sys.platform == "win32"
MP_STATUS_CHECK_INTERVAL = 5.0
r"""Interval (in seconds) to check status of processes to avoid hanging in
multiprocessing data loading. This is mainly used in getting data from
another process, in which case we need to periodically check whether the
sender is alive to prevent hanging."""
python_exit_status = False
r"""Whether Python is shutting down. This flag is guaranteed to be set before
the Python core library resources are freed, but Python may already be exiting
for some time when this is set.
Hook to set this flag is `_set_python_exit_flag`, and is inspired by a similar
hook in Python 3.7 multiprocessing library:
https://github.com/python/cpython/blob/d4d60134b29290049e28df54f23493de4f1824b6/Lib/multiprocessing/util.py#L277-L327
"""
try:
import numpy
HAS_NUMPY = True
except ModuleNotFoundError:
HAS_NUMPY = False
def _set_python_exit_flag():
global python_exit_status
python_exit_status = True
atexit.register(_set_python_exit_flag)
from . import collate, fetch, pin_memory, signal_handling, worker
|
from typing import TYPE_CHECKING, Dict, Type
from docarray.array.abstract_array import AbstractDocumentArray
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.proto import DocumentArrayProto, NodeProto
class ProtoArrayMixin(AbstractDocumentArray):
@classmethod
def from_protobuf(
cls: Type[AbstractDocumentArray], pb_msg: 'DocumentArrayProto'
) -> AbstractDocumentArray:
"""create a Document from a protobuf message"""
content_type = pb_msg.WhichOneof('content')
if content_type == 'list_':
return cls(
cls.document_type.from_protobuf(doc_proto)
for doc_proto in pb_msg.list_.docs
)
elif content_type == 'stack':
da = cls(
cls.document_type.from_protobuf(doc_proto)
for doc_proto in pb_msg.stack.list_.docs
)
da._columns = pb_msg.stack.columns
return da
else:
raise ValueError(f'proto message content {content_type} is not supported')
def to_protobuf(self) -> 'DocumentArrayProto':
"""Convert DocumentArray into a Protobuf message.
:param ndarray_type: can be ``list`` or ``numpy``,
if set it will force all ndarray-like object from all
Documents to ``List`` or ``numpy.ndarray``.
:return: the protobuf message
"""
from docarray.proto import (
DocumentArrayListProto,
DocumentArrayProto,
DocumentArrayStackedProto,
UnionArrayProto,
)
if self.is_stacked() and self._columns is not None:
da_proto = DocumentArrayListProto()
for doc in self.__iter_over_stacked_documents__():
da_proto.docs.append(doc.to_protobuf())
columns_proto: Dict[str, UnionArrayProto] = dict()
for field, column in self._columns.items():
if isinstance(column, ProtoArrayMixin):
columns_proto[field] = UnionArrayProto(
document_array=DocumentArrayProto(stack=column.to_protobuf())
)
elif isinstance(column, AbstractTensor):
columns_proto[field] = UnionArrayProto(ndarray=column.to_protobuf())
da_proto = DocumentArrayStackedProto(list_=da_proto, columns=columns_proto)
return DocumentArrayProto(stack=da_proto)
else:
da_proto = DocumentArrayListProto()
for doc in self:
da_proto.docs.append(doc.to_protobuf())
return DocumentArrayProto(list_=da_proto)
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert a DocumentArray into a NodeProto protobuf message.
This function should be called when a DocumentArray
is nested into another Document that need to be converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(chunks=self.to_protobuf())
|
from typing import TYPE_CHECKING, Type
from docarray.array.abstract_array import AbstractDocumentArray
if TYPE_CHECKING:
from docarray.proto import DocumentArrayProto, NodeProto
class ProtoArrayMixin(AbstractDocumentArray):
@classmethod
def from_protobuf(
cls: Type[AbstractDocumentArray], pb_msg: 'DocumentArrayProto'
) -> AbstractDocumentArray:
"""create a Document from a protobuf message"""
return cls(cls.document_type.from_protobuf(od) for od in pb_msg.docs)
def to_protobuf(self) -> 'DocumentArrayProto':
"""Convert DocumentArray into a Protobuf message.
:param ndarray_type: can be ``list`` or ``numpy``,
if set it will force all ndarray-like object from all
Documents to ``List`` or ``numpy.ndarray``.
:return: the protobuf message
"""
from docarray.proto import DocumentArrayProto
dap = DocumentArrayProto()
for doc in self:
dap.docs.append(doc.to_protobuf())
return dap
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert a DocumentArray into a NodeProto protobuf message.
This function should be called when a DocumentArray
is nested into another Document that need to be converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(chunks=self.to_protobuf())
|
from logging import Logger
from backend.util.settings import AppEnvironment, BehaveAs, Settings
settings = Settings()
def configure_logging():
import logging
import autogpt_libs.logging.config
if (
settings.config.behave_as == BehaveAs.LOCAL
or settings.config.app_env == AppEnvironment.LOCAL
):
autogpt_libs.logging.config.configure_logging(force_cloud_logging=False)
else:
autogpt_libs.logging.config.configure_logging(force_cloud_logging=True)
# Silence httpx logger
logging.getLogger("httpx").setLevel(logging.WARNING)
class TruncatedLogger:
def __init__(
self,
logger: Logger,
prefix: str = "",
metadata: dict | None = None,
max_length: int = 1000,
):
self.logger = logger
self.metadata = metadata or {}
self.max_length = max_length
self.prefix = prefix
def info(self, msg: str, **extra):
msg = self._wrap(msg, **extra)
self.logger.info(msg, extra=self._get_metadata(**extra))
def warning(self, msg: str, **extra):
msg = self._wrap(msg, **extra)
self.logger.warning(msg, extra=self._get_metadata(**extra))
def error(self, msg: str, **extra):
msg = self._wrap(msg, **extra)
self.logger.error(msg, extra=self._get_metadata(**extra))
def debug(self, msg: str, **extra):
msg = self._wrap(msg, **extra)
self.logger.debug(msg, extra=self._get_metadata(**extra))
def exception(self, msg: str, **extra):
msg = self._wrap(msg, **extra)
self.logger.exception(msg, extra=self._get_metadata(**extra))
def _get_metadata(self, **extra):
metadata = {**self.metadata, **extra}
return {"json_fields": metadata} if metadata else {}
def _wrap(self, msg: str, **extra):
extra_msg = str(extra or "")
if len(extra_msg) > 1000:
extra_msg = extra_msg[:1000] + "..."
return f"{self.prefix} {msg} {extra_msg}"
|
from backend.util.settings import AppEnvironment, BehaveAs, Settings
settings = Settings()
def configure_logging():
import logging
import autogpt_libs.logging.config
if (
settings.config.behave_as == BehaveAs.LOCAL
or settings.config.app_env == AppEnvironment.LOCAL
):
autogpt_libs.logging.config.configure_logging(force_cloud_logging=False)
else:
autogpt_libs.logging.config.configure_logging(force_cloud_logging=True)
# Silence httpx logger
logging.getLogger("httpx").setLevel(logging.WARNING)
|
from datetime import datetime, timedelta
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
class HubSpotEngagementBlock(Block):
class Input(BlockSchema):
credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
operation: str = SchemaField(
description="Operation to perform (send_email, track_engagement)",
default="send_email",
)
email_data: dict = SchemaField(
description="Email data including recipient, subject, content",
default_factory=dict,
)
contact_id: str = SchemaField(
description="Contact ID for engagement tracking", default=""
)
timeframe_days: int = SchemaField(
description="Number of days to look back for engagement",
default=30,
)
class Output(BlockSchema):
result: dict = SchemaField(description="Operation result")
status: str = SchemaField(description="Operation status")
def __init__(self):
super().__init__(
id="c6524385-7d87-49d6-a470-248bd29ca765",
description="Manages HubSpot engagements - sends emails and tracks engagement metrics",
categories={BlockCategory.CRM, BlockCategory.COMMUNICATION},
input_schema=HubSpotEngagementBlock.Input,
output_schema=HubSpotEngagementBlock.Output,
)
def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com"
headers = {
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
"Content-Type": "application/json",
}
if input_data.operation == "send_email":
# Using the email send API
email_url = f"{base_url}/crm/v3/objects/emails"
email_data = {
"properties": {
"hs_timestamp": datetime.now().isoformat(),
"hubspot_owner_id": "1", # This should be configurable
"hs_email_direction": "OUTBOUND",
"hs_email_status": "SEND",
"hs_email_subject": input_data.email_data.get("subject"),
"hs_email_text": input_data.email_data.get("content"),
"hs_email_to_email": input_data.email_data.get("recipient"),
}
}
response = Requests().post(email_url, headers=headers, json=email_data)
result = response.json()
yield "result", result
yield "status", "email_sent"
elif input_data.operation == "track_engagement":
# Get engagement events for the contact
from_date = datetime.now() - timedelta(days=input_data.timeframe_days)
engagement_url = (
f"{base_url}/crm/v3/objects/contacts/{input_data.contact_id}/engagement"
)
params = {"limit": 100, "after": from_date.isoformat()}
response = Requests().get(engagement_url, headers=headers, params=params)
engagements = response.json()
# Process engagement metrics
metrics = {
"email_opens": 0,
"email_clicks": 0,
"email_replies": 0,
"last_engagement": None,
"engagement_score": 0,
}
for engagement in engagements.get("results", []):
eng_type = engagement.get("properties", {}).get("hs_engagement_type")
if eng_type == "EMAIL":
metrics["email_opens"] += 1
elif eng_type == "EMAIL_CLICK":
metrics["email_clicks"] += 1
elif eng_type == "EMAIL_REPLY":
metrics["email_replies"] += 1
# Update last engagement time
eng_time = engagement.get("properties", {}).get("hs_timestamp")
if eng_time and (
not metrics["last_engagement"]
or eng_time > metrics["last_engagement"]
):
metrics["last_engagement"] = eng_time
# Calculate simple engagement score
metrics["engagement_score"] = (
metrics["email_opens"]
+ metrics["email_clicks"] * 2
+ metrics["email_replies"] * 3
)
yield "result", metrics
yield "status", "engagement_tracked"
|
from datetime import datetime, timedelta
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class HubSpotEngagementBlock(Block):
class Input(BlockSchema):
credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
operation: str = SchemaField(
description="Operation to perform (send_email, track_engagement)",
default="send_email",
)
email_data: dict = SchemaField(
description="Email data including recipient, subject, content",
default_factory=dict,
)
contact_id: str = SchemaField(
description="Contact ID for engagement tracking", default=""
)
timeframe_days: int = SchemaField(
description="Number of days to look back for engagement",
default=30,
)
class Output(BlockSchema):
result: dict = SchemaField(description="Operation result")
status: str = SchemaField(description="Operation status")
def __init__(self):
super().__init__(
id="c6524385-7d87-49d6-a470-248bd29ca765",
description="Manages HubSpot engagements - sends emails and tracks engagement metrics",
categories={BlockCategory.CRM, BlockCategory.COMMUNICATION},
input_schema=HubSpotEngagementBlock.Input,
output_schema=HubSpotEngagementBlock.Output,
)
def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com"
headers = {
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
"Content-Type": "application/json",
}
if input_data.operation == "send_email":
# Using the email send API
email_url = f"{base_url}/crm/v3/objects/emails"
email_data = {
"properties": {
"hs_timestamp": datetime.now().isoformat(),
"hubspot_owner_id": "1", # This should be configurable
"hs_email_direction": "OUTBOUND",
"hs_email_status": "SEND",
"hs_email_subject": input_data.email_data.get("subject"),
"hs_email_text": input_data.email_data.get("content"),
"hs_email_to_email": input_data.email_data.get("recipient"),
}
}
response = requests.post(email_url, headers=headers, json=email_data)
result = response.json()
yield "result", result
yield "status", "email_sent"
elif input_data.operation == "track_engagement":
# Get engagement events for the contact
from_date = datetime.now() - timedelta(days=input_data.timeframe_days)
engagement_url = (
f"{base_url}/crm/v3/objects/contacts/{input_data.contact_id}/engagement"
)
params = {"limit": 100, "after": from_date.isoformat()}
response = requests.get(engagement_url, headers=headers, params=params)
engagements = response.json()
# Process engagement metrics
metrics = {
"email_opens": 0,
"email_clicks": 0,
"email_replies": 0,
"last_engagement": None,
"engagement_score": 0,
}
for engagement in engagements.get("results", []):
eng_type = engagement.get("properties", {}).get("hs_engagement_type")
if eng_type == "EMAIL":
metrics["email_opens"] += 1
elif eng_type == "EMAIL_CLICK":
metrics["email_clicks"] += 1
elif eng_type == "EMAIL_REPLY":
metrics["email_replies"] += 1
# Update last engagement time
eng_time = engagement.get("properties", {}).get("hs_timestamp")
if eng_time and (
not metrics["last_engagement"]
or eng_time > metrics["last_engagement"]
):
metrics["last_engagement"] = eng_time
# Calculate simple engagement score
metrics["engagement_score"] = (
metrics["email_opens"]
+ metrics["email_clicks"] * 2
+ metrics["email_replies"] * 3
)
yield "result", metrics
yield "status", "engagement_tracked"
|
__version__ = '0.13.6'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.13.5'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import AIPluginTool
from langchain_community.tools.plugin import AIPlugin, AIPluginToolSchema, ApiConfig
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ApiConfig": "langchain_community.tools.plugin",
"AIPlugin": "langchain_community.tools.plugin",
"AIPluginToolSchema": "langchain_community.tools.plugin",
"AIPluginTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AIPlugin",
"AIPluginTool",
"AIPluginToolSchema",
"ApiConfig",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import AIPluginTool
from langchain_community.tools.plugin import AIPlugin, AIPluginToolSchema, ApiConfig
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ApiConfig": "langchain_community.tools.plugin",
"AIPlugin": "langchain_community.tools.plugin",
"AIPluginToolSchema": "langchain_community.tools.plugin",
"AIPluginTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ApiConfig",
"AIPlugin",
"AIPluginToolSchema",
"AIPluginTool",
]
|
import pytest
from jina import Executor, Flow, requests
from jina.clients.base.helper import HTTPClientlet, WebsocketClientlet
from jina.clients.request.helper import _new_data_request
from jina.excepts import BadServer
from jina.logging.logger import JinaLogger
from jina.types.request.data import DataRequest
logger = JinaLogger('clientlet')
class ClientTestExecutor(Executor):
@requests
def error(self, **kwargs):
raise NotImplementedError
@pytest.fixture
def flow_with_exception_request():
return Flow().add(uses=ClientTestExecutor).add()
@pytest.mark.asyncio
async def test_http_clientlet():
from jina.helper import random_port
port = random_port()
with Flow(port=port, protocol='http').add():
async with HTTPClientlet(
url=f'http://localhost:{port}/post', logger=logger
) as iolet:
request = _new_data_request('/', None, {'a': 'b'})
assert request.header.target_executor == ''
r = await iolet.send_message(request)
response = DataRequest(await r.json())
assert response.header.exec_endpoint == '/'
assert response.parameters == {'a': 'b'}
@pytest.mark.asyncio
async def test_http_clientlet_target():
from jina.helper import random_port
port = random_port()
with Flow(port=port, protocol='http').add():
async with HTTPClientlet(
url=f'http://localhost:{port}/post', logger=logger
) as iolet:
request = _new_data_request('/', 'nothing', {'a': 'b'})
assert request.header.target_executor == 'nothing'
r = await iolet.send_message(request)
response = DataRequest(await r.json())
assert response.header.exec_endpoint == '/'
assert response.parameters == {'a': 'b'}
@pytest.mark.asyncio
async def test_websocket_clientlet():
with pytest.raises(ConnectionError):
async with WebsocketClientlet(url='ws://localhost:12345', logger=logger):
pass
def test_client_behaviour(flow_with_exception_request, mocker):
on_done_mock = mocker.Mock()
on_always_mock = mocker.Mock()
on_error_mock = None
with pytest.raises(BadServer):
with flow_with_exception_request as f:
f.post(
'',
on_done=on_done_mock,
on_error=on_error_mock,
on_always=on_always_mock,
)
on_always_mock.assert_called_once()
on_done_mock.assert_not_called()
|
import aiohttp
import pytest
from jina import Executor, Flow, requests
from jina.clients.base.helper import HTTPClientlet, WebsocketClientlet
from jina.clients.request.helper import _new_data_request
from jina.excepts import BadServer
from jina.logging.logger import JinaLogger
from jina.types.request.data import DataRequest
logger = JinaLogger('clientlet')
class ClientTestExecutor(Executor):
@requests
def error(self, **kwargs):
raise NotImplementedError
@pytest.fixture
def flow_with_exception_request():
return Flow().add(uses=ClientTestExecutor).add()
@pytest.mark.asyncio
async def test_http_clientlet():
from jina.helper import random_port
port = random_port()
with Flow(port=port, protocol='http').add():
async with HTTPClientlet(
url=f'http://localhost:{port}/post', logger=logger
) as iolet:
request = _new_data_request('/', None, {'a': 'b'})
assert request.header.target_executor == ''
r = await iolet.send_message(request)
response = DataRequest(await r.json())
assert response.header.exec_endpoint == '/'
assert response.parameters == {'a': 'b'}
@pytest.mark.asyncio
async def test_http_clientlet_target():
from jina.helper import random_port
port = random_port()
with Flow(port=port, protocol='http').add():
async with HTTPClientlet(
url=f'http://localhost:{port}/post', logger=logger
) as iolet:
request = _new_data_request('/', 'nothing', {'a': 'b'})
assert request.header.target_executor == 'nothing'
r = await iolet.send_message(request)
response = DataRequest(await r.json())
assert response.header.exec_endpoint == '/'
assert response.parameters == {'a': 'b'}
@pytest.mark.asyncio
async def test_websocket_clientlet():
with pytest.raises(aiohttp.ClientError):
async with WebsocketClientlet(url='ws://localhost:12345', logger=logger):
pass
def test_client_behaviour(flow_with_exception_request, mocker):
on_done_mock = mocker.Mock()
on_always_mock = mocker.Mock()
on_error_mock = None
with pytest.raises(BadServer):
with flow_with_exception_request as f:
f.post(
'',
on_done=on_done_mock,
on_error=on_error_mock,
on_always=on_always_mock,
)
on_always_mock.assert_called_once()
on_done_mock.assert_not_called()
|
import sys
import tempfile
from unittest.mock import patch
from keras.src.testing import test_case
from keras.src.utils import io_utils
class TestIoUtils(test_case.TestCase):
def test_enable_interactive_logging(self):
io_utils.enable_interactive_logging()
self.assertTrue(io_utils.is_interactive_logging_enabled())
def test_disable_interactive_logging(self):
io_utils.disable_interactive_logging()
self.assertFalse(io_utils.is_interactive_logging_enabled())
def test_set_logging_verbosity_valid(self):
valid_levels = ["FATAL", "ERROR", "WARNING", "INFO", "DEBUG"]
for level in valid_levels:
io_utils.set_logging_verbosity(level)
def test_set_logging_verbosity_invalid(self):
with self.assertRaises(ValueError):
io_utils.set_logging_verbosity("INVALID")
@patch("builtins.input", side_effect=["y"])
def test_ask_to_proceed_with_overwrite_yes(self, _):
self.assertTrue(io_utils.ask_to_proceed_with_overwrite("test_path"))
@patch("builtins.input", side_effect=["n"])
def test_ask_to_proceed_with_overwrite_no(self, _):
self.assertFalse(io_utils.ask_to_proceed_with_overwrite("test_path"))
@patch("sys.stdout.write")
def test_print_msg_interactive_with_line_break(self, mock_write):
io_utils.enable_interactive_logging()
io_utils.print_msg("Hello", line_break=True)
mock_write.assert_called_once_with("Hello\n")
@patch("sys.stdout.write")
def test_print_msg_interactive_without_line_break(self, mock_write):
io_utils.enable_interactive_logging()
io_utils.print_msg("Hello", line_break=False)
mock_write.assert_called_once_with("Hello")
@patch("absl.logging.info")
def test_print_msg_non_interactive(self, mock_logging):
io_utils.disable_interactive_logging()
io_utils.print_msg("Hello")
mock_logging.assert_called_once_with("Hello")
@patch("builtins.input", side_effect=["invalid", "invalid", "y"])
def test_ask_to_proceed_with_overwrite_invalid_then_yes(self, _):
self.assertTrue(io_utils.ask_to_proceed_with_overwrite("test_path"))
@patch("builtins.input", side_effect=["invalid", "n"])
def test_ask_to_proceed_with_overwrite_invalid_then_no(self, _):
self.assertFalse(io_utils.ask_to_proceed_with_overwrite("test_path"))
def test_print_msg_with_different_encoding(self):
# https://github.com/keras-team/keras/issues/19386
io_utils.enable_interactive_logging()
self.assertTrue(io_utils.is_interactive_logging_enabled())
ori_stdout = sys.stdout
with tempfile.TemporaryFile(mode="w", encoding="cp1251") as tmp:
sys.stdout = tmp
io_utils.print_msg("━")
sys.stdout = ori_stdout
|
from unittest.mock import patch
from keras.src.testing import test_case
from keras.src.utils import io_utils
class TestIoUtils(test_case.TestCase):
def test_enable_interactive_logging(self):
io_utils.enable_interactive_logging()
self.assertTrue(io_utils.is_interactive_logging_enabled())
def test_disable_interactive_logging(self):
io_utils.disable_interactive_logging()
self.assertFalse(io_utils.is_interactive_logging_enabled())
def test_set_logging_verbosity_valid(self):
valid_levels = ["FATAL", "ERROR", "WARNING", "INFO", "DEBUG"]
for level in valid_levels:
io_utils.set_logging_verbosity(level)
def test_set_logging_verbosity_invalid(self):
with self.assertRaises(ValueError):
io_utils.set_logging_verbosity("INVALID")
@patch("builtins.input", side_effect=["y"])
def test_ask_to_proceed_with_overwrite_yes(self, _):
self.assertTrue(io_utils.ask_to_proceed_with_overwrite("test_path"))
@patch("builtins.input", side_effect=["n"])
def test_ask_to_proceed_with_overwrite_no(self, _):
self.assertFalse(io_utils.ask_to_proceed_with_overwrite("test_path"))
@patch("sys.stdout.write")
def test_print_msg_interactive_with_line_break(self, mock_write):
io_utils.enable_interactive_logging()
io_utils.print_msg("Hello", line_break=True)
mock_write.assert_called_once_with("Hello\n")
@patch("sys.stdout.write")
def test_print_msg_interactive_without_line_break(self, mock_write):
io_utils.enable_interactive_logging()
io_utils.print_msg("Hello", line_break=False)
mock_write.assert_called_once_with("Hello")
@patch("absl.logging.info")
def test_print_msg_non_interactive(self, mock_logging):
io_utils.disable_interactive_logging()
io_utils.print_msg("Hello")
mock_logging.assert_called_once_with("Hello")
@patch("builtins.input", side_effect=["invalid", "invalid", "y"])
def test_ask_to_proceed_with_overwrite_invalid_then_yes(self, _):
self.assertTrue(io_utils.ask_to_proceed_with_overwrite("test_path"))
@patch("builtins.input", side_effect=["invalid", "n"])
def test_ask_to_proceed_with_overwrite_invalid_then_no(self, _):
self.assertFalse(io_utils.ask_to_proceed_with_overwrite("test_path"))
|
import json
import re
from typing import TypeVar
import yaml
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from pydantic import BaseModel, ValidationError
from langchain.output_parsers.format_instructions import YAML_FORMAT_INSTRUCTIONS
T = TypeVar("T", bound=BaseModel)
class YamlOutputParser(BaseOutputParser[T]):
"""Parse YAML output using a pydantic model."""
pydantic_object: type[T]
"""The pydantic model to parse."""
pattern: re.Pattern = re.compile(
r"^```(?:ya?ml)?(?P<yaml>[^`]*)", re.MULTILINE | re.DOTALL
)
"""Regex pattern to match yaml code blocks
within triple backticks with optional yaml or yml prefix."""
def parse(self, text: str) -> T:
try:
# Greedy search for 1st yaml candidate.
match = re.search(self.pattern, text.strip())
# If no backticks were present, try to parse the entire output as yaml.
yaml_str = match.group("yaml") if match else text
json_object = yaml.safe_load(yaml_str)
if hasattr(self.pydantic_object, "model_validate"):
return self.pydantic_object.model_validate(json_object)
else:
return self.pydantic_object.parse_obj(json_object)
except (yaml.YAMLError, ValidationError) as e:
name = self.pydantic_object.__name__
msg = f"Failed to parse {name} from completion {text}. Got: {e}"
raise OutputParserException(msg, llm_output=text) from e
def get_format_instructions(self) -> str:
# Copy schema to avoid altering original Pydantic schema.
schema = {k: v for k, v in self.pydantic_object.schema().items()}
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure yaml in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema)
return YAML_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "yaml"
@property
def OutputType(self) -> type[T]:
return self.pydantic_object
|
import json
import re
from typing import TypeVar
import yaml
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from pydantic import BaseModel, ValidationError
from langchain.output_parsers.format_instructions import YAML_FORMAT_INSTRUCTIONS
T = TypeVar("T", bound=BaseModel)
class YamlOutputParser(BaseOutputParser[T]):
"""Parse YAML output using a pydantic model."""
pydantic_object: type[T]
"""The pydantic model to parse."""
pattern: re.Pattern = re.compile(
r"^```(?:ya?ml)?(?P<yaml>[^`]*)", re.MULTILINE | re.DOTALL
)
"""Regex pattern to match yaml code blocks
within triple backticks with optional yaml or yml prefix."""
def parse(self, text: str) -> T:
try:
# Greedy search for 1st yaml candidate.
match = re.search(self.pattern, text.strip())
yaml_str = ""
if match:
yaml_str = match.group("yaml")
else:
# If no backticks were present, try to parse the entire output as yaml.
yaml_str = text
json_object = yaml.safe_load(yaml_str)
if hasattr(self.pydantic_object, "model_validate"):
return self.pydantic_object.model_validate(json_object)
else:
return self.pydantic_object.parse_obj(json_object)
except (yaml.YAMLError, ValidationError) as e:
name = self.pydantic_object.__name__
msg = f"Failed to parse {name} from completion {text}. Got: {e}"
raise OutputParserException(msg, llm_output=text) from e
def get_format_instructions(self) -> str:
# Copy schema to avoid altering original Pydantic schema.
schema = {k: v for k, v in self.pydantic_object.schema().items()}
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure yaml in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema)
return YAML_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "yaml"
@property
def OutputType(self) -> type[T]:
return self.pydantic_object
|
"""Init file of LlamaIndex."""
__version__ = "0.12.33.post1"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Init file of LlamaIndex."""
__version__ = "0.12.32"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .logger import get_caller_name, get_root_logger, log_img_scale
from .misc import find_latest_checkpoint, update_data_root
from .setup_env import setup_multi_processes
from .split_batch import split_batch
from .util_distribution import build_ddp, build_dp, get_device
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'update_data_root', 'setup_multi_processes', 'get_caller_name',
'log_img_scale', 'compat_cfg', 'split_batch', 'build_ddp',
'build_dp', 'get_device'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .logger import get_caller_name, get_root_logger, log_img_scale
from .misc import find_latest_checkpoint, update_data_root
from .setup_env import setup_multi_processes
from .split_batch import split_batch
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'update_data_root', 'setup_multi_processes', 'get_caller_name',
'log_img_scale', 'compat_cfg', 'split_batch'
]
|
import logging
from typing import List
import numpy as np
from torch.utils.data import IterableDataset
from sentence_transformers.readers import InputExample
logger = logging.getLogger(__name__)
class SentenceLabelDataset(IterableDataset):
"""
This dataset can be used for some specific Triplet Losses like BATCH_HARD_TRIPLET_LOSS which requires
multiple examples with the same label in a batch.
It draws n consecutive, random and unique samples from one label at a time. This is repeated for each label.
Labels with fewer than n unique samples are ignored.
This also applied to drawing without replacement, once less than n samples remain for a label, it is skipped.
This *DOES NOT* check if there are more labels than the batch is large or if the batch size is divisible
by the samples drawn per label.
"""
def __init__(self, examples: List[InputExample], samples_per_label: int = 2, with_replacement: bool = False):
"""
Creates a LabelSampler for a SentenceLabelDataset.
Args:
examples (List[InputExample]): A list of InputExamples.
samples_per_label (int, optional): The number of consecutive, random, and unique samples drawn per label.
The batch size should be a multiple of samples_per_label. Defaults to 2.
with_replacement (bool, optional): If True, each sample is drawn at most once (depending on the total number
of samples per label). If False, one sample can be drawn in multiple draws, but not multiple times in
the same drawing. Defaults to False.
"""
super().__init__()
self.samples_per_label = samples_per_label
# Group examples by label
label2ex = {}
for example in examples:
if example.label not in label2ex:
label2ex[example.label] = []
label2ex[example.label].append(example)
# Include only labels with at least 2 examples
self.grouped_inputs = []
self.groups_right_border = []
num_labels = 0
for label, label_examples in label2ex.items():
if len(label_examples) >= self.samples_per_label:
self.grouped_inputs.extend(label_examples)
self.groups_right_border.append(
len(self.grouped_inputs)
) # At which position does this label group / bucket end?
num_labels += 1
self.label_range = np.arange(num_labels)
self.with_replacement = with_replacement
np.random.shuffle(self.label_range)
logger.info(
"SentenceLabelDataset: {} examples, from which {} examples could be used (those labels appeared at least {} times). {} different labels found.".format(
len(examples), len(self.grouped_inputs), self.samples_per_label, num_labels
)
)
def __iter__(self):
label_idx = 0
count = 0
already_seen = {}
while count < len(self.grouped_inputs):
label = self.label_range[label_idx]
if label not in already_seen:
already_seen[label] = set()
left_border = 0 if label == 0 else self.groups_right_border[label - 1]
right_border = self.groups_right_border[label]
if self.with_replacement:
selection = np.arange(left_border, right_border)
else:
selection = [i for i in np.arange(left_border, right_border) if i not in already_seen[label]]
if len(selection) >= self.samples_per_label:
for element_idx in np.random.choice(selection, self.samples_per_label, replace=False):
count += 1
already_seen[label].add(element_idx)
yield self.grouped_inputs[element_idx]
label_idx += 1
if label_idx >= len(self.label_range):
label_idx = 0
already_seen = {}
np.random.shuffle(self.label_range)
def __len__(self):
return len(self.grouped_inputs)
|
""" """
from torch.utils.data import IterableDataset
import numpy as np
from typing import List
from ..readers import InputExample
import logging
logger = logging.getLogger(__name__)
class SentenceLabelDataset(IterableDataset):
"""
This dataset can be used for some specific Triplet Losses like BATCH_HARD_TRIPLET_LOSS which requires
multiple examples with the same label in a batch.
It draws n consecutive, random and unique samples from one label at a time. This is repeated for each label.
Labels with fewer than n unique samples are ignored.
This also applied to drawing without replacement, once less than n samples remain for a label, it is skipped.
This *DOES NOT* check if there are more labels than the batch is large or if the batch size is divisible
by the samples drawn per label.
"""
def __init__(self, examples: List[InputExample], samples_per_label: int = 2, with_replacement: bool = False):
"""
Creates a LabelSampler for a SentenceLabelDataset.
:param examples:
a list with InputExamples
:param samples_per_label:
the number of consecutive, random and unique samples drawn per label. Batch size should be a multiple of samples_per_label
:param with_replacement:
if this is True, then each sample is drawn at most once (depending on the total number of samples per label).
if this is False, then one sample can be drawn in multiple draws, but still not multiple times in the same
drawing.
"""
super().__init__()
self.samples_per_label = samples_per_label
# Group examples by label
label2ex = {}
for example in examples:
if example.label not in label2ex:
label2ex[example.label] = []
label2ex[example.label].append(example)
# Include only labels with at least 2 examples
self.grouped_inputs = []
self.groups_right_border = []
num_labels = 0
for label, label_examples in label2ex.items():
if len(label_examples) >= self.samples_per_label:
self.grouped_inputs.extend(label_examples)
self.groups_right_border.append(
len(self.grouped_inputs)
) # At which position does this label group / bucket end?
num_labels += 1
self.label_range = np.arange(num_labels)
self.with_replacement = with_replacement
np.random.shuffle(self.label_range)
logger.info(
"SentenceLabelDataset: {} examples, from which {} examples could be used (those labels appeared at least {} times). {} different labels found.".format(
len(examples), len(self.grouped_inputs), self.samples_per_label, num_labels
)
)
def __iter__(self):
label_idx = 0
count = 0
already_seen = {}
while count < len(self.grouped_inputs):
label = self.label_range[label_idx]
if label not in already_seen:
already_seen[label] = set()
left_border = 0 if label == 0 else self.groups_right_border[label - 1]
right_border = self.groups_right_border[label]
if self.with_replacement:
selection = np.arange(left_border, right_border)
else:
selection = [i for i in np.arange(left_border, right_border) if i not in already_seen[label]]
if len(selection) >= self.samples_per_label:
for element_idx in np.random.choice(selection, self.samples_per_label, replace=False):
count += 1
already_seen[label].add(element_idx)
yield self.grouped_inputs[element_idx]
label_idx += 1
if label_idx >= len(self.label_range):
label_idx = 0
already_seen = {}
np.random.shuffle(self.label_range)
def __len__(self):
return len(self.grouped_inputs)
|
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
__all__ = ['AudioNdArray']
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
__all__.extend(['AudioTorchTensor'])
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.audio.audio_tensorflow_tensor import ( # noqa
AudioTensorFlowTensor,
)
__all__.extend(['AudioTensorFlowTensor'])
|
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
__all__ = ['AudioNdArray']
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
__all__.extend(['AudioTorchTensor'])
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import tempfile
import unittest
import torch
from transformers import AutoProcessor, LlamaTokenizerFast, LlavaNextProcessor
from transformers.testing_utils import (
require_vision,
)
from transformers.utils import is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import LlavaNextImageProcessor
@require_vision
class LlavaNextProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = LlavaNextProcessor
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
image_processor = LlavaNextImageProcessor()
tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b")
tokenizer.add_special_tokens({"additional_special_tokens": ["<image>"]})
processor_kwargs = cls.prepare_processor_dict()
processor = LlavaNextProcessor(image_processor, tokenizer, **processor_kwargs)
processor.save_pretrained(cls.tmpdirname)
cls.image_token = processor.image_token
def get_tokenizer(self, **kwargs):
return LlavaNextProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_image_processor(self, **kwargs):
return LlavaNextProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
@staticmethod
def prepare_processor_dict():
return {
"chat_template": "{% for message in messages %}{% if message['role'] != 'system' %}{{ message['role'].upper() + ': '}}{% endif %}{# Render all images first #}{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}{{ '<image>\n' }}{% endfor %}{# Render all text next #}{% if message['role'] != 'assistant' %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{{ content['text'] + ' '}}{% endfor %}{% else %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{% generation %}{{ content['text'] + ' '}}{% endgeneration %}{% endfor %}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'ASSISTANT:' }}{% endif %}",
"patch_size": 128,
"vision_feature_select_strategy": "default"
} # fmt: skip
# Copied from tests.models.llava.test_processor_llava.LlavaProcessorTest.test_chat_template_is_saved
def test_chat_template_is_saved(self):
processor_loaded = self.processor_class.from_pretrained(self.tmpdirname)
processor_dict_loaded = json.loads(processor_loaded.to_json_string())
# chat templates aren't serialized to json in processors
self.assertFalse("chat_template" in processor_dict_loaded.keys())
# they have to be saved as separate file and loaded back from that file
# so we check if the same template is loaded
processor_dict = self.prepare_processor_dict()
self.assertTrue(processor_loaded.chat_template == processor_dict.get("chat_template", None))
def test_image_token_filling(self):
processor = AutoProcessor.from_pretrained("llava-hf/llava-v1.6-vicuna-7b-hf")
processor.patch_size = 14
processor.vision_feature_select_strategy = "default"
# Important to check with non square image
image = torch.randint(0, 2, (3, 500, 316))
expected_image_tokens = 1526
image_token_index = 32000
messages = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": "What is shown in this image?"},
],
},
]
inputs = processor(
text=[processor.apply_chat_template(messages)],
images=[image],
return_tensors="pt",
)
image_tokens = (inputs["input_ids"] == image_token_index).sum().item()
self.assertEqual(expected_image_tokens, image_tokens)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import tempfile
import unittest
import torch
from transformers import AutoProcessor, LlamaTokenizerFast, LlavaNextProcessor
from transformers.testing_utils import (
require_vision,
)
from transformers.utils import is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import LlavaNextImageProcessor
@require_vision
class LlavaNextProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = LlavaNextProcessor
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
image_processor = LlavaNextImageProcessor()
tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b")
processor_kwargs = cls.prepare_processor_dict()
processor = LlavaNextProcessor(image_processor, tokenizer, **processor_kwargs)
processor.save_pretrained(cls.tmpdirname)
cls.image_token = processor.image_token
def get_tokenizer(self, **kwargs):
return LlavaNextProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_image_processor(self, **kwargs):
return LlavaNextProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
@staticmethod
def prepare_processor_dict():
return {
"chat_template": "{% for message in messages %}{% if message['role'] != 'system' %}{{ message['role'].upper() + ': '}}{% endif %}{# Render all images first #}{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}{{ '<image>\n' }}{% endfor %}{# Render all text next #}{% if message['role'] != 'assistant' %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{{ content['text'] + ' '}}{% endfor %}{% else %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{% generation %}{{ content['text'] + ' '}}{% endgeneration %}{% endfor %}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'ASSISTANT:' }}{% endif %}",
"patch_size": 128,
"vision_feature_select_strategy": "default"
} # fmt: skip
# Copied from tests.models.llava.test_processor_llava.LlavaProcessorTest.test_chat_template_is_saved
def test_chat_template_is_saved(self):
processor_loaded = self.processor_class.from_pretrained(self.tmpdirname)
processor_dict_loaded = json.loads(processor_loaded.to_json_string())
# chat templates aren't serialized to json in processors
self.assertFalse("chat_template" in processor_dict_loaded.keys())
# they have to be saved as separate file and loaded back from that file
# so we check if the same template is loaded
processor_dict = self.prepare_processor_dict()
self.assertTrue(processor_loaded.chat_template == processor_dict.get("chat_template", None))
def test_image_token_filling(self):
processor = AutoProcessor.from_pretrained("llava-hf/llava-v1.6-vicuna-7b-hf")
processor.patch_size = 14
processor.vision_feature_select_strategy = "default"
# Important to check with non square image
image = torch.randint(0, 2, (3, 500, 316))
expected_image_tokens = 1526
image_token_index = 32000
messages = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": "What is shown in this image?"},
],
},
]
inputs = processor(
text=[processor.apply_chat_template(messages)],
images=[image],
return_tensors="pt",
)
image_tokens = (inputs["input_ids"] == image_token_index).sum().item()
self.assertEqual(expected_image_tokens, image_tokens)
|
from datetime import datetime, timezone
from unittest.mock import AsyncMock
import pytest
from fastapi import WebSocket
from backend.data.execution import ExecutionResult, ExecutionStatus
from backend.server.conn_manager import ConnectionManager
from backend.server.model import Methods, WsMessage
@pytest.fixture
def connection_manager() -> ConnectionManager:
return ConnectionManager()
@pytest.fixture
def mock_websocket() -> AsyncMock:
websocket: AsyncMock = AsyncMock(spec=WebSocket)
websocket.send_text = AsyncMock()
return websocket
@pytest.mark.asyncio
async def test_connect(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
await connection_manager.connect(mock_websocket)
assert mock_websocket in connection_manager.active_connections
mock_websocket.accept.assert_called_once()
def test_disconnect(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
connection_manager.active_connections.add(mock_websocket)
connection_manager.subscriptions["test_graph"] = {mock_websocket}
connection_manager.disconnect(mock_websocket)
assert mock_websocket not in connection_manager.active_connections
assert mock_websocket not in connection_manager.subscriptions["test_graph"]
@pytest.mark.asyncio
async def test_subscribe(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
await connection_manager.subscribe("test_graph", mock_websocket)
assert mock_websocket in connection_manager.subscriptions["test_graph"]
@pytest.mark.asyncio
async def test_unsubscribe(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
connection_manager.subscriptions["test_graph"] = {mock_websocket}
await connection_manager.unsubscribe("test_graph", mock_websocket)
assert "test_graph" not in connection_manager.subscriptions
@pytest.mark.asyncio
async def test_send_execution_result(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
connection_manager.subscriptions["test_graph"] = {mock_websocket}
result: ExecutionResult = ExecutionResult(
graph_id="test_graph",
graph_version=1,
graph_exec_id="test_exec_id",
node_exec_id="test_node_exec_id",
node_id="test_node_id",
block_id="test_block_id",
status=ExecutionStatus.COMPLETED,
input_data={"input1": "value1"},
output_data={"output1": ["result1"]},
add_time=datetime.now(tz=timezone.utc),
queue_time=None,
start_time=datetime.now(tz=timezone.utc),
end_time=datetime.now(tz=timezone.utc),
)
await connection_manager.send_execution_result(result)
mock_websocket.send_text.assert_called_once_with(
WsMessage(
method=Methods.EXECUTION_EVENT,
channel="test_graph",
data=result.model_dump(),
).model_dump_json()
)
@pytest.mark.asyncio
async def test_send_execution_result_no_subscribers(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
result: ExecutionResult = ExecutionResult(
graph_id="test_graph",
graph_version=1,
graph_exec_id="test_exec_id",
node_exec_id="test_node_exec_id",
node_id="test_node_id",
block_id="test_block_id",
status=ExecutionStatus.COMPLETED,
input_data={"input1": "value1"},
output_data={"output1": ["result1"]},
add_time=datetime.now(),
queue_time=None,
start_time=datetime.now(),
end_time=datetime.now(),
)
await connection_manager.send_execution_result(result)
mock_websocket.send_text.assert_not_called()
|
from datetime import datetime, timezone
from unittest.mock import AsyncMock
import pytest
from fastapi import WebSocket
from backend.data.execution import ExecutionResult, ExecutionStatus
from backend.server.conn_manager import ConnectionManager
from backend.server.model import Methods, WsMessage
@pytest.fixture
def connection_manager() -> ConnectionManager:
return ConnectionManager()
@pytest.fixture
def mock_websocket() -> AsyncMock:
websocket: AsyncMock = AsyncMock(spec=WebSocket)
websocket.send_text = AsyncMock()
return websocket
@pytest.mark.asyncio
async def test_connect(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
await connection_manager.connect(mock_websocket)
assert mock_websocket in connection_manager.active_connections
mock_websocket.accept.assert_called_once()
def test_disconnect(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
connection_manager.active_connections.add(mock_websocket)
connection_manager.subscriptions["test_graph"] = {mock_websocket}
connection_manager.disconnect(mock_websocket)
assert mock_websocket not in connection_manager.active_connections
assert mock_websocket not in connection_manager.subscriptions["test_graph"]
@pytest.mark.asyncio
async def test_subscribe(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
await connection_manager.subscribe("test_graph", mock_websocket)
assert mock_websocket in connection_manager.subscriptions["test_graph"]
@pytest.mark.asyncio
async def test_unsubscribe(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
connection_manager.subscriptions["test_graph"] = {mock_websocket}
await connection_manager.unsubscribe("test_graph", mock_websocket)
assert "test_graph" not in connection_manager.subscriptions
@pytest.mark.asyncio
async def test_send_execution_result(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
connection_manager.subscriptions["test_graph"] = {mock_websocket}
result: ExecutionResult = ExecutionResult(
graph_id="test_graph",
graph_version=1,
graph_exec_id="test_exec_id",
node_exec_id="test_node_exec_id",
node_id="test_node_id",
status=ExecutionStatus.COMPLETED,
input_data={"input1": "value1"},
output_data={"output1": ["result1"]},
add_time=datetime.now(tz=timezone.utc),
queue_time=None,
start_time=datetime.now(tz=timezone.utc),
end_time=datetime.now(tz=timezone.utc),
)
await connection_manager.send_execution_result(result)
mock_websocket.send_text.assert_called_once_with(
WsMessage(
method=Methods.EXECUTION_EVENT,
channel="test_graph",
data=result.model_dump(),
).model_dump_json()
)
@pytest.mark.asyncio
async def test_send_execution_result_no_subscribers(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
result: ExecutionResult = ExecutionResult(
graph_id="test_graph",
graph_version=1,
graph_exec_id="test_exec_id",
node_exec_id="test_node_exec_id",
node_id="test_node_id",
status=ExecutionStatus.COMPLETED,
input_data={"input1": "value1"},
output_data={"output1": ["result1"]},
add_time=datetime.now(),
queue_time=None,
start_time=datetime.now(),
end_time=datetime.now(),
)
await connection_manager.send_execution_result(result)
mock_websocket.send_text.assert_not_called()
|
"""**Chat Models** are a variation on language models.
While Chat Models use language models under the hood, the interface they expose
is a bit different. Rather than expose a "text in, text out" API, they expose
an interface where "chat messages" are the inputs and outputs.
**Class hierarchy:**
.. code-block::
BaseLanguageModel --> BaseChatModel --> <name> # Examples: ChatOpenAI, ChatGooglePalm
**Main helpers:**
.. code-block::
AIMessage, BaseMessage, HumanMessage
""" # noqa: E501
import warnings
from langchain_core._api import LangChainDeprecationWarning
from langchain._api.interactive_env import is_interactive_env
from langchain.chat_models.base import init_chat_model
def __getattr__(name: str) -> None:
from langchain_community import chat_models
# If not in interactive env, raise warning.
if not is_interactive_env():
warnings.warn(
"Importing chat models from langchain is deprecated. Importing from "
"langchain will no longer be supported as of langchain==0.2.0. "
"Please import from langchain-community instead:\n\n"
f"`from langchain_community.chat_models import {name}`.\n\n"
"To install langchain-community run `pip install -U langchain-community`.",
category=LangChainDeprecationWarning,
)
return getattr(chat_models, name)
__all__ = [
"AzureChatOpenAI",
"BedrockChat",
"ChatAnthropic",
"ChatAnyscale",
"ChatBaichuan",
"ChatCohere",
"ChatDatabricks",
"ChatEverlyAI",
"ChatFireworks",
"ChatGooglePalm",
"ChatHunyuan",
"ChatJavelinAIGateway",
"ChatKonko",
"ChatLiteLLM",
"ChatMLflowAIGateway",
"ChatMlflow",
"ChatOllama",
"ChatOpenAI",
"ChatVertexAI",
"ChatYandexGPT",
"ErnieBotChat",
"FakeListChatModel",
"GigaChat",
"HumanInputChatModel",
"JinaChat",
"MiniMaxChat",
"PaiEasChatEndpoint",
"PromptLayerChatOpenAI",
"QianfanChatEndpoint",
"VolcEngineMaasChat",
"init_chat_model",
]
|
"""**Chat Models** are a variation on language models.
While Chat Models use language models under the hood, the interface they expose
is a bit different. Rather than expose a "text in, text out" API, they expose
an interface where "chat messages" are the inputs and outputs.
**Class hierarchy:**
.. code-block::
BaseLanguageModel --> BaseChatModel --> <name> # Examples: ChatOpenAI, ChatGooglePalm
**Main helpers:**
.. code-block::
AIMessage, BaseMessage, HumanMessage
""" # noqa: E501
import warnings
from langchain_core._api import LangChainDeprecationWarning
from langchain._api.interactive_env import is_interactive_env
from langchain.chat_models.base import init_chat_model
def __getattr__(name: str) -> None:
from langchain_community import chat_models
# If not in interactive env, raise warning.
if not is_interactive_env():
warnings.warn(
"Importing chat models from langchain is deprecated. Importing from "
"langchain will no longer be supported as of langchain==0.2.0. "
"Please import from langchain-community instead:\n\n"
f"`from langchain_community.chat_models import {name}`.\n\n"
"To install langchain-community run `pip install -U langchain-community`.",
category=LangChainDeprecationWarning,
)
return getattr(chat_models, name)
__all__ = [
"init_chat_model",
"ChatOpenAI",
"BedrockChat",
"AzureChatOpenAI",
"FakeListChatModel",
"PromptLayerChatOpenAI",
"ChatDatabricks",
"ChatEverlyAI",
"ChatAnthropic",
"ChatCohere",
"ChatGooglePalm",
"ChatMlflow",
"ChatMLflowAIGateway",
"ChatOllama",
"ChatVertexAI",
"JinaChat",
"HumanInputChatModel",
"MiniMaxChat",
"ChatAnyscale",
"ChatLiteLLM",
"ErnieBotChat",
"ChatJavelinAIGateway",
"ChatKonko",
"PaiEasChatEndpoint",
"QianfanChatEndpoint",
"ChatFireworks",
"ChatYandexGPT",
"ChatBaichuan",
"ChatHunyuan",
"GigaChat",
"VolcEngineMaasChat",
]
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.losses.CSRLoss import CSRLoss
from sentence_transformers.sparse_encoder.losses.CSRReconstructionLoss import (
CSRReconstructionLoss,
)
from sentence_transformers.sparse_encoder.losses.FlopsLoss import FlopsLoss
from sentence_transformers.sparse_encoder.losses.SparseAnglELoss import SparseAnglELoss
from sentence_transformers.sparse_encoder.losses.SparseCachedGISTEmbedLoss import (
SparseCachedGISTEmbedLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCachedMultipleNegativesRankingLoss import (
SparseCachedMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import (
SparseCoSENTLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCosineSimilarityLoss import (
SparseCosineSimilarityLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseDistillKLDivLoss import (
SparseDistillKLDivLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseGISTEmbedLoss import (
SparseGISTEmbedLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseMarginMSELoss import (
SparseMarginMSELoss,
)
from sentence_transformers.sparse_encoder.losses.SparseMSELoss import SparseMSELoss
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseTripletLoss import (
SparseTripletLoss,
)
from sentence_transformers.sparse_encoder.losses.SpladeLoss import SpladeLoss
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
]
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.losses.CSRLoss import CSRLoss
from sentence_transformers.sparse_encoder.losses.CSRReconstructionLoss import (
CSRReconstructionLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseAnglELoss import SparseAnglELoss
from sentence_transformers.sparse_encoder.losses.SparseCachedGISTEmbedLoss import (
SparseCachedGISTEmbedLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCachedMultipleNegativesRankingLoss import (
SparseCachedMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import (
SparseCoSENTLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCosineSimilarityLoss import (
SparseCosineSimilarityLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseDistillKLDivLoss import (
SparseDistillKLDivLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseGISTEmbedLoss import (
SparseGISTEmbedLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseMarginMSELoss import (
SparseMarginMSELoss,
)
from sentence_transformers.sparse_encoder.losses.SparseMSELoss import SparseMSELoss
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseTripletLoss import (
SparseTripletLoss,
)
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Default: 1.
draw_gt (bool): Whether to draw the ground truth. Default to True.
draw_pred (bool): Whether to draw the predicted result.
Default to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def after_test_iter(
self,
runner,
batch_idx: int,
data_batch: Optional[Sequence[dict]] = None,
outputs: Optional[Sequence[BaseDataElement]] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (Sequence[dict], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataElement], optional): Outputs from model.
Defaults to None.
"""
if self.every_n_iters(runner, self._interval):
for data, output in zip(data_batch, outputs): # type: ignore
input = data['inputs']
data_sample = data['data_sample']
input = tensor2imgs(input,
**data_sample.get('img_norm_cfg',
dict()))[0]
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.writer.add_image(name, origin_image, data_sample,
output, self.draw_gt, self.draw_pred)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Any, Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Default: 1.
draw_gt (bool): Whether to draw the ground truth. Default to True.
draw_pred (bool): Whether to draw the predicted result.
Default to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def after_test_iter(
self,
runner,
batch_idx: int,
data_batch: Optional[Sequence[Tuple[Any, BaseDataElement]]] = None,
outputs: Optional[Sequence[BaseDataElement]] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataElement], optional): Outputs from model.
Defaults to None.
"""
if self.every_n_iters(runner, self._interval):
inputs, data_samples = data_batch # type: ignore
inputs = tensor2imgs(inputs,
**data_samples[0].get('img_norm_cfg', dict()))
for input, data_sample, output in zip(
inputs,
data_samples, # type: ignore
outputs): # type: ignore
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.writer.add_image(name, origin_image, data_sample,
output, self.draw_gt, self.draw_pred)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmengine.testing import assert_allclose
from mmdet.models.task_modules.assigners import GridAssigner
class TestGridAssigner(TestCase):
def test_assign(self):
assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.3)
pred_instances = InstanceData(
priors=torch.Tensor([[23, 23, 43, 43], [4, 5, 6, 7]]),
responsible_flags=torch.BoolTensor([1, 1]))
gt_instances = InstanceData(
bboxes=torch.Tensor([[23, 23, 43, 43]]),
labels=torch.LongTensor([0]))
assign_result = assigner.assign(
pred_instances=pred_instances, gt_instances=gt_instances)
expected_gt_inds = torch.LongTensor([1, 0])
assert_allclose(assign_result.gt_inds, expected_gt_inds)
# invalid neg_iou_thr
with self.assertRaises(AssertionError):
assigner = GridAssigner(
pos_iou_thr=0.5, neg_iou_thr=[0.3, 0.1, 0.4])
assigner.assign(
pred_instances=pred_instances, gt_instances=gt_instances)
# multi-neg_iou_thr
assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=(0.1, 0.3))
assign_result = assigner.assign(
pred_instances=pred_instances, gt_instances=gt_instances)
expected_gt_inds = torch.LongTensor([1, -1])
assert_allclose(assign_result.gt_inds, expected_gt_inds)
# gt_max_assign_all=False
assigner = GridAssigner(
pos_iou_thr=0.5, neg_iou_thr=0.3, gt_max_assign_all=False)
assign_result = assigner.assign(
pred_instances=pred_instances, gt_instances=gt_instances)
expected_gt_inds = torch.LongTensor([1, 0])
assert_allclose(assign_result.gt_inds, expected_gt_inds)
# large min_pos_iou
assigner = GridAssigner(
pos_iou_thr=0.5, neg_iou_thr=0.3, min_pos_iou=1)
assign_result = assigner.assign(
pred_instances=pred_instances, gt_instances=gt_instances)
expected_gt_inds = torch.LongTensor([1, 0])
assert_allclose(assign_result.gt_inds, expected_gt_inds)
def test_assign_with_empty_gt(self):
assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.3)
pred_instances = InstanceData(
priors=torch.Tensor([[0, 12, 23, 34], [4, 5, 6, 7]]),
responsible_flags=torch.BoolTensor([1, 1]))
gt_instances = InstanceData(
bboxes=torch.empty(0, 4), labels=torch.empty(0))
assign_result = assigner.assign(
pred_instances=pred_instances, gt_instances=gt_instances)
expected_gt_inds = torch.LongTensor([0, 0])
assert_allclose(assign_result.gt_inds, expected_gt_inds)
def test_assign_with_empty_priors(self):
assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.3)
pred_instances = InstanceData(
priors=torch.Tensor(torch.empty(0, 4)),
responsible_flags=torch.empty(0))
gt_instances = InstanceData(
bboxes=torch.Tensor([[23, 23, 43, 43]]),
labels=torch.LongTensor([0]))
assign_result = assigner.assign(
pred_instances=pred_instances, gt_instances=gt_instances)
expected_gt_inds = torch.LongTensor([])
assert_allclose(assign_result.gt_inds, expected_gt_inds)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.data import InstanceData
from mmengine.testing import assert_allclose
from mmdet.models.task_modules.assigners import GridAssigner
class TestGridAssigner(TestCase):
def test_assign(self):
assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.3)
pred_instances = InstanceData(
priors=torch.Tensor([[23, 23, 43, 43], [4, 5, 6, 7]]),
responsible_flags=torch.BoolTensor([1, 1]))
gt_instances = InstanceData(
bboxes=torch.Tensor([[23, 23, 43, 43]]),
labels=torch.LongTensor([0]))
assign_result = assigner.assign(
pred_instances=pred_instances, gt_instances=gt_instances)
expected_gt_inds = torch.LongTensor([1, 0])
assert_allclose(assign_result.gt_inds, expected_gt_inds)
# invalid neg_iou_thr
with self.assertRaises(AssertionError):
assigner = GridAssigner(
pos_iou_thr=0.5, neg_iou_thr=[0.3, 0.1, 0.4])
assigner.assign(
pred_instances=pred_instances, gt_instances=gt_instances)
# multi-neg_iou_thr
assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=(0.1, 0.3))
assign_result = assigner.assign(
pred_instances=pred_instances, gt_instances=gt_instances)
expected_gt_inds = torch.LongTensor([1, -1])
assert_allclose(assign_result.gt_inds, expected_gt_inds)
# gt_max_assign_all=False
assigner = GridAssigner(
pos_iou_thr=0.5, neg_iou_thr=0.3, gt_max_assign_all=False)
assign_result = assigner.assign(
pred_instances=pred_instances, gt_instances=gt_instances)
expected_gt_inds = torch.LongTensor([1, 0])
assert_allclose(assign_result.gt_inds, expected_gt_inds)
# large min_pos_iou
assigner = GridAssigner(
pos_iou_thr=0.5, neg_iou_thr=0.3, min_pos_iou=1)
assign_result = assigner.assign(
pred_instances=pred_instances, gt_instances=gt_instances)
expected_gt_inds = torch.LongTensor([1, 0])
assert_allclose(assign_result.gt_inds, expected_gt_inds)
def test_assign_with_empty_gt(self):
assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.3)
pred_instances = InstanceData(
priors=torch.Tensor([[0, 12, 23, 34], [4, 5, 6, 7]]),
responsible_flags=torch.BoolTensor([1, 1]))
gt_instances = InstanceData(
bboxes=torch.empty(0, 4), labels=torch.empty(0))
assign_result = assigner.assign(
pred_instances=pred_instances, gt_instances=gt_instances)
expected_gt_inds = torch.LongTensor([0, 0])
assert_allclose(assign_result.gt_inds, expected_gt_inds)
def test_assign_with_empty_priors(self):
assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.3)
pred_instances = InstanceData(
priors=torch.Tensor(torch.empty(0, 4)),
responsible_flags=torch.empty(0))
gt_instances = InstanceData(
bboxes=torch.Tensor([[23, 23, 43, 43]]),
labels=torch.LongTensor([0]))
assign_result = assigner.assign(
pred_instances=pred_instances, gt_instances=gt_instances)
expected_gt_inds = torch.LongTensor([])
assert_allclose(assign_result.gt_inds, expected_gt_inds)
|
from typing import Any
from llama_index.core.agent import ReActAgentWorker, StructuredPlannerAgent
from llama_index.core.agent.runner.planner import Plan, SubTask
from llama_index.core.llms.custom import CustomLLM
from llama_index.core.llms import LLMMetadata, CompletionResponse, CompletionResponseGen
from llama_index.core.tools import FunctionTool
class MockLLM(CustomLLM):
@property
def metadata(self) -> LLMMetadata:
"""
LLM metadata.
Returns:
LLMMetadata: LLM metadata containing various information about the LLM.
"""
return LLMMetadata()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
if "CREATE A PLAN" in prompt:
text = Plan(
sub_tasks=[
SubTask(
name="one", input="one", expected_output="one", dependencies=[]
),
SubTask(
name="two", input="two", expected_output="two", dependencies=[]
),
SubTask(
name="three",
input="three",
expected_output="three",
dependencies=["one", "two"],
),
]
).model_dump_json()
return CompletionResponse(text=text)
# dummy response for react
return CompletionResponse(text="Final Answer: All done")
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
raise NotImplementedError
def dummy_function(a: int, b: int) -> int:
"""A dummy function that adds two numbers together."""
return a + b
def test_planner_agent() -> None:
dummy_tool = FunctionTool.from_defaults(fn=dummy_function)
dummy_llm = MockLLM()
worker = ReActAgentWorker.from_tools([dummy_tool], llm=dummy_llm)
agent = StructuredPlannerAgent(worker, tools=[dummy_tool], llm=dummy_llm)
# create a plan
plan_id = agent.create_plan("CREATE A PLAN")
plan = agent.state.plan_dict[plan_id]
assert plan is not None
assert len(plan.sub_tasks) == 3
assert len(agent.state.get_completed_sub_tasks(plan_id)) == 0
assert len(agent.state.get_remaining_subtasks(plan_id)) == 3
assert len(agent.state.get_next_sub_tasks(plan_id)) == 2
next_tasks = agent.state.get_next_sub_tasks(plan_id)
for task in next_tasks:
response = agent.run_task(task.name)
agent.state.add_completed_sub_task(plan_id, task)
assert len(agent.state.get_completed_sub_tasks(plan_id)) == 2
next_tasks = agent.state.get_next_sub_tasks(plan_id)
assert len(next_tasks) == 1
# will insert the original dummy plan again
agent.refine_plan("CREATE A PLAN", plan_id)
assert len(plan.sub_tasks) == 3
assert len(agent.state.get_completed_sub_tasks(plan_id)) == 2
assert len(agent.state.get_remaining_subtasks(plan_id)) == 1
assert len(agent.state.get_next_sub_tasks(plan_id)) == 1
|
from typing import Any
from llama_index.core.agent import ReActAgentWorker, StructuredPlannerAgent
from llama_index.core.agent.runner.planner import Plan, SubTask
from llama_index.core.llms.custom import CustomLLM
from llama_index.core.llms import LLMMetadata, CompletionResponse, CompletionResponseGen
from llama_index.core.tools import FunctionTool
class MockLLM(CustomLLM):
@property
def metadata(self) -> LLMMetadata:
"""LLM metadata.
Returns:
LLMMetadata: LLM metadata containing various information about the LLM.
"""
return LLMMetadata()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
if "CREATE A PLAN" in prompt:
text = Plan(
sub_tasks=[
SubTask(
name="one", input="one", expected_output="one", dependencies=[]
),
SubTask(
name="two", input="two", expected_output="two", dependencies=[]
),
SubTask(
name="three",
input="three",
expected_output="three",
dependencies=["one", "two"],
),
]
).model_dump_json()
return CompletionResponse(text=text)
# dummy response for react
return CompletionResponse(text="Final Answer: All done")
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
raise NotImplementedError
def dummy_function(a: int, b: int) -> int:
"""A dummy function that adds two numbers together."""
return a + b
def test_planner_agent() -> None:
dummy_tool = FunctionTool.from_defaults(fn=dummy_function)
dummy_llm = MockLLM()
worker = ReActAgentWorker.from_tools([dummy_tool], llm=dummy_llm)
agent = StructuredPlannerAgent(worker, tools=[dummy_tool], llm=dummy_llm)
# create a plan
plan_id = agent.create_plan("CREATE A PLAN")
plan = agent.state.plan_dict[plan_id]
assert plan is not None
assert len(plan.sub_tasks) == 3
assert len(agent.state.get_completed_sub_tasks(plan_id)) == 0
assert len(agent.state.get_remaining_subtasks(plan_id)) == 3
assert len(agent.state.get_next_sub_tasks(plan_id)) == 2
next_tasks = agent.state.get_next_sub_tasks(plan_id)
for task in next_tasks:
response = agent.run_task(task.name)
agent.state.add_completed_sub_task(plan_id, task)
assert len(agent.state.get_completed_sub_tasks(plan_id)) == 2
next_tasks = agent.state.get_next_sub_tasks(plan_id)
assert len(next_tasks) == 1
# will insert the original dummy plan again
agent.refine_plan("CREATE A PLAN", plan_id)
assert len(plan.sub_tasks) == 3
assert len(agent.state.get_completed_sub_tasks(plan_id)) == 2
assert len(agent.state.get_remaining_subtasks(plan_id)) == 1
assert len(agent.state.get_next_sub_tasks(plan_id)) == 1
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestKDSingleStageDetector(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand(['ld/ld_r18-gflv1-r101_fpn_1x_coco.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.bbox_head)
@parameterized.expand([('ld/ld_r18-gflv1-r101_fpn_1x_coco.py', ('cpu',
'cuda'))])
def test_single_stage_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, True)
# Test forward train
losses = detector.forward(**data, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('ld/ld_r18-gflv1-r101_fpn_1x_coco.py', ('cpu',
'cuda'))])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestKDSingleStageDetector(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand(['ld/ld_r18_gflv1_r101_fpn_coco_1x.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.bbox_head)
@parameterized.expand([('ld/ld_r18_gflv1_r101_fpn_coco_1x.py', ('cpu',
'cuda'))])
def test_single_stage_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, True)
# Test forward train
losses = detector.forward(**data, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('ld/ld_r18_gflv1_r101_fpn_coco_1x.py', ('cpu',
'cuda'))])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
|
_base_ = './yolox_s_8xb8-300e_coco.py'
# model settings
model = dict(
data_preprocessor=dict(batch_augments=[
dict(
type='BatchSyncRandomResize',
random_size_range=(320, 640),
size_divisor=32,
interval=10)
]),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640) # width, height
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.5, 1.5),
# img_scale is (width, height)
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
# Resize and Pad are for the last 15 epochs when Mosaic and
# RandomAffine are closed by YOLOXModeSwitchHook.
dict(type='Resize', scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(416, 416), keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = './yolox_s_8xb8-300e_coco.py'
# model settings
model = dict(
data_preprocessor=dict(batch_augments=[
dict(
type='BatchSyncRandomResize',
random_size_range=(320, 640),
size_divisor=32,
interval=10)
]),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640) # width, height
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.5, 1.5),
# img_scale is (width, height)
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
# Resize and Pad are for the last 15 epochs when Mosaic and
# RandomAffine are closed by YOLOXModeSwitchHook.
dict(type='Resize', scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(416, 416), keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
import pytest
from llama_index.core.extractors import DocumentContextExtractor
from llama_index.core.llms import ChatMessage, ChatResponse, MockLLM
from llama_index.core.schema import Document, NodeRelationship, TextNode
from llama_index.core.storage.docstore.simple_docstore import SimpleDocumentStore
@pytest.fixture()
def mock_llm():
class CustomMockLLM(MockLLM):
def chat(self, messages, **kwargs):
return ChatResponse(
message=ChatMessage(
role="assistant",
blocks=[
{
"text": f"Context for the provided chunk",
"block_type": "text",
}
],
)
)
return CustomMockLLM()
@pytest.fixture()
def sample_documents():
return [
Document(
text="This is chapter 1. It contains important information. This is a test document.",
metadata={"title": "Doc 1"},
),
Document(
text="Chapter 2 builds on previous concepts. It introduces new ideas. More test content here.",
metadata={"title": "Doc 2"},
),
]
@pytest.fixture()
def create_text_nodes():
def _create_nodes(document, texts):
doc_info = document.as_related_node_info()
return [
TextNode(
text=text,
metadata={},
relationships={NodeRelationship.SOURCE: doc_info},
)
for text in texts
]
return _create_nodes
@pytest.fixture()
def docstore(sample_documents):
docstore = SimpleDocumentStore()
for doc in sample_documents:
docstore.add_documents([doc])
return docstore
@pytest.fixture()
def context_extractor(docstore, mock_llm):
return DocumentContextExtractor(
docstore=docstore,
llm=mock_llm,
max_context_length=1000,
max_output_tokens=100,
oversized_document_strategy="error",
)
@pytest.mark.asyncio
async def test_context_extraction_basic(
context_extractor, sample_documents, create_text_nodes
):
doc = sample_documents[0]
nodes = create_text_nodes(
doc, ["This is chapter 1.", "It contains important information."]
)
try:
metadata_list = await context_extractor.aextract(nodes)
print("METADATA LIST: ", metadata_list)
if metadata_list is None:
raise ValueError("context_extractor.aextract() returned None")
assert len(metadata_list) == len(nodes)
for metadata in metadata_list:
assert "context" in metadata
assert metadata["context"] == "Context for the provided chunk"
except Exception as e:
print(f"Error during extraction: {e!s}")
raise
def test_invalid_oversized_strategy():
with pytest.raises(ValueError):
DocumentContextExtractor(
docstore=SimpleDocumentStore(),
llm=MockLLM(),
max_context_length=1000,
max_output_tokens=100,
oversized_document_strategy="invalid_strategy",
)
@pytest.mark.asyncio
async def test_context_extraction_oversized_document(create_text_nodes):
large_doc = Document(
text="This is a very long document. " * 1000, metadata={"title": "Large Doc"}
)
docstore = SimpleDocumentStore()
docstore.add_documents([large_doc])
extractor = DocumentContextExtractor(
docstore=docstore,
llm=MockLLM(),
max_context_length=100, # Small limit to trigger error
max_output_tokens=50,
oversized_document_strategy="error",
)
nodes = create_text_nodes(large_doc, ["This is a test chunk."])
with pytest.raises(ValueError):
await extractor.aextract(nodes)
@pytest.mark.asyncio
async def test_context_extraction_custom_prompt(
docstore, mock_llm, sample_documents, create_text_nodes
):
custom_prompt = "Generate a detailed context for this chunk:"
extractor = DocumentContextExtractor(
docstore=docstore,
llm=mock_llm,
prompt=DocumentContextExtractor.ORIGINAL_CONTEXT_PROMPT,
max_context_length=1000,
max_output_tokens=100,
)
nodes = create_text_nodes(sample_documents[0], ["Test chunk"])
metadata_list = await extractor.aextract(nodes)
assert len(metadata_list) == 1
assert "context" in metadata_list[0]
@pytest.mark.asyncio
async def test_multiple_documents_context(
context_extractor, sample_documents, create_text_nodes
):
# Create nodes from different documents
nodes = create_text_nodes(
sample_documents[0], ["This is chapter 1."]
) + create_text_nodes(
sample_documents[1], ["Chapter 2 builds on previous concepts."]
)
metadata_list = await context_extractor.aextract(nodes)
assert len(metadata_list) == 2
for metadata in metadata_list:
assert "context" in metadata
|
import pytest
from llama_index.core.extractors import DocumentContextExtractor
from llama_index.core.llms import ChatMessage, ChatResponse, MockLLM
from llama_index.core.schema import Document, NodeRelationship, TextNode
from llama_index.core.storage.docstore.simple_docstore import SimpleDocumentStore
@pytest.fixture()
def mock_llm():
class CustomMockLLM(MockLLM):
def chat(self, messages, **kwargs):
return ChatResponse(
message=ChatMessage(
role="assistant",
blocks=[
{
"text": f"Context for the provided chunk",
"block_type": "text",
}
],
)
)
return CustomMockLLM()
@pytest.fixture()
def sample_documents():
return [
Document(
text="This is chapter 1. It contains important information. This is a test document.",
metadata={"title": "Doc 1"},
),
Document(
text="Chapter 2 builds on previous concepts. It introduces new ideas. More test content here.",
metadata={"title": "Doc 2"},
),
]
@pytest.fixture()
def create_text_nodes():
def _create_nodes(document, texts):
doc_info = document.as_related_node_info()
return [
TextNode(
text=text,
metadata={},
relationships={NodeRelationship.SOURCE: doc_info},
)
for text in texts
]
return _create_nodes
@pytest.fixture()
def docstore(sample_documents):
docstore = SimpleDocumentStore()
for doc in sample_documents:
docstore.add_documents([doc])
return docstore
@pytest.fixture()
def context_extractor(docstore, mock_llm):
return DocumentContextExtractor(
docstore=docstore,
llm=mock_llm,
max_context_length=1000,
max_output_tokens=100,
oversized_document_strategy="error",
)
@pytest.mark.asyncio()
async def test_context_extraction_basic(
context_extractor, sample_documents, create_text_nodes
):
doc = sample_documents[0]
nodes = create_text_nodes(
doc, ["This is chapter 1.", "It contains important information."]
)
try:
metadata_list = await context_extractor.aextract(nodes)
print("METADATA LIST: ", metadata_list)
if metadata_list is None:
raise ValueError("context_extractor.aextract() returned None")
assert len(metadata_list) == len(nodes)
for metadata in metadata_list:
assert "context" in metadata
assert metadata["context"] == "Context for the provided chunk"
except Exception as e:
print(f"Error during extraction: {e!s}")
raise
def test_invalid_oversized_strategy():
with pytest.raises(ValueError):
DocumentContextExtractor(
docstore=SimpleDocumentStore(),
llm=MockLLM(),
max_context_length=1000,
max_output_tokens=100,
oversized_document_strategy="invalid_strategy",
)
@pytest.mark.asyncio()
async def test_context_extraction_oversized_document(create_text_nodes):
large_doc = Document(
text="This is a very long document. " * 1000, metadata={"title": "Large Doc"}
)
docstore = SimpleDocumentStore()
docstore.add_documents([large_doc])
extractor = DocumentContextExtractor(
docstore=docstore,
llm=MockLLM(),
max_context_length=100, # Small limit to trigger error
max_output_tokens=50,
oversized_document_strategy="error",
)
nodes = create_text_nodes(large_doc, ["This is a test chunk."])
with pytest.raises(ValueError):
await extractor.aextract(nodes)
@pytest.mark.asyncio()
async def test_context_extraction_custom_prompt(
docstore, mock_llm, sample_documents, create_text_nodes
):
custom_prompt = "Generate a detailed context for this chunk:"
extractor = DocumentContextExtractor(
docstore=docstore,
llm=mock_llm,
prompt=DocumentContextExtractor.ORIGINAL_CONTEXT_PROMPT,
max_context_length=1000,
max_output_tokens=100,
)
nodes = create_text_nodes(sample_documents[0], ["Test chunk"])
metadata_list = await extractor.aextract(nodes)
assert len(metadata_list) == 1
assert "context" in metadata_list[0]
@pytest.mark.asyncio()
async def test_multiple_documents_context(
context_extractor, sample_documents, create_text_nodes
):
# Create nodes from different documents
nodes = create_text_nodes(
sample_documents[0], ["This is chapter 1."]
) + create_text_nodes(
sample_documents[1], ["Chapter 2 builds on previous concepts."]
)
metadata_list = await context_extractor.aextract(nodes)
assert len(metadata_list) == 2
for metadata in metadata_list:
assert "context" in metadata
|
import json
from typing import Any, Dict, Optional, Tuple
from llama_index.core.schema import (
BaseNode,
ImageNode,
IndexNode,
NodeRelationship,
RelatedNodeInfo,
TextNode,
)
DEFAULT_TEXT_KEY = "text"
DEFAULT_EMBEDDING_KEY = "embedding"
DEFAULT_DOC_ID_KEY = "doc_id"
def _validate_is_flat_dict(metadata_dict: dict) -> None:
"""
Validate that metadata dict is flat,
and key is str, and value is one of (str, int, float, None).
"""
for key, val in metadata_dict.items():
if not isinstance(key, str):
raise ValueError("Metadata key must be str!")
if not isinstance(val, (str, int, float, type(None))):
raise ValueError(
f"Value for metadata {key} must be one of (str, int, float, None)"
)
def node_to_metadata_dict(
node: BaseNode,
remove_text: bool = False,
text_field: str = DEFAULT_TEXT_KEY,
flat_metadata: bool = False,
) -> Dict[str, Any]:
"""Common logic for saving Node data into metadata dict."""
# Using mode="json" here because BaseNode may have fields of type bytes (e.g. images in ImageBlock),
# which would cause serialization issues.
node_dict = node.model_dump(mode="json")
metadata: Dict[str, Any] = node_dict.get("metadata", {})
if flat_metadata:
_validate_is_flat_dict(metadata)
# store entire node as json string - some minor text duplication
if remove_text:
node_dict[text_field] = ""
# remove embedding from node_dict
node_dict["embedding"] = None
# dump remainder of node_dict to json string
metadata["_node_content"] = json.dumps(node_dict)
metadata["_node_type"] = node.class_name()
# store ref doc id at top level to allow metadata filtering
# kept for backwards compatibility, will consolidate in future
metadata["document_id"] = node.ref_doc_id or "None" # for Chroma
metadata["doc_id"] = node.ref_doc_id or "None" # for Pinecone, Qdrant, Redis
metadata["ref_doc_id"] = node.ref_doc_id or "None" # for Weaviate
return metadata
def metadata_dict_to_node(metadata: dict, text: Optional[str] = None) -> BaseNode:
"""Common logic for loading Node data from metadata dict."""
node_json = metadata.get("_node_content", None)
node_type = metadata.get("_node_type", None)
if node_json is None:
raise ValueError("Node content not found in metadata dict.")
node: BaseNode
if node_type == IndexNode.class_name():
node = IndexNode.from_json(node_json)
elif node_type == ImageNode.class_name():
node = ImageNode.from_json(node_json)
else:
node = TextNode.from_json(node_json)
if text is not None:
node.set_content(text)
return node
# TODO: Deprecated conversion functions
def legacy_metadata_dict_to_node(
metadata: dict, text_key: str = DEFAULT_TEXT_KEY
) -> Tuple[dict, dict, dict]:
"""Common logic for loading Node data from metadata dict."""
# make a copy first
if metadata is None:
metadata = {}
else:
metadata = metadata.copy()
# load node_info from json string
node_info_str = metadata.pop("node_info", "")
if node_info_str == "":
node_info = {}
else:
node_info = json.loads(node_info_str)
# load relationships from json string
relationships_str = metadata.pop("relationships", "")
relationships: Dict[NodeRelationship, RelatedNodeInfo]
if relationships_str == "":
relationships = {}
else:
relationships = {
NodeRelationship(k): RelatedNodeInfo(node_id=str(v))
for k, v in json.loads(relationships_str).items()
}
# remove other known fields
metadata.pop(text_key, None)
id_ = metadata.pop("id", None)
document_id = metadata.pop("document_id", None)
doc_id = metadata.pop("doc_id", None)
ref_doc_id = metadata.pop("ref_doc_id", None)
# don't remove id's from metadata that llama-index doesn't know about
ref_doc_id_info = relationships.get(NodeRelationship.PARENT, None)
if ref_doc_id_info is not None:
ref_doc_id = ref_doc_id_info.node_id
if id_ is not None and id_ != ref_doc_id:
metadata["id"] = id_
if document_id is not None and document_id != ref_doc_id:
metadata["document_id"] = document_id
if doc_id is not None and doc_id != ref_doc_id:
metadata["doc_id"] = doc_id
# remaining metadata is metadata or node_info
new_metadata = {}
for key, val in metadata.items():
# don't enforce types on metadata anymore (we did in the past)
# since how we store this data now has been updated
new_metadata[key] = val
return new_metadata, node_info, relationships
|
import json
from typing import Any, Dict, Optional, Tuple
from llama_index.core.schema import (
BaseNode,
ImageNode,
IndexNode,
NodeRelationship,
RelatedNodeInfo,
TextNode,
)
DEFAULT_TEXT_KEY = "text"
DEFAULT_EMBEDDING_KEY = "embedding"
DEFAULT_DOC_ID_KEY = "doc_id"
def _validate_is_flat_dict(metadata_dict: dict) -> None:
"""
Validate that metadata dict is flat,
and key is str, and value is one of (str, int, float, None).
"""
for key, val in metadata_dict.items():
if not isinstance(key, str):
raise ValueError("Metadata key must be str!")
if not isinstance(val, (str, int, float, type(None))):
raise ValueError(
f"Value for metadata {key} must be one of (str, int, float, None)"
)
def node_to_metadata_dict(
node: BaseNode,
remove_text: bool = False,
text_field: str = DEFAULT_TEXT_KEY,
flat_metadata: bool = False,
) -> Dict[str, Any]:
"""Common logic for saving Node data into metadata dict."""
node_dict = node.dict()
metadata: Dict[str, Any] = node_dict.get("metadata", {})
if flat_metadata:
_validate_is_flat_dict(metadata)
# store entire node as json string - some minor text duplication
if remove_text:
node_dict[text_field] = ""
# remove embedding from node_dict
node_dict["embedding"] = None
# dump remainder of node_dict to json string
metadata["_node_content"] = json.dumps(node_dict)
metadata["_node_type"] = node.class_name()
# store ref doc id at top level to allow metadata filtering
# kept for backwards compatibility, will consolidate in future
metadata["document_id"] = node.ref_doc_id or "None" # for Chroma
metadata["doc_id"] = node.ref_doc_id or "None" # for Pinecone, Qdrant, Redis
metadata["ref_doc_id"] = node.ref_doc_id or "None" # for Weaviate
return metadata
def metadata_dict_to_node(metadata: dict, text: Optional[str] = None) -> BaseNode:
"""Common logic for loading Node data from metadata dict."""
node_json = metadata.get("_node_content", None)
node_type = metadata.get("_node_type", None)
if node_json is None:
raise ValueError("Node content not found in metadata dict.")
node: BaseNode
if node_type == IndexNode.class_name():
node = IndexNode.from_json(node_json)
elif node_type == ImageNode.class_name():
node = ImageNode.from_json(node_json)
else:
node = TextNode.from_json(node_json)
if text is not None:
node.set_content(text)
return node
# TODO: Deprecated conversion functions
def legacy_metadata_dict_to_node(
metadata: dict, text_key: str = DEFAULT_TEXT_KEY
) -> Tuple[dict, dict, dict]:
"""Common logic for loading Node data from metadata dict."""
# make a copy first
if metadata is None:
metadata = {}
else:
metadata = metadata.copy()
# load node_info from json string
node_info_str = metadata.pop("node_info", "")
if node_info_str == "":
node_info = {}
else:
node_info = json.loads(node_info_str)
# load relationships from json string
relationships_str = metadata.pop("relationships", "")
relationships: Dict[NodeRelationship, RelatedNodeInfo]
if relationships_str == "":
relationships = {}
else:
relationships = {
NodeRelationship(k): RelatedNodeInfo(node_id=str(v))
for k, v in json.loads(relationships_str).items()
}
# remove other known fields
metadata.pop(text_key, None)
id_ = metadata.pop("id", None)
document_id = metadata.pop("document_id", None)
doc_id = metadata.pop("doc_id", None)
ref_doc_id = metadata.pop("ref_doc_id", None)
# don't remove id's from metadata that llama-index doesn't know about
ref_doc_id_info = relationships.get(NodeRelationship.PARENT, None)
if ref_doc_id_info is not None:
ref_doc_id = ref_doc_id_info.node_id
if id_ is not None and id_ != ref_doc_id:
metadata["id"] = id_
if document_id is not None and document_id != ref_doc_id:
metadata["document_id"] = document_id
if doc_id is not None and doc_id != ref_doc_id:
metadata["doc_id"] = doc_id
# remaining metadata is metadata or node_info
new_metadata = {}
for key, val in metadata.items():
# don't enforce types on metadata anymore (we did in the past)
# since how we store this data now has been updated
new_metadata[key] = val
return new_metadata, node_info, relationships
|
"""
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <https://matplotlib.org/basemap/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data (see
:ref:`sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py` for an
example of classification based on the attributes in this dataset). It simply shows the
kernel density estimate of observed data points in geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<https://www.iucnredlist.org/species/3038/47437046>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References
----------
- `"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_species_distributions
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ["Bradypus Variegatus", "Microryzomys Minutus"]
Xtrain = np.vstack([data["train"]["dd lat"], data["train"]["dd long"]]).T
ytrain = np.array(
[d.decode("ascii").startswith("micro") for d in data["train"]["species"]],
dtype="int",
)
Xtrain *= np.pi / 180.0 # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.0
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(
bandwidth=0.04, metric="haversine", kernel="gaussian", algorithm="ball_tree"
)
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = np.full(land_mask.shape[0], -9999, dtype="int")
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(
projection="cyl",
llcrnrlat=Y.min(),
urcrnrlat=Y.max(),
llcrnrlon=X.min(),
urcrnrlon=X.max(),
resolution="c",
)
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(
X, Y, land_reference, levels=[-9998], colors="k", linestyles="solid"
)
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
|
"""
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <https://matplotlib.org/basemap/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data (see
:ref:`sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py` for an
example of classification based on the attributes in this dataset). It simply shows the
kernel density estimate of observed data points in geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<https://www.iucnredlist.org/species/3038/47437046>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References
----------
- `"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
""" # noqa: E501
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_species_distributions
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ["Bradypus Variegatus", "Microryzomys Minutus"]
Xtrain = np.vstack([data["train"]["dd lat"], data["train"]["dd long"]]).T
ytrain = np.array(
[d.decode("ascii").startswith("micro") for d in data["train"]["species"]],
dtype="int",
)
Xtrain *= np.pi / 180.0 # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.0
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(
bandwidth=0.04, metric="haversine", kernel="gaussian", algorithm="ball_tree"
)
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = np.full(land_mask.shape[0], -9999, dtype="int")
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(
projection="cyl",
llcrnrlat=Y.min(),
urcrnrlat=Y.max(),
llcrnrlon=X.min(),
urcrnrlon=X.max(),
resolution="c",
)
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(
X, Y, land_reference, levels=[-9998], colors="k", linestyles="solid"
)
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
|
from __future__ import annotations
import json
from typing import Optional, Type
import requests
import yaml
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel
class ApiConfig(BaseModel):
"""API Configuration."""
type: str
url: str
has_user_authentication: Optional[bool] = False
class AIPlugin(BaseModel):
"""AI Plugin Definition."""
schema_version: str
name_for_model: str
name_for_human: str
description_for_model: str
description_for_human: str
auth: Optional[dict] = None
api: ApiConfig
logo_url: Optional[str]
contact_email: Optional[str]
legal_info_url: Optional[str]
@classmethod
def from_url(cls, url: str) -> AIPlugin:
"""Instantiate AIPlugin from a URL."""
response = requests.get(url).json()
return cls(**response)
def marshal_spec(txt: str) -> dict:
"""Convert the yaml or json serialized spec to a dict.
Args:
txt: The yaml or json serialized spec.
Returns:
dict: The spec as a dict.
"""
try:
return json.loads(txt)
except json.JSONDecodeError:
return yaml.safe_load(txt)
class AIPluginToolSchema(BaseModel):
"""Schema for AIPluginTool."""
tool_input: Optional[str] = ""
class AIPluginTool(BaseTool):
"""Tool for getting the OpenAPI spec for an AI Plugin."""
plugin: AIPlugin
api_spec: str
args_schema: Type[AIPluginToolSchema] = AIPluginToolSchema
@classmethod
def from_plugin_url(cls, url: str) -> AIPluginTool:
plugin = AIPlugin.from_url(url)
description = (
f"Call this tool to get the OpenAPI spec (and usage guide) "
f"for interacting with the {plugin.name_for_human} API. "
f"You should only call this ONCE! What is the "
f"{plugin.name_for_human} API useful for? "
) + plugin.description_for_human
open_api_spec_str = requests.get(plugin.api.url).text
open_api_spec = marshal_spec(open_api_spec_str)
api_spec = (
f"Usage Guide: {plugin.description_for_model}\n\n"
f"OpenAPI Spec: {open_api_spec}"
)
return cls(
name=plugin.name_for_model,
description=description,
plugin=plugin,
api_spec=api_spec,
)
def _run(
self,
tool_input: Optional[str] = "",
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_spec
async def _arun(
self,
tool_input: Optional[str] = None,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return self.api_spec
|
from __future__ import annotations
import json
from typing import Optional, Type
import requests
import yaml
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel
class ApiConfig(BaseModel):
"""API Configuration."""
type: str
url: str
has_user_authentication: Optional[bool] = False
class AIPlugin(BaseModel):
"""AI Plugin Definition."""
schema_version: str
name_for_model: str
name_for_human: str
description_for_model: str
description_for_human: str
auth: Optional[dict] = None
api: ApiConfig
logo_url: Optional[str]
contact_email: Optional[str]
legal_info_url: Optional[str]
@classmethod
def from_url(cls, url: str) -> AIPlugin:
"""Instantiate AIPlugin from a URL."""
response = requests.get(url).json()
return cls(**response)
def marshal_spec(txt: str) -> dict:
"""Convert the yaml or json serialized spec to a dict.
Args:
txt: The yaml or json serialized spec.
Returns:
dict: The spec as a dict.
"""
try:
return json.loads(txt)
except json.JSONDecodeError:
return yaml.safe_load(txt)
class AIPluginToolSchema(BaseModel):
"""Schema for AIPluginTool."""
tool_input: Optional[str] = ""
class AIPluginTool(BaseTool): # type: ignore[override, override]
"""Tool for getting the OpenAPI spec for an AI Plugin."""
plugin: AIPlugin
api_spec: str
args_schema: Type[AIPluginToolSchema] = AIPluginToolSchema
@classmethod
def from_plugin_url(cls, url: str) -> AIPluginTool:
plugin = AIPlugin.from_url(url)
description = (
f"Call this tool to get the OpenAPI spec (and usage guide) "
f"for interacting with the {plugin.name_for_human} API. "
f"You should only call this ONCE! What is the "
f"{plugin.name_for_human} API useful for? "
) + plugin.description_for_human
open_api_spec_str = requests.get(plugin.api.url).text
open_api_spec = marshal_spec(open_api_spec_str)
api_spec = (
f"Usage Guide: {plugin.description_for_model}\n\n"
f"OpenAPI Spec: {open_api_spec}"
)
return cls(
name=plugin.name_for_model,
description=description,
plugin=plugin,
api_spec=api_spec,
)
def _run(
self,
tool_input: Optional[str] = "",
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_spec
async def _arun(
self,
tool_input: Optional[str] = None,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return self.api_spec
|
from langchain_core.agents import AgentAction
from langchain.agents.format_scratchpad.log import format_log_to_str
def test_single_agent_action_observation() -> None:
intermediate_steps = [
(AgentAction(tool="Tool1", tool_input="input1", log="Log1"), "Observation1"),
]
expected_result = "Log1\nObservation: Observation1\nThought: "
assert format_log_to_str(intermediate_steps) == expected_result
def test_multiple_agent_actions_observations() -> None:
intermediate_steps = [
(AgentAction(tool="Tool1", tool_input="input1", log="Log1"), "Observation1"),
(AgentAction(tool="Tool2", tool_input="input2", log="Log2"), "Observation2"),
(AgentAction(tool="Tool3", tool_input="input3", log="Log3"), "Observation3"),
]
expected_result = """Log1\nObservation: Observation1\nThought: \
Log2\nObservation: Observation2\nThought: Log3\nObservation: \
Observation3\nThought: """
assert format_log_to_str(intermediate_steps) == expected_result
def test_custom_prefixes() -> None:
intermediate_steps = [
(AgentAction(tool="Tool1", tool_input="input1", log="Log1"), "Observation1"),
]
observation_prefix = "Custom Observation: "
llm_prefix = "Custom Thought: "
expected_result = "Log1\nCustom Observation: Observation1\nCustom Thought: "
assert (
format_log_to_str(intermediate_steps, observation_prefix, llm_prefix)
== expected_result
)
def test_empty_intermediate_steps() -> None:
output = format_log_to_str([])
assert output == ""
|
from langchain_core.agents import AgentAction
from langchain.agents.format_scratchpad.log import format_log_to_str
def test_single_agent_action_observation() -> None:
intermediate_steps = [
(AgentAction(tool="Tool1", tool_input="input1", log="Log1"), "Observation1")
]
expected_result = "Log1\nObservation: Observation1\nThought: "
assert format_log_to_str(intermediate_steps) == expected_result
def test_multiple_agent_actions_observations() -> None:
intermediate_steps = [
(AgentAction(tool="Tool1", tool_input="input1", log="Log1"), "Observation1"),
(AgentAction(tool="Tool2", tool_input="input2", log="Log2"), "Observation2"),
(AgentAction(tool="Tool3", tool_input="input3", log="Log3"), "Observation3"),
]
expected_result = """Log1\nObservation: Observation1\nThought: \
Log2\nObservation: Observation2\nThought: Log3\nObservation: \
Observation3\nThought: """
assert format_log_to_str(intermediate_steps) == expected_result
def test_custom_prefixes() -> None:
intermediate_steps = [
(AgentAction(tool="Tool1", tool_input="input1", log="Log1"), "Observation1")
]
observation_prefix = "Custom Observation: "
llm_prefix = "Custom Thought: "
expected_result = "Log1\nCustom Observation: Observation1\nCustom Thought: "
assert (
format_log_to_str(intermediate_steps, observation_prefix, llm_prefix)
== expected_result
)
def test_empty_intermediate_steps() -> None:
output = format_log_to_str([])
assert output == ""
|
"""Azure Translate tool spec."""
import requests
from llama_index.core.tools.tool_spec.base import BaseToolSpec
ENDPOINT_BASE_URL = "https://api.cognitive.microsofttranslator.com/translate"
class AzureTranslateToolSpec(BaseToolSpec):
"""Azure Translate tool spec."""
spec_functions = ["translate"]
def __init__(self, api_key: str, region: str) -> None:
"""Initialize with parameters."""
self.headers = {
"Ocp-Apim-Subscription-Key": api_key,
"Ocp-Apim-Subscription-Region": region,
"Content-type": "application/json",
}
def translate(self, text: str, language: str):
"""
Use this tool to translate text from one language to another.
The source language will be automatically detected. You need to specify the target language
using a two character language code.
Args:
language (str): Target translation language.
"""
request = requests.post(
ENDPOINT_BASE_URL,
params={"api-version": "3.0", "to": language},
headers=self.headers,
json=[{"text": text}],
)
return request.json()
|
"""Azure Translate tool spec."""
import requests
from llama_index.core.tools.tool_spec.base import BaseToolSpec
ENDPOINT_BASE_URL = "https://api.cognitive.microsofttranslator.com/translate"
class AzureTranslateToolSpec(BaseToolSpec):
"""Azure Translate tool spec."""
spec_functions = ["translate"]
def __init__(self, api_key: str, region: str) -> None:
"""Initialize with parameters."""
self.headers = {
"Ocp-Apim-Subscription-Key": api_key,
"Ocp-Apim-Subscription-Region": region,
"Content-type": "application/json",
}
def translate(self, text: str, language: str):
"""
Use this tool to translate text from one language to another.
The source language will be automatically detected. You need to specify the target language
using a two character language code.
Args:
language (str): Target translation language.
"""
request = requests.post(
ENDPOINT_BASE_URL,
params={"api-version": "3.0", "to": language},
headers=self.headers,
json=[{"text": text}],
)
return request.json()
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.models.utils.misc import get_box_tensor
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import HorizontalBoxes
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class YOLOBBoxCoder(BaseBBoxCoder):
"""YOLO BBox coder.
Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divide
image into grids, and encode bbox (x1, y1, x2, y2) into (cx, cy, dw, dh).
cx, cy in [0., 1.], denotes relative center position w.r.t the center of
bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`.
Args:
eps (float): Min value of cx, cy when encoding.
"""
def __init__(self, eps=1e-6, **kwargs):
super().__init__(**kwargs)
self.eps = eps
def encode(self, bboxes, gt_bboxes, stride):
"""Get box regression transformation deltas that can be used to
transform the ``bboxes`` into the ``gt_bboxes``.
Args:
bboxes (torch.Tensor or :obj:`BaseBoxes`): Source boxes,
e.g., anchors.
gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): Target of the
transformation, e.g., ground-truth boxes.
stride (torch.Tensor | int): Stride of bboxes.
Returns:
torch.Tensor: Box transformation deltas
"""
bboxes = get_box_tensor(bboxes)
gt_bboxes = get_box_tensor(gt_bboxes)
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]
h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]
x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
w = bboxes[..., 2] - bboxes[..., 0]
h = bboxes[..., 3] - bboxes[..., 1]
w_target = torch.log((w_gt / w).clamp(min=self.eps))
h_target = torch.log((h_gt / h).clamp(min=self.eps))
x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
encoded_bboxes = torch.stack(
[x_center_target, y_center_target, w_target, h_target], dim=-1)
return encoded_bboxes
def decode(self, bboxes, pred_bboxes, stride):
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
boxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes,
e.g. anchors.
pred_bboxes (torch.Tensor): Encoded boxes with shape
stride (torch.Tensor | int): Strides of bboxes.
Returns:
Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes.
"""
bboxes = get_box_tensor(bboxes)
assert pred_bboxes.size(-1) == bboxes.size(-1) == 4
xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + (
pred_bboxes[..., :2] - 0.5) * stride
whs = (bboxes[..., 2:] -
bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp()
decoded_bboxes = torch.stack(
(xy_centers[..., 0] - whs[..., 0], xy_centers[..., 1] -
whs[..., 1], xy_centers[..., 0] + whs[..., 0],
xy_centers[..., 1] + whs[..., 1]),
dim=-1)
if self.use_box_type:
decoded_bboxes = HorizontalBoxes(decoded_bboxes)
return decoded_bboxes
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.registry import TASK_UTILS
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class YOLOBBoxCoder(BaseBBoxCoder):
"""YOLO BBox coder.
Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divide
image into grids, and encode bbox (x1, y1, x2, y2) into (cx, cy, dw, dh).
cx, cy in [0., 1.], denotes relative center position w.r.t the center of
bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`.
Args:
eps (float): Min value of cx, cy when encoding.
"""
def __init__(self, eps=1e-6):
super(BaseBBoxCoder, self).__init__()
self.eps = eps
def encode(self, bboxes, gt_bboxes, stride):
"""Get box regression transformation deltas that can be used to
transform the ``bboxes`` into the ``gt_bboxes``.
Args:
bboxes (torch.Tensor): Source boxes, e.g., anchors.
gt_bboxes (torch.Tensor): Target of the transformation, e.g.,
ground-truth boxes.
stride (torch.Tensor | int): Stride of bboxes.
Returns:
torch.Tensor: Box transformation deltas
"""
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]
h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]
x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
w = bboxes[..., 2] - bboxes[..., 0]
h = bboxes[..., 3] - bboxes[..., 1]
w_target = torch.log((w_gt / w).clamp(min=self.eps))
h_target = torch.log((h_gt / h).clamp(min=self.eps))
x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
encoded_bboxes = torch.stack(
[x_center_target, y_center_target, w_target, h_target], dim=-1)
return encoded_bboxes
def decode(self, bboxes, pred_bboxes, stride):
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
boxes (torch.Tensor): Basic boxes, e.g. anchors.
pred_bboxes (torch.Tensor): Encoded boxes with shape
stride (torch.Tensor | int): Strides of bboxes.
Returns:
torch.Tensor: Decoded boxes.
"""
assert pred_bboxes.size(-1) == bboxes.size(-1) == 4
xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + (
pred_bboxes[..., :2] - 0.5) * stride
whs = (bboxes[..., 2:] -
bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp()
decoded_bboxes = torch.stack(
(xy_centers[..., 0] - whs[..., 0], xy_centers[..., 1] -
whs[..., 1], xy_centers[..., 0] + whs[..., 0],
xy_centers[..., 1] + whs[..., 1]),
dim=-1)
return decoded_bboxes
|
import logging
import sentry_sdk
from sentry_sdk.integrations.anthropic import AnthropicIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from backend.util.settings import Settings
def sentry_init():
sentry_dsn = Settings().secrets.sentry_dsn
sentry_sdk.init(
dsn=sentry_dsn,
traces_sample_rate=1.0,
profiles_sample_rate=1.0,
environment=f"app:{Settings().config.app_env.value}-behave:{Settings().config.behave_as.value}",
_experiments={"enable_logs": True},
integrations=[
LoggingIntegration(sentry_logs_level=logging.INFO),
AnthropicIntegration(
include_prompts=False,
),
],
)
|
import logging
import sentry_sdk
from sentry_sdk.integrations.anthropic import AnthropicIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from backend.util.settings import Settings
def sentry_init():
sentry_dsn = Settings().secrets.sentry_dsn
sentry_sdk.init(
dsn=sentry_dsn,
traces_sample_rate=1.0,
profiles_sample_rate=1.0,
environment=f"app:{Settings().config.app_env.value}-behave:{Settings().config.behave_as.value}",
_experiments={
"enable_logs": True,
},
integrations=[
LoggingIntegration(sentry_logs_level=logging.INFO),
AnthropicIntegration(
include_prompts=False,
),
],
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import os.path as osp
from argparse import ArgumentParser
from mmcv import Config
from mmdet.apis import inference_detector, init_detector, show_result_pyplot
from mmdet.utils import get_root_logger
def parse_args():
parser = ArgumentParser()
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint_root', help='Checkpoint file root path')
parser.add_argument('--img', default='demo/demo.jpg', help='Image file')
parser.add_argument('--aug', action='store_true', help='aug test')
parser.add_argument('--model-name', help='model name to inference')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='the interval of show (s), 0 is block')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
args = parser.parse_args()
return args
def inference_model(config_name, checkpoint, args, logger=None):
cfg = Config.fromfile(config_name)
if args.aug:
if 'flip' in cfg.data.test.pipeline[1]:
cfg.data.test.pipeline[1].flip = True
else:
if logger is not None:
logger.error(f'{config_name}: unable to start aug test')
else:
print(f'{config_name}: unable to start aug test', flush=True)
model = init_detector(cfg, checkpoint, device=args.device)
# test a single image
result = inference_detector(model, args.img)
# show the results
if args.show:
show_result_pyplot(
model,
args.img,
result,
score_thr=args.score_thr,
wait_time=args.wait_time)
return result
# Sample test whether the inference code is correct
def main(args):
config = Config.fromfile(args.config)
# test single model
if args.model_name:
if args.model_name in config:
model_infos = config[args.model_name]
if not isinstance(model_infos, list):
model_infos = [model_infos]
model_info = model_infos[0]
config_name = model_info['config'].strip()
print(f'processing: {config_name}', flush=True)
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
# build the model from a config file and a checkpoint file
inference_model(config_name, checkpoint, args)
return
else:
raise RuntimeError('model name input error.')
# test all model
logger = get_root_logger(
log_file='benchmark_test_image.log', log_level=logging.ERROR)
for model_key in config:
model_infos = config[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'], flush=True)
config_name = model_info['config'].strip()
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
try:
# build the model from a config file and a checkpoint file
inference_model(config_name, checkpoint, args, logger)
except Exception as e:
logger.error(f'{config_name} " : {repr(e)}')
if __name__ == '__main__':
args = parse_args()
main(args)
|
import logging
import os.path as osp
from argparse import ArgumentParser
from mmcv import Config
from mmdet.apis import inference_detector, init_detector, show_result_pyplot
from mmdet.utils import get_root_logger
def parse_args():
parser = ArgumentParser()
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint_root', help='Checkpoint file root path')
parser.add_argument('--img', default='demo/demo.jpg', help='Image file')
parser.add_argument('--aug', action='store_true', help='aug test')
parser.add_argument('--model-name', help='model name to inference')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='the interval of show (s), 0 is block')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
args = parser.parse_args()
return args
def inference_model(config_name, checkpoint, args, logger=None):
cfg = Config.fromfile(config_name)
if args.aug:
if 'flip' in cfg.data.test.pipeline[1]:
cfg.data.test.pipeline[1].flip = True
else:
if logger is not None:
logger.error(f'{config_name}: unable to start aug test')
else:
print(f'{config_name}: unable to start aug test', flush=True)
model = init_detector(cfg, checkpoint, device=args.device)
# test a single image
result = inference_detector(model, args.img)
# show the results
if args.show:
show_result_pyplot(
model,
args.img,
result,
score_thr=args.score_thr,
wait_time=args.wait_time)
return result
# Sample test whether the inference code is correct
def main(args):
config = Config.fromfile(args.config)
# test single model
if args.model_name:
if args.model_name in config:
model_infos = config[args.model_name]
if not isinstance(model_infos, list):
model_infos = [model_infos]
model_info = model_infos[0]
config_name = model_info['config'].strip()
print(f'processing: {config_name}', flush=True)
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
# build the model from a config file and a checkpoint file
inference_model(config_name, checkpoint, args)
return
else:
raise RuntimeError('model name input error.')
# test all model
logger = get_root_logger(
log_file='benchmark_test_image.log', log_level=logging.ERROR)
for model_key in config:
model_infos = config[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'], flush=True)
config_name = model_info['config'].strip()
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
try:
# build the model from a config file and a checkpoint file
inference_model(config_name, checkpoint, args, logger)
except Exception as e:
logger.error(f'{config_name} " : {repr(e)}')
if __name__ == '__main__':
args = parse_args()
main(args)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestPISARoIHead(TestCase):
def setUp(self):
register_all_modules()
self.roi_head_cfg = get_roi_head_cfg(
'pisa/faster-rcnn_r50_fpn_pisa_1x_coco.py')
def test_init(self):
roi_head = MODELS.build(self.roi_head_cfg)
self.assertTrue(roi_head.with_bbox)
@parameterized.expand(['cpu', 'cuda'])
def test_pisa_roi_head(self, device):
"""Tests trident roi head predict."""
if not torch.cuda.is_available() and device == 'cuda':
# RoI pooling only support in GPU
return unittest.skip('test requires GPU and torch+cuda')
roi_head = MODELS.build(self.roi_head_cfg)
roi_head = roi_head.to(device=device)
s = 256
feats = []
for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device=device))
image_shapes = [(3, s, s)]
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[1],
num_classes=4,
with_mask=True,
device=device)['data_samples']
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100, device=device)
out = roi_head.loss(feats, proposals_list, batch_data_samples)
loss_cls = out['loss_cls']
loss_bbox = out['loss_bbox']
self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')
self.assertGreater(loss_bbox.sum(), 0, 'box loss should be non-zero')
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[0],
num_classes=4,
with_mask=True,
device=device)['data_samples']
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100, device=device)
out = roi_head.loss(feats, proposals_list, batch_data_samples)
empty_cls_loss = out['loss_cls']
empty_bbox_loss = out['loss_bbox']
self.assertGreater(empty_cls_loss.sum(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_bbox_loss.sum(), 0,
'there should be no box loss when there are no true boxes')
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestPISARoIHead(TestCase):
def setUp(self):
register_all_modules()
self.roi_head_cfg = get_roi_head_cfg(
'pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py')
def test_init(self):
roi_head = MODELS.build(self.roi_head_cfg)
self.assertTrue(roi_head.with_bbox)
@parameterized.expand(['cpu', 'cuda'])
def test_pisa_roi_head(self, device):
"""Tests trident roi head predict."""
if not torch.cuda.is_available() and device == 'cuda':
# RoI pooling only support in GPU
return unittest.skip('test requires GPU and torch+cuda')
roi_head = MODELS.build(self.roi_head_cfg)
roi_head = roi_head.to(device=device)
s = 256
feats = []
for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device=device))
image_shapes = [(3, s, s)]
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[1],
num_classes=4,
with_mask=True,
device=device)['data_samples']
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100, device=device)
out = roi_head.loss(feats, proposals_list, batch_data_samples)
loss_cls = out['loss_cls']
loss_bbox = out['loss_bbox']
self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')
self.assertGreater(loss_bbox.sum(), 0, 'box loss should be non-zero')
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[0],
num_classes=4,
with_mask=True,
device=device)['data_samples']
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100, device=device)
out = roi_head.loss(feats, proposals_list, batch_data_samples)
empty_cls_loss = out['loss_cls']
empty_bbox_loss = out['loss_bbox']
self.assertGreater(empty_cls_loss.sum(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_bbox_loss.sum(), 0,
'there should be no box loss when there are no true boxes')
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from PIL import Image
from .folder import find_classes, make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class Imagenette(VisionDataset):
"""`Imagenette <https://github.com/fastai/imagenette#imagenette-1>`_ image classification dataset.
Args:
root (str or ``pathlib.Path``): Root directory of the Imagenette dataset.
split (string, optional): The dataset split. Supports ``"train"`` (default), and ``"val"``.
size (string, optional): The image size. Supports ``"full"`` (default), ``"320px"``, and ``"160px"``.
download (bool, optional): If ``True``, downloads the dataset components and places them in ``root``. Already
downloaded archives are not downloaded again.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version, e.g. ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
Attributes:
classes (list): List of the class name tuples.
class_to_idx (dict): Dict with items (class name, class index).
wnids (list): List of the WordNet IDs.
wnid_to_idx (dict): Dict with items (WordNet ID, class index).
"""
_ARCHIVES = {
"full": ("https://s3.amazonaws.com/fast-ai-imageclas/imagenette2.tgz", "fe2fc210e6bb7c5664d602c3cd71e612"),
"320px": ("https://s3.amazonaws.com/fast-ai-imageclas/imagenette2-320.tgz", "3df6f0d01a2c9592104656642f5e78a3"),
"160px": ("https://s3.amazonaws.com/fast-ai-imageclas/imagenette2-160.tgz", "e793b78cc4c9e9a4ccc0c1155377a412"),
}
_WNID_TO_CLASS = {
"n01440764": ("tench", "Tinca tinca"),
"n02102040": ("English springer", "English springer spaniel"),
"n02979186": ("cassette player",),
"n03000684": ("chain saw", "chainsaw"),
"n03028079": ("church", "church building"),
"n03394916": ("French horn", "horn"),
"n03417042": ("garbage truck", "dustcart"),
"n03425413": ("gas pump", "gasoline pump", "petrol pump", "island dispenser"),
"n03445777": ("golf ball",),
"n03888257": ("parachute", "chute"),
}
def __init__(
self,
root: Union[str, Path],
split: str = "train",
size: str = "full",
download=False,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ["train", "val"])
self._size = verify_str_arg(size, "size", ["full", "320px", "160px"])
self._url, self._md5 = self._ARCHIVES[self._size]
self._size_root = Path(self.root) / Path(self._url).stem
self._image_root = str(self._size_root / self._split)
if download:
self._download()
elif not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it.")
self.wnids, self.wnid_to_idx = find_classes(self._image_root)
self.classes = [self._WNID_TO_CLASS[wnid] for wnid in self.wnids]
self.class_to_idx = {
class_name: idx for wnid, idx in self.wnid_to_idx.items() for class_name in self._WNID_TO_CLASS[wnid]
}
self._samples = make_dataset(self._image_root, self.wnid_to_idx, extensions=".jpeg")
def _check_exists(self) -> bool:
return self._size_root.exists()
def _download(self):
if self._check_exists():
return
download_and_extract_archive(self._url, self.root, md5=self._md5)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
path, label = self._samples[idx]
image = Image.open(path).convert("RGB")
if self.transform is not None:
image = self.transform(image)
if self.target_transform is not None:
label = self.target_transform(label)
return image, label
def __len__(self) -> int:
return len(self._samples)
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from PIL import Image
from .folder import find_classes, make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class Imagenette(VisionDataset):
"""`Imagenette <https://github.com/fastai/imagenette#imagenette-1>`_ image classification dataset.
Args:
root (str or ``pathlib.Path``): Root directory of the Imagenette dataset.
split (string, optional): The dataset split. Supports ``"train"`` (default), and ``"val"``.
size (string, optional): The image size. Supports ``"full"`` (default), ``"320px"``, and ``"160px"``.
download (bool, optional): If ``True``, downloads the dataset components and places them in ``root``. Already
downloaded archives are not downloaded again.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version, e.g. ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
Attributes:
classes (list): List of the class name tuples.
class_to_idx (dict): Dict with items (class name, class index).
wnids (list): List of the WordNet IDs.
wnid_to_idx (dict): Dict with items (WordNet ID, class index).
"""
_ARCHIVES = {
"full": ("https://s3.amazonaws.com/fast-ai-imageclas/imagenette2.tgz", "fe2fc210e6bb7c5664d602c3cd71e612"),
"320px": ("https://s3.amazonaws.com/fast-ai-imageclas/imagenette2-320.tgz", "3df6f0d01a2c9592104656642f5e78a3"),
"160px": ("https://s3.amazonaws.com/fast-ai-imageclas/imagenette2-160.tgz", "e793b78cc4c9e9a4ccc0c1155377a412"),
}
_WNID_TO_CLASS = {
"n01440764": ("tench", "Tinca tinca"),
"n02102040": ("English springer", "English springer spaniel"),
"n02979186": ("cassette player",),
"n03000684": ("chain saw", "chainsaw"),
"n03028079": ("church", "church building"),
"n03394916": ("French horn", "horn"),
"n03417042": ("garbage truck", "dustcart"),
"n03425413": ("gas pump", "gasoline pump", "petrol pump", "island dispenser"),
"n03445777": ("golf ball",),
"n03888257": ("parachute", "chute"),
}
def __init__(
self,
root: Union[str, Path],
split: str = "train",
size: str = "full",
download=False,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ["train", "val"])
self._size = verify_str_arg(size, "size", ["full", "320px", "160px"])
self._url, self._md5 = self._ARCHIVES[self._size]
self._size_root = Path(self.root) / Path(self._url).stem
self._image_root = str(self._size_root / self._split)
if download:
self._download()
elif not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it.")
self.wnids, self.wnid_to_idx = find_classes(self._image_root)
self.classes = [self._WNID_TO_CLASS[wnid] for wnid in self.wnids]
self.class_to_idx = {
class_name: idx for wnid, idx in self.wnid_to_idx.items() for class_name in self._WNID_TO_CLASS[wnid]
}
self._samples = make_dataset(self._image_root, self.wnid_to_idx, extensions=".jpeg")
def _check_exists(self) -> bool:
return self._size_root.exists()
def _download(self):
if self._check_exists():
raise RuntimeError(
f"The directory {self._size_root} already exists. "
f"If you want to re-download or re-extract the images, delete the directory."
)
download_and_extract_archive(self._url, self.root, md5=self._md5)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
path, label = self._samples[idx]
image = Image.open(path).convert("RGB")
if self.transform is not None:
image = self.transform(image)
if self.target_transform is not None:
label = self.target_transform(label)
return image, label
def __len__(self) -> int:
return len(self._samples)
|
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredTSVLoader(UnstructuredFileLoader):
"""Load `TSV` files using `Unstructured`.
Like other
Unstructured loaders, UnstructuredTSVLoader can be used in both
"single" and "elements" mode. If you use the loader in "elements"
mode, the TSV file will be a single Unstructured Table element.
If you use the loader in "elements" mode, an HTML representation
of the table will be available in the "text_as_html" key in the
document metadata.
Examples
--------
from langchain_community.document_loaders.tsv import UnstructuredTSVLoader
loader = UnstructuredTSVLoader("stanley-cups.tsv", mode="elements")
docs = loader.load()
"""
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
file_path = str(file_path)
validate_unstructured_version(min_unstructured_version="0.7.6")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.tsv import partition_tsv
return partition_tsv(filename=self.file_path, **self.unstructured_kwargs)
|
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredTSVLoader(UnstructuredFileLoader):
"""Load `TSV` files using `Unstructured`.
Like other
Unstructured loaders, UnstructuredTSVLoader can be used in both
"single" and "elements" mode. If you use the loader in "elements"
mode, the TSV file will be a single Unstructured Table element.
If you use the loader in "elements" mode, an HTML representation
of the table will be available in the "text_as_html" key in the
document metadata.
Examples
--------
from langchain_community.document_loaders.tsv import UnstructuredTSVLoader
loader = UnstructuredTSVLoader("stanley-cups.tsv", mode="elements")
docs = loader.load()
"""
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
file_path = str(file_path)
validate_unstructured_version(min_unstructured_version="0.7.6")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.tsv import partition_tsv
return partition_tsv(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
|
"""Chat generation output classes."""
from __future__ import annotations
from typing import TYPE_CHECKING, Literal, Union
from pydantic import model_validator
from langchain_core.messages import BaseMessage, BaseMessageChunk
from langchain_core.outputs.generation import Generation
from langchain_core.utils._merge import merge_dicts
if TYPE_CHECKING:
from typing_extensions import Self
class ChatGeneration(Generation):
"""A single chat generation output.
A subclass of Generation that represents the response from a chat model
that generates chat messages.
The `message` attribute is a structured representation of the chat message.
Most of the time, the message will be of type `AIMessage`.
Users working with chat models will usually access information via either
`AIMessage` (returned from runnable interfaces) or `LLMResult` (available
via callbacks).
"""
text: str = ""
"""*SHOULD NOT BE SET DIRECTLY* The text contents of the output message."""
message: BaseMessage
"""The message output by the chat model."""
# Override type to be ChatGeneration, ignore mypy error as this is intentional
type: Literal["ChatGeneration"] = "ChatGeneration" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
@model_validator(mode="after")
def set_text(self) -> Self:
"""Set the text attribute to be the contents of the message.
Args:
values: The values of the object.
Returns:
The values of the object with the text attribute set.
Raises:
ValueError: If the message is not a string or a list.
"""
try:
text = ""
if isinstance(self.message.content, str):
text = self.message.content
# Assumes text in content blocks in OpenAI format.
# Uses first text block.
elif isinstance(self.message.content, list):
for block in self.message.content:
if isinstance(block, str):
text = block
break
if isinstance(block, dict) and "text" in block:
text = block["text"]
break
else:
pass
self.text = text
except (KeyError, AttributeError) as e:
msg = "Error while initializing ChatGeneration"
raise ValueError(msg) from e
return self
class ChatGenerationChunk(ChatGeneration):
"""ChatGeneration chunk.
ChatGeneration chunks can be concatenated with other ChatGeneration chunks.
"""
message: BaseMessageChunk
"""The message chunk output by the chat model."""
# Override type to be ChatGeneration, ignore mypy error as this is intentional
type: Literal["ChatGenerationChunk"] = "ChatGenerationChunk" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
def __add__(
self, other: Union[ChatGenerationChunk, list[ChatGenerationChunk]]
) -> ChatGenerationChunk:
"""Concatenate two ChatGenerationChunks.
Args:
other: The other ChatGenerationChunk or list of ChatGenerationChunks to
concatenate.
"""
if isinstance(other, ChatGenerationChunk):
generation_info = merge_dicts(
self.generation_info or {},
other.generation_info or {},
)
return ChatGenerationChunk(
message=self.message + other.message,
generation_info=generation_info or None,
)
if isinstance(other, list) and all(
isinstance(x, ChatGenerationChunk) for x in other
):
generation_info = merge_dicts(
self.generation_info or {},
*[chunk.generation_info for chunk in other if chunk.generation_info],
)
return ChatGenerationChunk(
message=self.message + [chunk.message for chunk in other],
generation_info=generation_info or None,
)
msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
raise TypeError(msg)
|
"""Chat generation output classes."""
from __future__ import annotations
from typing import TYPE_CHECKING, Literal, Union
from pydantic import model_validator
from langchain_core.messages import BaseMessage, BaseMessageChunk
from langchain_core.outputs.generation import Generation
from langchain_core.utils._merge import merge_dicts
if TYPE_CHECKING:
from typing_extensions import Self
class ChatGeneration(Generation):
"""A single chat generation output.
A subclass of Generation that represents the response from a chat model
that generates chat messages.
The `message` attribute is a structured representation of the chat message.
Most of the time, the message will be of type `AIMessage`.
Users working with chat models will usually access information via either
`AIMessage` (returned from runnable interfaces) or `LLMResult` (available
via callbacks).
"""
text: str = ""
"""*SHOULD NOT BE SET DIRECTLY* The text contents of the output message."""
message: BaseMessage
"""The message output by the chat model."""
# Override type to be ChatGeneration, ignore mypy error as this is intentional
type: Literal["ChatGeneration"] = "ChatGeneration" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
@model_validator(mode="after")
def set_text(self) -> Self:
"""Set the text attribute to be the contents of the message.
Args:
values: The values of the object.
Returns:
The values of the object with the text attribute set.
Raises:
ValueError: If the message is not a string or a list.
"""
try:
text = ""
if isinstance(self.message.content, str):
text = self.message.content
# HACK: Assumes text in content blocks in OpenAI format.
# Uses first text block.
elif isinstance(self.message.content, list):
for block in self.message.content:
if isinstance(block, str):
text = block
break
if isinstance(block, dict) and "text" in block:
text = block["text"]
break
else:
pass
self.text = text
except (KeyError, AttributeError) as e:
msg = "Error while initializing ChatGeneration"
raise ValueError(msg) from e
return self
class ChatGenerationChunk(ChatGeneration):
"""ChatGeneration chunk.
ChatGeneration chunks can be concatenated with other ChatGeneration chunks.
"""
message: BaseMessageChunk
"""The message chunk output by the chat model."""
# Override type to be ChatGeneration, ignore mypy error as this is intentional
type: Literal["ChatGenerationChunk"] = "ChatGenerationChunk" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
def __add__(
self, other: Union[ChatGenerationChunk, list[ChatGenerationChunk]]
) -> ChatGenerationChunk:
"""Concatenate two ChatGenerationChunks.
Args:
other: The other ChatGenerationChunk or list of ChatGenerationChunks to
concatenate.
"""
if isinstance(other, ChatGenerationChunk):
generation_info = merge_dicts(
self.generation_info or {},
other.generation_info or {},
)
return ChatGenerationChunk(
message=self.message + other.message,
generation_info=generation_info or None,
)
if isinstance(other, list) and all(
isinstance(x, ChatGenerationChunk) for x in other
):
generation_info = merge_dicts(
self.generation_info or {},
*[chunk.generation_info for chunk in other if chunk.generation_info],
)
return ChatGenerationChunk(
message=self.message + [chunk.message for chunk in other],
generation_info=generation_info or None,
)
msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
raise TypeError(msg)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.llms import GradientLLM
from langchain_community.llms.gradient_ai import TrainResult
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"TrainResult": "langchain_community.llms.gradient_ai",
"GradientLLM": "langchain_community.llms",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GradientLLM",
"TrainResult",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.llms import GradientLLM
from langchain_community.llms.gradient_ai import TrainResult
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"TrainResult": "langchain_community.llms.gradient_ai",
"GradientLLM": "langchain_community.llms",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"TrainResult",
"GradientLLM",
]
|
import os
import warnings
import torch
from torchvision import datasets, io, models, ops, transforms, utils
from .extension import _HAS_OPS
try:
from .version import __version__ # noqa: F401
except ImportError:
pass
# Check if torchvision is being imported within the root folder
if not _HAS_OPS and os.path.dirname(os.path.realpath(__file__)) == os.path.join(
os.path.realpath(os.getcwd()), "torchvision"
):
message = (
"You are importing torchvision within its own root folder ({}). "
"This is not expected to work and may give errors. Please exit the "
"torchvision project source and relaunch your python interpreter."
)
warnings.warn(message.format(os.getcwd()))
_image_backend = "PIL"
_video_backend = "pyav"
def set_image_backend(backend):
"""
Specifies the package used to load images.
Args:
backend (string): Name of the image backend. one of {'PIL', 'accimage'}.
The :mod:`accimage` package uses the Intel IPP library. It is
generally faster than PIL, but does not support as many operations.
"""
global _image_backend
if backend not in ["PIL", "accimage"]:
raise ValueError(f"Invalid backend '{backend}'. Options are 'PIL' and 'accimage'")
_image_backend = backend
def get_image_backend():
"""
Gets the name of the package used to load images
"""
return _image_backend
def set_video_backend(backend):
"""
Specifies the package used to decode videos.
Args:
backend (string): Name of the video backend. one of {'pyav', 'video_reader'}.
The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic
binding for the FFmpeg libraries.
The :mod:`video_reader` package includes a native C++ implementation on
top of FFMPEG libraries, and a python API of TorchScript custom operator.
It generally decodes faster than :mod:`pyav`, but is perhaps less robust.
.. note::
Building with FFMPEG is disabled by default in the latest `main`. If you want to use the 'video_reader'
backend, please compile torchvision from source.
"""
global _video_backend
if backend not in ["pyav", "video_reader"]:
raise ValueError("Invalid video backend '%s'. Options are 'pyav' and 'video_reader'" % backend)
if backend == "video_reader" and not io._HAS_VIDEO_OPT:
message = "video_reader video backend is not available. Please compile torchvision from source and try again"
warnings.warn(message)
else:
_video_backend = backend
def get_video_backend():
"""
Returns the currently active video backend used to decode videos.
Returns:
str: Name of the video backend. one of {'pyav', 'video_reader'}.
"""
return _video_backend
def _is_tracing():
return torch._C._get_tracing_state()
|
import os
import warnings
from modulefinder import Module
import torch
from torchvision import datasets, io, models, ops, transforms, utils
from .extension import _HAS_OPS, _load_library
try:
from .version import __version__ # noqa: F401
except ImportError:
pass
try:
_load_library("Decoder")
_HAS_GPU_VIDEO_DECODER = True
except (ImportError, OSError, ModuleNotFoundError):
_HAS_GPU_VIDEO_DECODER = False
# Check if torchvision is being imported within the root folder
if not _HAS_OPS and os.path.dirname(os.path.realpath(__file__)) == os.path.join(
os.path.realpath(os.getcwd()), "torchvision"
):
message = (
"You are importing torchvision within its own root folder ({}). "
"This is not expected to work and may give errors. Please exit the "
"torchvision project source and relaunch your python interpreter."
)
warnings.warn(message.format(os.getcwd()))
_image_backend = "PIL"
_video_backend = "pyav"
def set_image_backend(backend):
"""
Specifies the package used to load images.
Args:
backend (string): Name of the image backend. one of {'PIL', 'accimage'}.
The :mod:`accimage` package uses the Intel IPP library. It is
generally faster than PIL, but does not support as many operations.
"""
global _image_backend
if backend not in ["PIL", "accimage"]:
raise ValueError(f"Invalid backend '{backend}'. Options are 'PIL' and 'accimage'")
_image_backend = backend
def get_image_backend():
"""
Gets the name of the package used to load images
"""
return _image_backend
def set_video_backend(backend):
"""
Specifies the package used to decode videos.
Args:
backend (string): Name of the video backend. one of {'pyav', 'video_reader'}.
The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic
binding for the FFmpeg libraries.
The :mod:`video_reader` package includes a native C++ implementation on
top of FFMPEG libraries, and a python API of TorchScript custom operator.
It generally decodes faster than :mod:`pyav`, but is perhaps less robust.
.. note::
Building with FFMPEG is disabled by default in the latest `main`. If you want to use the 'video_reader'
backend, please compile torchvision from source.
"""
global _video_backend
if backend not in ["pyav", "video_reader", "cuda"]:
raise ValueError("Invalid video backend '%s'. Options are 'pyav', 'video_reader' and 'cuda'" % backend)
if backend == "video_reader" and not io._HAS_VIDEO_OPT:
# TODO: better messages
message = "video_reader video backend is not available. Please compile torchvision from source and try again"
raise RuntimeError(message)
elif backend == "cuda" and not _HAS_GPU_VIDEO_DECODER:
# TODO: better messages
message = "cuda video backend is not available."
raise RuntimeError(message)
else:
_video_backend = backend
def get_video_backend():
"""
Returns the currently active video backend used to decode videos.
Returns:
str: Name of the video backend. one of {'pyav', 'video_reader'}.
"""
return _video_backend
def _is_tracing():
return torch._C._get_tracing_state()
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
bbox_head=dict(
_delete_=True,
type='SABLRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
norm_cfg=norm_cfg,
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 960)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
bbox_head=dict(
_delete_=True,
type='SABLRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
norm_cfg=norm_cfg,
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 960)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
from typing import Optional
import numpy as np
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import AnyTensor, ImageUrl
from jina import Deployment, Executor, Flow, requests
def test_different_document_schema():
class Image(BaseDoc):
tensor: Optional[AnyTensor]
url: ImageUrl
class MyExec(Executor):
@requests(on='/foo')
def foo(self, docs: DocList[Image], **kwargs) -> DocList[Image]:
for doc in docs:
doc.tensor = doc.url.load()
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/foo',
inputs=DocList[Image]([Image(url='https://via.placeholder.com/150.png')]),
return_type=DocList[Image],
)
docs = docs.stack()
assert docs.tensor.ndim == 4
def test_send_custom_doc():
class MyDoc(BaseDoc):
text: str
class MyExec(Executor):
@requests(on='/foo')
def foo(self, docs: DocList[MyDoc], **kwargs):
docs[0].text = 'hello world'
with Flow().add(uses=MyExec) as f:
doc = f.post(on='/foo', inputs=MyDoc(text='hello'), return_type=DocList[MyDoc])
assert doc[0].text == 'hello world'
def test_input_response_schema():
class MyDoc(BaseDoc):
text: str
class MyExec(Executor):
@requests(
on='/foo',
request_schema=DocList[MyDoc],
response_schema=DocList[MyDoc],
)
def foo(self, docs, **kwargs):
assert docs.__class__.doc_type == MyDoc
docs[0].text = 'hello world'
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(on='/foo', inputs=MyDoc(text='hello'), return_type=DocList[MyDoc])
assert docs[0].text == 'hello world'
assert docs.__class__.doc_type == MyDoc
def test_input_response_schema_annotation():
class MyDoc(BaseDoc):
text: str
class MyExec(Executor):
@requests(on='/bar')
def bar(self, docs: DocList[MyDoc], **kwargs) -> DocList[MyDoc]:
assert docs.__class__.doc_type == MyDoc
docs[0].text = 'hello world'
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(on='/bar', inputs=MyDoc(text='hello'), return_type=DocList[MyDoc])
assert docs[0].text == 'hello world'
assert docs.__class__.doc_type == MyDoc
def test_different_output_input():
class InputDoc(BaseDoc):
img: ImageDoc
class OutputDoc(BaseDoc):
embedding: AnyTensor
class MyExec(Executor):
@requests(on='/bar')
def bar(self, docs: DocList[InputDoc], **kwargs) -> DocList[OutputDoc]:
docs_return = DocList[OutputDoc](
[OutputDoc(embedding=np.zeros((100, 1))) for _ in range(len(docs))]
)
return docs_return
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/bar',
inputs=InputDoc(img=ImageDoc(tensor=np.zeros((3, 224, 224)))),
return_type=DocList[OutputDoc],
)
assert docs[0].embedding.shape == (100, 1)
assert docs.__class__.doc_type == OutputDoc
def test_deployments():
class InputDoc(BaseDoc):
img: ImageDoc
class OutputDoc(BaseDoc):
embedding: AnyTensor
class MyExec(Executor):
@requests(on='/bar')
def bar(self, docs: DocList[InputDoc], **kwargs) -> DocList[OutputDoc]:
docs_return = DocList[OutputDoc](
[OutputDoc(embedding=np.zeros((100, 1))) for _ in range(len(docs))]
)
return docs_return
with Deployment(uses=MyExec) as dep:
docs = dep.post(
on='/bar',
inputs=InputDoc(img=ImageDoc(tensor=np.zeros((3, 224, 224)))),
return_type=DocList[OutputDoc],
)
assert docs[0].embedding.shape == (100, 1)
assert docs.__class__.doc_type == OutputDoc
|
from typing import Optional
import numpy as np
from docarray import BaseDoc
from docarray import DocArray as DocumentArray
from docarray.documents import ImageDoc
from docarray.typing import AnyTensor, ImageUrl
from jina import Deployment, Executor, Flow, requests
def test_different_document_schema():
class Image(BaseDoc):
tensor: Optional[AnyTensor]
url: ImageUrl
class MyExec(Executor):
@requests(on='/foo')
def foo(self, docs: DocumentArray[Image], **kwargs) -> DocumentArray[Image]:
for doc in docs:
doc.tensor = doc.url.load()
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/foo',
inputs=DocumentArray[Image](
[Image(url='https://via.placeholder.com/150.png')]
),
return_type=DocumentArray[Image],
)
docs = docs.stack()
assert docs.tensor.ndim == 4
def test_send_custom_doc():
class MyDoc(BaseDoc):
text: str
class MyExec(Executor):
@requests(on='/foo')
def foo(self, docs: DocumentArray[MyDoc], **kwargs):
docs[0].text = 'hello world'
with Flow().add(uses=MyExec) as f:
doc = f.post(
on='/foo', inputs=MyDoc(text='hello'), return_type=DocumentArray[MyDoc]
)
assert doc[0].text == 'hello world'
def test_input_response_schema():
class MyDoc(BaseDoc):
text: str
class MyExec(Executor):
@requests(
on='/foo',
request_schema=DocumentArray[MyDoc],
response_schema=DocumentArray[MyDoc],
)
def foo(self, docs, **kwargs):
assert docs.__class__.document_type == MyDoc
docs[0].text = 'hello world'
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/foo', inputs=MyDoc(text='hello'), return_type=DocumentArray[MyDoc]
)
assert docs[0].text == 'hello world'
assert docs.__class__.document_type == MyDoc
def test_input_response_schema_annotation():
class MyDoc(BaseDoc):
text: str
class MyExec(Executor):
@requests(on='/bar')
def bar(self, docs: DocumentArray[MyDoc], **kwargs) -> DocumentArray[MyDoc]:
assert docs.__class__.document_type == MyDoc
docs[0].text = 'hello world'
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/bar', inputs=MyDoc(text='hello'), return_type=DocumentArray[MyDoc]
)
assert docs[0].text == 'hello world'
assert docs.__class__.document_type == MyDoc
def test_different_output_input():
class InputDoc(BaseDoc):
img: ImageDoc
class OutputDoc(BaseDoc):
embedding: AnyTensor
class MyExec(Executor):
@requests(on='/bar')
def bar(
self, docs: DocumentArray[InputDoc], **kwargs
) -> DocumentArray[OutputDoc]:
docs_return = DocumentArray[OutputDoc](
[OutputDoc(embedding=np.zeros((100, 1))) for _ in range(len(docs))]
)
return docs_return
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/bar',
inputs=InputDoc(img=ImageDoc(tensor=np.zeros((3, 224, 224)))),
return_type=DocumentArray[OutputDoc],
)
assert docs[0].embedding.shape == (100, 1)
assert docs.__class__.document_type == OutputDoc
def test_deployments():
class InputDoc(BaseDoc):
img: ImageDoc
class OutputDoc(BaseDoc):
embedding: AnyTensor
class MyExec(Executor):
@requests(on='/bar')
def bar(
self, docs: DocumentArray[InputDoc], **kwargs
) -> DocumentArray[OutputDoc]:
docs_return = DocumentArray[OutputDoc](
[OutputDoc(embedding=np.zeros((100, 1))) for _ in range(len(docs))]
)
return docs_return
with Deployment(uses=MyExec) as dep:
docs = dep.post(
on='/bar',
inputs=InputDoc(img=ImageDoc(tensor=np.zeros((3, 224, 224)))),
return_type=DocumentArray[OutputDoc],
)
assert docs[0].embedding.shape == (100, 1)
assert docs.__class__.document_type == OutputDoc
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import AudioTorchTensor, AudioUrl
from tests import TOYDATA_DIR
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDocument):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor = doc.audio_url.load()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, AudioTorchTensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
*[file for file in AUDIO_FILES],
REMOTE_AUDIO_FILE,
],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'illegal',
'https://www.google.com',
'my/local/text/file.txt',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='AudioUrl'):
parse_obj_as(AudioUrl, path_to_file)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert 'audio_url' in str(proto)
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import AudioTorchTensor, AudioUrl
from tests import TOYDATA_DIR
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDocument):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor = doc.audio_url.load()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, AudioTorchTensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
*[file for file in AUDIO_FILES],
REMOTE_AUDIO_FILE,
],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'illegal',
'https://www.google.com',
'my/local/text/file.txt',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='AudioUrl'):
parse_obj_as(AudioUrl, path_to_file)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert 'audio_url' in str(proto)
|
__all__ = ["LoggingCallbackHandler"]
import logging
from typing import Any, Optional
from uuid import UUID
from langchain_core.exceptions import TracerException
from langchain_core.tracers.stdout import FunctionCallbackHandler
from langchain_core.utils.input import get_bolded_text, get_colored_text
class LoggingCallbackHandler(FunctionCallbackHandler):
"""Tracer that logs via the input Logger."""
name: str = "logging_callback_handler"
def __init__(
self,
logger: logging.Logger,
log_level: int = logging.INFO,
extra: Optional[dict] = None,
**kwargs: Any,
) -> None:
log_method = getattr(logger, logging.getLevelName(level=log_level).lower())
def callback(text: str) -> None:
log_method(text, extra=extra)
super().__init__(function=callback, **kwargs)
def on_text(
self,
text: str,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
try:
crumbs_str = f"[{self.get_breadcrumbs(run=self._get_run(run_id=run_id))}] "
except TracerException:
crumbs_str = ""
self.function_callback(
f"{get_colored_text('[text]', color='blue')}"
f" {get_bolded_text(f'{crumbs_str}New text:')}\n{text}",
)
|
__all__ = ["LoggingCallbackHandler"]
import logging
from typing import Any, Optional
from uuid import UUID
from langchain_core.exceptions import TracerException
from langchain_core.tracers.stdout import FunctionCallbackHandler
from langchain_core.utils.input import get_bolded_text, get_colored_text
class LoggingCallbackHandler(FunctionCallbackHandler):
"""Tracer that logs via the input Logger."""
name: str = "logging_callback_handler"
def __init__(
self,
logger: logging.Logger,
log_level: int = logging.INFO,
extra: Optional[dict] = None,
**kwargs: Any,
) -> None:
log_method = getattr(logger, logging.getLevelName(level=log_level).lower())
def callback(text: str) -> None:
log_method(text, extra=extra)
super().__init__(function=callback, **kwargs)
def on_text(
self,
text: str,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
try:
crumbs_str = f"[{self.get_breadcrumbs(run=self._get_run(run_id=run_id))}] "
except TracerException:
crumbs_str = ""
self.function_callback(
f"{get_colored_text('[text]', color='blue')}"
f" {get_bolded_text(f'{crumbs_str}New text:')}\n{text}"
)
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox',
backend_args=backend_args)
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
"""
Demo for accessing the xgboost eval metrics by using sklearn interface
======================================================================
"""
import numpy as np
from sklearn.datasets import make_hastie_10_2
import xgboost as xgb
X, y = make_hastie_10_2(n_samples=2000, random_state=42)
# Map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:1600], X[1600:]
y_train, y_test = y[:1600], y[1600:]
param_dist = {"objective": "binary:logistic", "n_estimators": 2}
clf = xgb.XGBModel(
**param_dist,
eval_metric="logloss",
)
# Or you can use: clf = xgb.XGBClassifier(**param_dist)
clf.fit(
X_train,
y_train,
eval_set=[(X_train, y_train), (X_test, y_test)],
verbose=True,
)
# Load evals result by calling the evals_result() function
evals_result = clf.evals_result()
print("Access logloss metric directly from validation_0:")
print(evals_result["validation_0"]["logloss"])
print("")
print("Access metrics through a loop:")
for e_name, e_mtrs in evals_result.items():
print("- {}".format(e_name))
for e_mtr_name, e_mtr_vals in e_mtrs.items():
print(" - {}".format(e_mtr_name))
print(" - {}".format(e_mtr_vals))
print("")
print("Access complete dict:")
print(evals_result)
|
"""
Demo for accessing the xgboost eval metrics by using sklearn interface
======================================================================
"""
import numpy as np
from sklearn.datasets import make_hastie_10_2
import xgboost as xgb
X, y = make_hastie_10_2(n_samples=2000, random_state=42)
# Map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:1600], X[1600:]
y_train, y_test = y[:1600], y[1600:]
param_dist = {'objective':'binary:logistic', 'n_estimators':2}
clf = xgb.XGBModel(**param_dist)
# Or you can use: clf = xgb.XGBClassifier(**param_dist)
clf.fit(X_train, y_train,
eval_set=[(X_train, y_train), (X_test, y_test)],
eval_metric='logloss',
verbose=True)
# Load evals result by calling the evals_result() function
evals_result = clf.evals_result()
print('Access logloss metric directly from validation_0:')
print(evals_result['validation_0']['logloss'])
print('')
print('Access metrics through a loop:')
for e_name, e_mtrs in evals_result.items():
print('- {}'.format(e_name))
for e_mtr_name, e_mtr_vals in e_mtrs.items():
print(' - {}'.format(e_mtr_name))
print(' - {}'.format(e_mtr_vals))
print('')
print('Access complete dict:')
print(evals_result)
|
from abc import abstractmethod
from typing import Any, Type, TypeVar
from pydantic import BaseConfig
from pydantic.fields import ModelField
from docarray.base_doc.base_node import BaseNode
T = TypeVar('T')
class AbstractType(BaseNode):
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
@abstractmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
...
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar
from pydantic import BaseConfig
from pydantic.fields import ModelField
from docarray.base_doc.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T')
class AbstractType(BaseNode):
_proto_type_name: Optional[str] = None
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
@abstractmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
...
@classmethod
@abstractmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
...
@abstractmethod
def _to_node_protobuf(self: T) -> 'NodeProto':
...
def _docarray_to_json_compatible(self):
"""
Convert itself into a json compatible object
:return: a representation of the tensor compatible with orjson
"""
return self
|
import pytest
import torch
from torchvision.prototype import datapoints
@pytest.mark.parametrize(
("data", "input_requires_grad", "expected_requires_grad"),
[
([0.0], None, False),
([0.0], False, False),
([0.0], True, True),
(torch.tensor([0.0], requires_grad=False), None, False),
(torch.tensor([0.0], requires_grad=False), False, False),
(torch.tensor([0.0], requires_grad=False), True, True),
(torch.tensor([0.0], requires_grad=True), None, True),
(torch.tensor([0.0], requires_grad=True), False, False),
(torch.tensor([0.0], requires_grad=True), True, True),
],
)
def test_new_requires_grad(data, input_requires_grad, expected_requires_grad):
datapoint = datapoints.Label(data, requires_grad=input_requires_grad)
assert datapoint.requires_grad is expected_requires_grad
def test_isinstance():
assert isinstance(
datapoints.Label([0, 1, 0], categories=["foo", "bar"]),
torch.Tensor,
)
def test_wrapping_no_copy():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
assert label.data_ptr() == tensor.data_ptr()
def test_to_wrapping():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
label_to = label.to(torch.int32)
assert type(label_to) is datapoints.Label
assert label_to.dtype is torch.int32
assert label_to.categories is label.categories
def test_to_datapoint_reference():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"]).to(torch.int32)
tensor_to = tensor.to(label)
assert type(tensor_to) is torch.Tensor
assert tensor_to.dtype is torch.int32
def test_clone_wrapping():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
label_clone = label.clone()
assert type(label_clone) is datapoints.Label
assert label_clone.data_ptr() != label.data_ptr()
assert label_clone.categories is label.categories
def test_requires_grad__wrapping():
tensor = torch.tensor([0, 1, 0], dtype=torch.float32)
label = datapoints.Label(tensor, categories=["foo", "bar"])
assert not label.requires_grad
label_requires_grad = label.requires_grad_(True)
assert type(label_requires_grad) is datapoints.Label
assert label.requires_grad
assert label_requires_grad.requires_grad
def test_other_op_no_wrapping():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
# any operation besides .to() and .clone() will do here
output = label * 2
assert type(output) is torch.Tensor
@pytest.mark.parametrize(
"op",
[
lambda t: t.numpy(),
lambda t: t.tolist(),
lambda t: t.max(dim=-1),
],
)
def test_no_tensor_output_op_no_wrapping(op):
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
output = op(label)
assert type(output) is not datapoints.Label
def test_inplace_op_no_wrapping():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
output = label.add_(0)
assert type(output) is torch.Tensor
assert type(label) is datapoints.Label
def test_wrap_like():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
# any operation besides .to() and .clone() will do here
output = label * 2
label_new = datapoints.Label.wrap_like(label, output)
assert type(label_new) is datapoints.Label
assert label_new.data_ptr() == output.data_ptr()
assert label_new.categories is label.categories
|
import pytest
import torch
from torchvision.prototype import datapoints
def test_isinstance():
assert isinstance(
datapoints.Label([0, 1, 0], categories=["foo", "bar"]),
torch.Tensor,
)
def test_wrapping_no_copy():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
assert label.data_ptr() == tensor.data_ptr()
def test_to_wrapping():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
label_to = label.to(torch.int32)
assert type(label_to) is datapoints.Label
assert label_to.dtype is torch.int32
assert label_to.categories is label.categories
def test_to_datapoint_reference():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"]).to(torch.int32)
tensor_to = tensor.to(label)
assert type(tensor_to) is torch.Tensor
assert tensor_to.dtype is torch.int32
def test_clone_wrapping():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
label_clone = label.clone()
assert type(label_clone) is datapoints.Label
assert label_clone.data_ptr() != label.data_ptr()
assert label_clone.categories is label.categories
def test_requires_grad__wrapping():
tensor = torch.tensor([0, 1, 0], dtype=torch.float32)
label = datapoints.Label(tensor, categories=["foo", "bar"])
assert not label.requires_grad
label_requires_grad = label.requires_grad_(True)
assert type(label_requires_grad) is datapoints.Label
assert label.requires_grad
assert label_requires_grad.requires_grad
def test_other_op_no_wrapping():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
# any operation besides .to() and .clone() will do here
output = label * 2
assert type(output) is torch.Tensor
@pytest.mark.parametrize(
"op",
[
lambda t: t.numpy(),
lambda t: t.tolist(),
lambda t: t.max(dim=-1),
],
)
def test_no_tensor_output_op_no_wrapping(op):
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
output = op(label)
assert type(output) is not datapoints.Label
def test_inplace_op_no_wrapping():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
output = label.add_(0)
assert type(output) is torch.Tensor
assert type(label) is datapoints.Label
def test_wrap_like():
tensor = torch.tensor([0, 1, 0], dtype=torch.int64)
label = datapoints.Label(tensor, categories=["foo", "bar"])
# any operation besides .to() and .clone() will do here
output = label * 2
label_new = datapoints.Label.wrap_like(label, output)
assert type(label_new) is datapoints.Label
assert label_new.data_ptr() == output.data_ptr()
assert label_new.categories is label.categories
|
"""Standard LangChain interface tests"""
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ChatModelUnitTests
from langchain_anthropic import ChatAnthropic
class TestAnthropicStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatAnthropic
@property
def chat_model_params(self) -> dict:
return {"model": "claude-3-haiku-20240307"}
|
"""Standard LangChain interface tests"""
from typing import Type
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ChatModelUnitTests
from langchain_anthropic import ChatAnthropic
class TestAnthropicStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatAnthropic
@property
def chat_model_params(self) -> dict:
return {"model": "claude-3-haiku-20240307"}
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool as average_pool
from keras.src.ops.nn import batch_normalization as batch_normalization
from keras.src.ops.nn import binary_crossentropy as binary_crossentropy
from keras.src.ops.nn import (
categorical_crossentropy as categorical_crossentropy,
)
from keras.src.ops.nn import celu as celu
from keras.src.ops.nn import conv as conv
from keras.src.ops.nn import conv_transpose as conv_transpose
from keras.src.ops.nn import ctc_decode as ctc_decode
from keras.src.ops.nn import ctc_loss as ctc_loss
from keras.src.ops.nn import depthwise_conv as depthwise_conv
from keras.src.ops.nn import dot_product_attention as dot_product_attention
from keras.src.ops.nn import elu as elu
from keras.src.ops.nn import gelu as gelu
from keras.src.ops.nn import glu as glu
from keras.src.ops.nn import hard_shrink as hard_shrink
from keras.src.ops.nn import hard_sigmoid as hard_sigmoid
from keras.src.ops.nn import hard_silu as hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh as hard_tanh
from keras.src.ops.nn import layer_normalization as layer_normalization
from keras.src.ops.nn import leaky_relu as leaky_relu
from keras.src.ops.nn import log_sigmoid as log_sigmoid
from keras.src.ops.nn import log_softmax as log_softmax
from keras.src.ops.nn import max_pool as max_pool
from keras.src.ops.nn import moments as moments
from keras.src.ops.nn import multi_hot as multi_hot
from keras.src.ops.nn import normalize as normalize
from keras.src.ops.nn import one_hot as one_hot
from keras.src.ops.nn import polar as polar
from keras.src.ops.nn import psnr as psnr
from keras.src.ops.nn import relu as relu
from keras.src.ops.nn import relu6 as relu6
from keras.src.ops.nn import rms_normalization as rms_normalization
from keras.src.ops.nn import selu as selu
from keras.src.ops.nn import separable_conv as separable_conv
from keras.src.ops.nn import sigmoid as sigmoid
from keras.src.ops.nn import silu as silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink as soft_shrink
from keras.src.ops.nn import softmax as softmax
from keras.src.ops.nn import softplus as softplus
from keras.src.ops.nn import softsign as softsign
from keras.src.ops.nn import (
sparse_categorical_crossentropy as sparse_categorical_crossentropy,
)
from keras.src.ops.nn import sparse_plus as sparse_plus
from keras.src.ops.nn import sparse_sigmoid as sparse_sigmoid
from keras.src.ops.nn import sparsemax as sparsemax
from keras.src.ops.nn import squareplus as squareplus
from keras.src.ops.nn import tanh_shrink as tanh_shrink
from keras.src.ops.nn import threshold as threshold
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool as average_pool
from keras.src.ops.nn import batch_normalization as batch_normalization
from keras.src.ops.nn import binary_crossentropy as binary_crossentropy
from keras.src.ops.nn import (
categorical_crossentropy as categorical_crossentropy,
)
from keras.src.ops.nn import celu as celu
from keras.src.ops.nn import conv as conv
from keras.src.ops.nn import conv_transpose as conv_transpose
from keras.src.ops.nn import ctc_decode as ctc_decode
from keras.src.ops.nn import ctc_loss as ctc_loss
from keras.src.ops.nn import depthwise_conv as depthwise_conv
from keras.src.ops.nn import dot_product_attention as dot_product_attention
from keras.src.ops.nn import elu as elu
from keras.src.ops.nn import gelu as gelu
from keras.src.ops.nn import glu as glu
from keras.src.ops.nn import hard_shrink as hard_shrink
from keras.src.ops.nn import hard_sigmoid as hard_sigmoid
from keras.src.ops.nn import hard_silu as hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh as hard_tanh
from keras.src.ops.nn import leaky_relu as leaky_relu
from keras.src.ops.nn import log_sigmoid as log_sigmoid
from keras.src.ops.nn import log_softmax as log_softmax
from keras.src.ops.nn import max_pool as max_pool
from keras.src.ops.nn import moments as moments
from keras.src.ops.nn import multi_hot as multi_hot
from keras.src.ops.nn import normalize as normalize
from keras.src.ops.nn import one_hot as one_hot
from keras.src.ops.nn import polar as polar
from keras.src.ops.nn import psnr as psnr
from keras.src.ops.nn import relu as relu
from keras.src.ops.nn import relu6 as relu6
from keras.src.ops.nn import rms_normalization as rms_normalization
from keras.src.ops.nn import selu as selu
from keras.src.ops.nn import separable_conv as separable_conv
from keras.src.ops.nn import sigmoid as sigmoid
from keras.src.ops.nn import silu as silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink as soft_shrink
from keras.src.ops.nn import softmax as softmax
from keras.src.ops.nn import softplus as softplus
from keras.src.ops.nn import softsign as softsign
from keras.src.ops.nn import (
sparse_categorical_crossentropy as sparse_categorical_crossentropy,
)
from keras.src.ops.nn import sparse_plus as sparse_plus
from keras.src.ops.nn import sparse_sigmoid as sparse_sigmoid
from keras.src.ops.nn import sparsemax as sparsemax
from keras.src.ops.nn import squareplus as squareplus
from keras.src.ops.nn import tanh_shrink as tanh_shrink
from keras.src.ops.nn import threshold as threshold
|
"""
===================================
How to write your own v2 transforms
===================================
.. note::
Try on `collab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_transforms.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_transforms_plot_custom_transforms.py>` to download the full example code.
This guide explains how to write transforms that are compatible with the
torchvision transforms V2 API.
"""
# %%
import torch
from torchvision import tv_tensors
from torchvision.transforms import v2
# %%
# Just create a ``nn.Module`` and override the ``forward`` method
# ===============================================================
#
# In most cases, this is all you're going to need, as long as you already know
# the structure of the input that your transform will expect. For example if
# you're just doing image classification, your transform will typically accept a
# single image as input, or a ``(img, label)`` input. So you can just hard-code
# your ``forward`` method to accept just that, e.g.
#
# .. code:: python
#
# class MyCustomTransform(torch.nn.Module):
# def forward(self, img, label):
# # Do some transformations
# return new_img, new_label
#
# .. note::
#
# This means that if you have a custom transform that is already compatible
# with the V1 transforms (those in ``torchvision.transforms``), it will
# still work with the V2 transforms without any change!
#
# We will illustrate this more completely below with a typical detection case,
# where our samples are just images, bounding boxes and labels:
class MyCustomTransform(torch.nn.Module):
def forward(self, img, bboxes, label): # we assume inputs are always structured like this
print(
f"I'm transforming an image of shape {img.shape} "
f"with bboxes = {bboxes}\n{label = }"
)
# Do some transformations. Here, we're just passing though the input
return img, bboxes, label
transforms = v2.Compose([
MyCustomTransform(),
v2.RandomResizedCrop((224, 224), antialias=True),
v2.RandomHorizontalFlip(p=1),
v2.Normalize(mean=[0, 0, 0], std=[1, 1, 1])
])
H, W = 256, 256
img = torch.rand(3, H, W)
bboxes = tv_tensors.BoundingBoxes(
torch.tensor([[0, 10, 10, 20], [50, 50, 70, 70]]),
format="XYXY",
canvas_size=(H, W)
)
label = 3
out_img, out_bboxes, out_label = transforms(img, bboxes, label)
# %%
print(f"Output image shape: {out_img.shape}\nout_bboxes = {out_bboxes}\n{out_label = }")
# %%
# .. note::
# While working with TVTensor classes in your code, make sure to
# familiarize yourself with this section:
# :ref:`tv_tensor_unwrapping_behaviour`
#
# Supporting arbitrary input structures
# =====================================
#
# In the section above, we have assumed that you already know the structure of
# your inputs and that you're OK with hard-coding this expected structure in
# your code. If you want your custom transforms to be as flexible as possible,
# this can be a bit limiting.
#
# A key feature of the builtin Torchvision V2 transforms is that they can accept
# arbitrary input structure and return the same structure as output (with
# transformed entries). For example, transforms can accept a single image, or a
# tuple of ``(img, label)``, or an arbitrary nested dictionary as input:
structured_input = {
"img": img,
"annotations": (bboxes, label),
"something_that_will_be_ignored": (1, "hello")
}
structured_output = v2.RandomHorizontalFlip(p=1)(structured_input)
assert isinstance(structured_output, dict)
assert structured_output["something_that_will_be_ignored"] == (1, "hello")
print(f"The transformed bboxes are:\n{structured_output['annotations'][0]}")
# %%
# If you want to reproduce this behavior in your own transform, we invite you to
# look at our `code
# <https://github.com/pytorch/vision/blob/main/torchvision/transforms/v2/_transform.py>`_
# and adapt it to your needs.
#
# In brief, the core logic is to unpack the input into a flat list using `pytree
# <https://github.com/pytorch/pytorch/blob/main/torch/utils/_pytree.py>`_, and
# then transform only the entries that can be transformed (the decision is made
# based on the **class** of the entries, as all TVTensors are
# tensor-subclasses) plus some custom logic that is out of score here - check the
# code for details. The (potentially transformed) entries are then repacked and
# returned, in the same structure as the input.
#
# We do not provide public dev-facing tools to achieve that at this time, but if
# this is something that would be valuable to you, please let us know by opening
# an issue on our `GitHub repo <https://github.com/pytorch/vision/issues>`_.
|
"""
===================================
How to write your own v2 transforms
===================================
.. note::
Try on `collab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_transforms.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_transforms_plot_custom_transforms.py>` to download the full example code.
This guide explains how to write transforms that are compatible with the
torchvision transforms V2 API.
"""
# %%
import torch
from torchvision import tv_tensors
from torchvision.transforms import v2
# %%
# Just create a ``nn.Module`` and override the ``forward`` method
# ===============================================================
#
# In most cases, this is all you're going to need, as long as you already know
# the structure of the input that your transform will expect. For example if
# you're just doing image classification, your transform will typically accept a
# single image as input, or a ``(img, label)`` input. So you can just hard-code
# your ``forward`` method to accept just that, e.g.
#
# .. code:: python
#
# class MyCustomTransform(torch.nn.Module):
# def forward(self, img, label):
# # Do some transformations
# return new_img, new_label
#
# .. note::
#
# This means that if you have a custom transform that is already compatible
# with the V1 transforms (those in ``torchvision.transforms``), it will
# still work with the V2 transforms without any change!
#
# We will illustrate this more completely below with a typical detection case,
# where our samples are just images, bounding boxes and labels:
class MyCustomTransform(torch.nn.Module):
def forward(self, img, bboxes, label): # we assume inputs are always structured like this
print(
f"I'm transforming an image of shape {img.shape} "
f"with bboxes = {bboxes}\n{label = }"
)
# Do some transformations. Here, we're just passing though the input
return img, bboxes, label
transforms = v2.Compose([
MyCustomTransform(),
v2.RandomResizedCrop((224, 224), antialias=True),
v2.RandomHorizontalFlip(p=1),
v2.Normalize(mean=[0, 0, 0], std=[1, 1, 1])
])
H, W = 256, 256
img = torch.rand(3, H, W)
bboxes = tv_tensors.BoundingBoxes(
torch.tensor([[0, 10, 10, 20], [50, 50, 70, 70]]),
format="XYXY",
canvas_size=(H, W)
)
label = 3
out_img, out_bboxes, out_label = transforms(img, bboxes, label)
# %%
print(f"Output image shape: {out_img.shape}\nout_bboxes = {out_bboxes}\n{out_label = }")
# %%
# .. note::
# While working with tv_tensor classes in your code, make sure to
# familiarize yourself with this section:
# :ref:`tv_tensor_unwrapping_behaviour`
#
# Supporting arbitrary input structures
# =====================================
#
# In the section above, we have assumed that you already know the structure of
# your inputs and that you're OK with hard-coding this expected structure in
# your code. If you want your custom transforms to be as flexible as possible,
# this can be a bit limiting.
#
# A key feature of the builtin Torchvision V2 transforms is that they can accept
# arbitrary input structure and return the same structure as output (with
# transformed entries). For example, transforms can accept a single image, or a
# tuple of ``(img, label)``, or an arbitrary nested dictionary as input:
structured_input = {
"img": img,
"annotations": (bboxes, label),
"something_that_will_be_ignored": (1, "hello")
}
structured_output = v2.RandomHorizontalFlip(p=1)(structured_input)
assert isinstance(structured_output, dict)
assert structured_output["something_that_will_be_ignored"] == (1, "hello")
print(f"The transformed bboxes are:\n{structured_output['annotations'][0]}")
# %%
# If you want to reproduce this behavior in your own transform, we invite you to
# look at our `code
# <https://github.com/pytorch/vision/blob/main/torchvision/transforms/v2/_transform.py>`_
# and adapt it to your needs.
#
# In brief, the core logic is to unpack the input into a flat list using `pytree
# <https://github.com/pytorch/pytorch/blob/main/torch/utils/_pytree.py>`_, and
# then transform only the entries that can be transformed (the decision is made
# based on the **class** of the entries, as all tv_tensors are
# tensor-subclasses) plus some custom logic that is out of score here - check the
# code for details. The (potentially transformed) entries are then repacked and
# returned, in the same structure as the input.
#
# We do not provide public dev-facing tools to achieve that at this time, but if
# this is something that would be valuable to you, please let us know by opening
# an issue on our `GitHub repo <https://github.com/pytorch/vision/issues>`_.
|
import logging
import requests
from fastapi import Request
from backend.data import integrations
from backend.data.model import APIKeyCredentials, Credentials
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks.base import BaseWebhooksManager
logger = logging.getLogger(__name__)
class Slant3DWebhooksManager(BaseWebhooksManager):
"""Manager for Slant3D webhooks"""
PROVIDER_NAME = ProviderName.SLANT3D
BASE_URL = "https://www.slant3dapi.com/api"
async def _register_webhook(
self,
credentials: Credentials,
webhook_type: str,
resource: str,
events: list[str],
ingress_url: str,
secret: str,
) -> tuple[str, dict]:
"""Register a new webhook with Slant3D"""
if not isinstance(credentials, APIKeyCredentials):
raise ValueError("API key is required to register a webhook")
headers = {
"api-key": credentials.api_key.get_secret_value(),
"Content-Type": "application/json",
}
# Slant3D's API doesn't use events list, just register for all order updates
payload = {"endPoint": ingress_url}
response = requests.post(
f"{self.BASE_URL}/customer/webhookSubscribe", headers=headers, json=payload
)
if not response.ok:
error = response.json().get("error", "Unknown error")
raise RuntimeError(f"Failed to register webhook: {error}")
webhook_config = {
"endpoint": ingress_url,
"provider": self.PROVIDER_NAME,
"events": ["order.shipped"], # Currently the only supported event
"type": webhook_type,
}
return "", webhook_config
@classmethod
async def validate_payload(
cls, webhook: integrations.Webhook, request: Request
) -> tuple[dict, str]:
"""Validate incoming webhook payload from Slant3D"""
payload = await request.json()
# Validate required fields from Slant3D API spec
required_fields = ["orderId", "status", "trackingNumber", "carrierCode"]
missing_fields = [field for field in required_fields if field not in payload]
if missing_fields:
raise ValueError(f"Missing required fields: {', '.join(missing_fields)}")
# Normalize payload structure
normalized_payload = {
"orderId": payload["orderId"],
"status": payload["status"],
"trackingNumber": payload["trackingNumber"],
"carrierCode": payload["carrierCode"],
}
# Currently Slant3D only sends shipping notifications
# Convert status to lowercase for event format compatibility
event_type = f"order.{payload['status'].lower()}"
return normalized_payload, event_type
async def _deregister_webhook(
self, webhook: integrations.Webhook, credentials: Credentials
) -> None:
"""
Note: Slant3D API currently doesn't provide a deregistration endpoint.
This would need to be handled through support.
"""
# Log warning since we can't properly deregister
logger.warning(
f"Warning: Manual deregistration required for webhook {webhook.id}"
)
pass
|
import logging
from typing import ClassVar
import requests
from fastapi import Request
from backend.data import integrations
from backend.data.model import APIKeyCredentials, Credentials
from backend.integrations.webhooks.base import BaseWebhooksManager
logger = logging.getLogger(__name__)
class Slant3DWebhooksManager(BaseWebhooksManager):
"""Manager for Slant3D webhooks"""
PROVIDER_NAME: ClassVar[str] = "slant3d"
BASE_URL = "https://www.slant3dapi.com/api"
async def _register_webhook(
self,
credentials: Credentials,
webhook_type: str,
resource: str,
events: list[str],
ingress_url: str,
secret: str,
) -> tuple[str, dict]:
"""Register a new webhook with Slant3D"""
if not isinstance(credentials, APIKeyCredentials):
raise ValueError("API key is required to register a webhook")
headers = {
"api-key": credentials.api_key.get_secret_value(),
"Content-Type": "application/json",
}
# Slant3D's API doesn't use events list, just register for all order updates
payload = {"endPoint": ingress_url}
response = requests.post(
f"{self.BASE_URL}/customer/webhookSubscribe", headers=headers, json=payload
)
if not response.ok:
error = response.json().get("error", "Unknown error")
raise RuntimeError(f"Failed to register webhook: {error}")
webhook_config = {
"endpoint": ingress_url,
"provider": self.PROVIDER_NAME,
"events": ["order.shipped"], # Currently the only supported event
"type": webhook_type,
}
return "", webhook_config
@classmethod
async def validate_payload(
cls, webhook: integrations.Webhook, request: Request
) -> tuple[dict, str]:
"""Validate incoming webhook payload from Slant3D"""
payload = await request.json()
# Validate required fields from Slant3D API spec
required_fields = ["orderId", "status", "trackingNumber", "carrierCode"]
missing_fields = [field for field in required_fields if field not in payload]
if missing_fields:
raise ValueError(f"Missing required fields: {', '.join(missing_fields)}")
# Normalize payload structure
normalized_payload = {
"orderId": payload["orderId"],
"status": payload["status"],
"trackingNumber": payload["trackingNumber"],
"carrierCode": payload["carrierCode"],
}
# Currently Slant3D only sends shipping notifications
# Convert status to lowercase for event format compatibility
event_type = f"order.{payload['status'].lower()}"
return normalized_payload, event_type
async def _deregister_webhook(
self, webhook: integrations.Webhook, credentials: Credentials
) -> None:
"""
Note: Slant3D API currently doesn't provide a deregistration endpoint.
This would need to be handled through support.
"""
# Log warning since we can't properly deregister
logger.warning(
f"Warning: Manual deregistration required for webhook {webhook.id}"
)
pass
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.MaxPooling3D", "keras.layers.MaxPool3D"])
class MaxPooling3D(BasePooling):
"""Max pooling operation for 3D data (spatial or spatio-temporal).
Downsamples the input along its spatial dimensions (depth, height, and
width) by taking the maximum value over an input window (of size defined by
`pool_size`) for each channel of the input. The window is shifted by
`strides` along each dimension.
Args:
pool_size: int or tuple of 3 integers, factors by which to downscale
(dim1, dim2, dim3). If only one integer is specified, the same
window length will be used for all dimensions.
strides: int or tuple of 3 integers, or None. Strides values. If None,
it will default to `pool_size`. If only one int is specified, the
same stride size will be used for all dimensions.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while
`"channels_first"` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your Keras
config file at `~/.keras/keras.json`. If you never set it, then it
will be `"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format="channels_first"`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `data_format="channels_last"`:
5D tensor with shape:
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
- If `data_format="channels_first"`:
5D tensor with shape:
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
Example:
```python
depth = 30
height = 30
width = 30
channels = 3
inputs = keras.layers.Input(shape=(depth, height, width, channels))
layer = keras.layers.MaxPooling3D(pool_size=3)
outputs = layer(inputs) # Shape: (batch_size, 10, 10, 10, 3)
```
"""
def __init__(
self,
pool_size=(2, 2, 2),
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs
):
super().__init__(
pool_size,
strides,
pool_dimensions=3,
pool_mode="max",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.MaxPooling3D", "keras.layers.MaxPool3D"])
class MaxPooling3D(BasePooling):
"""Max pooling operation for 3D data (spatial or spatio-temporal).
Downsamples the input along its spatial dimensions (depth, height, and
width) by taking the maximum value over an input window (of size defined by
`pool_size`) for each channel of the input. The window is shifted by
`strides` along each dimension.
Args:
pool_size: int or tuple of 3 integers, factors by which to downscale
(dim1, dim2, dim3). If only one integer is specified, the same
window length will be used for all dimensions.
strides: int or tuple of 3 integers, or None. Strides values. If None,
it will default to `pool_size`. If only one int is specified, the
same stride size will be used for all dimensions.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while
`"channels_first"` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your Keras
config file at `~/.keras/keras.json`. If you never set it, then it
will be `"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format="channels_first"`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `data_format="channels_last"`:
5D tensor with shape:
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
- If `data_format="channels_first"`:
5D tensor with shape:
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
Example:
```python
depth = 30
height = 30
width = 30
channels = 3
inputs = keras.layers.Input(shape=(depth, height, width, channels))
layer = keras.layers.MaxPooling3D(pool_size=3)
outputs = layer(inputs) # Shape: (batch_size, 10, 10, 10, 3)
```
"""
def __init__(
self,
pool_size=(2, 2, 2),
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs
):
super().__init__(
pool_size,
strides,
pool_dimensions=3,
pool_mode="max",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class FCOS(SingleStageDetector):
"""Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class FCOS(SingleStageDetector):
"""Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
_base_ = './mask_rcnn_r50_fpn_gn-all_2x_coco.py'
# learning policy
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
|
_base_ = './mask_rcnn_r50_fpn_gn-all_2x_coco.py'
# learning policy
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
|
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...clip_text import CLIPTextEncoder
_EMBEDDING_DIM = 512
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=CLIPTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...clip_text import CLIPTextEncoder
_EMBEDDING_DIM = 512
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=CLIPTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
|
"""Fake Embedding class for testing purposes."""
import math
from langchain_core.embeddings import Embeddings
fake_texts = ["foo", "bar", "baz"]
class FakeEmbeddings(Embeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Return simple embeddings.
Embeddings encode each text as its index."""
return [[1.0] * 9 + [float(i)] for i in range(len(texts))]
async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
return self.embed_documents(texts)
def embed_query(self, text: str) -> list[float]:
"""Return constant query embeddings.
Embeddings are identical to embed_documents(texts)[0].
Distance to each text will be that text's index,
as it was passed to embed_documents."""
return [1.0] * 9 + [0.0]
async def aembed_query(self, text: str) -> list[float]:
return self.embed_query(text)
class ConsistentFakeEmbeddings(FakeEmbeddings):
"""Fake embeddings which remember all the texts seen so far to return consistent
vectors for the same texts."""
def __init__(self, dimensionality: int = 10) -> None:
self.known_texts: list[str] = []
self.dimensionality = dimensionality
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Return consistent embeddings for each text seen so far."""
out_vectors = []
for text in texts:
if text not in self.known_texts:
self.known_texts.append(text)
vector = [1.0] * (self.dimensionality - 1) + [
float(self.known_texts.index(text))
]
out_vectors.append(vector)
return out_vectors
def embed_query(self, text: str) -> list[float]:
"""Return consistent embeddings for the text, if seen before, or a constant
one if the text is unknown."""
return self.embed_documents([text])[0]
class AngularTwoDimensionalEmbeddings(Embeddings):
"""
From angles (as strings in units of pi) to unit embedding vectors on a circle.
"""
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""
Make a list of texts into a list of embedding vectors.
"""
return [self.embed_query(text) for text in texts]
def embed_query(self, text: str) -> list[float]:
"""
Convert input text to a 'vector' (list of floats).
If the text is a number, use it as the angle for the
unit vector in units of pi.
Any other input text becomes the singular result [0, 0] !
"""
try:
angle = float(text)
return [math.cos(angle * math.pi), math.sin(angle * math.pi)]
except ValueError:
# Assume: just test string, no attention is paid to values.
return [0.0, 0.0]
|
"""Fake Embedding class for testing purposes."""
import math
from typing import List
from langchain_core.embeddings import Embeddings
fake_texts = ["foo", "bar", "baz"]
class FakeEmbeddings(Embeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Return simple embeddings.
Embeddings encode each text as its index."""
return [[float(1.0)] * 9 + [float(i)] for i in range(len(texts))]
async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
return self.embed_documents(texts)
def embed_query(self, text: str) -> List[float]:
"""Return constant query embeddings.
Embeddings are identical to embed_documents(texts)[0].
Distance to each text will be that text's index,
as it was passed to embed_documents."""
return [float(1.0)] * 9 + [float(0.0)]
async def aembed_query(self, text: str) -> List[float]:
return self.embed_query(text)
class ConsistentFakeEmbeddings(FakeEmbeddings):
"""Fake embeddings which remember all the texts seen so far to return consistent
vectors for the same texts."""
def __init__(self, dimensionality: int = 10) -> None:
self.known_texts: List[str] = []
self.dimensionality = dimensionality
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Return consistent embeddings for each text seen so far."""
out_vectors = []
for text in texts:
if text not in self.known_texts:
self.known_texts.append(text)
vector = [float(1.0)] * (self.dimensionality - 1) + [
float(self.known_texts.index(text))
]
out_vectors.append(vector)
return out_vectors
def embed_query(self, text: str) -> List[float]:
"""Return consistent embeddings for the text, if seen before, or a constant
one if the text is unknown."""
return self.embed_documents([text])[0]
class AngularTwoDimensionalEmbeddings(Embeddings):
"""
From angles (as strings in units of pi) to unit embedding vectors on a circle.
"""
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""
Make a list of texts into a list of embedding vectors.
"""
return [self.embed_query(text) for text in texts]
def embed_query(self, text: str) -> List[float]:
"""
Convert input text to a 'vector' (list of floats).
If the text is a number, use it as the angle for the
unit vector in units of pi.
Any other input text becomes the singular result [0, 0] !
"""
try:
angle = float(text)
return [math.cos(angle * math.pi), math.sin(angle * math.pi)]
except ValueError:
# Assume: just test string, no attention is paid to values.
return [0.0, 0.0]
|
import types
from typing_extensions import TYPE_CHECKING
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.image import ImageNdArray, ImageTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.typing.tensor.audio import AudioJaxArray # noqa: F401
from docarray.typing.tensor.audio import AudioTensorFlowTensor # noqa: F401
from docarray.typing.tensor.audio import AudioTorchTensor # noqa: F401
from docarray.typing.tensor.embedding import JaxArrayEmbedding # noqa F401
from docarray.typing.tensor.embedding import TensorFlowEmbedding # noqa: F401
from docarray.typing.tensor.embedding import TorchEmbedding # noqa: F401
from docarray.typing.tensor.image import ImageJaxArray # noqa: F401
from docarray.typing.tensor.image import ImageTensorFlowTensor # noqa: F401
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.jaxarray import JaxArray # noqa: F401
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
from docarray.typing.tensor.video import VideoJaxArray # noqa: F401
from docarray.typing.tensor.video import VideoTensorFlowTensor # noqa: F401
from docarray.typing.tensor.video import VideoTorchTensor # noqa: F401
__all__ = [
'NdArray',
'AnyTensor',
'AnyEmbedding',
'NdArrayEmbedding',
'ImageNdArray',
'ImageTensor',
'AudioNdArray',
'VideoNdArray',
]
def __getattr__(name: str):
if 'Torch' in name:
import_library('torch', raise_error=True)
elif 'TensorFlow' in name:
import_library('tensorflow', raise_error=True)
elif 'Jax' in name:
import_library('jax', raise_error=True)
lib: types.ModuleType
if name == 'TorchTensor':
import docarray.typing.tensor.torch_tensor as lib
elif name == 'TensorFlowTensor':
import docarray.typing.tensor.tensorflow_tensor as lib
elif name == 'JaxArray':
import docarray.typing.tensor.jaxarray as lib
elif name in ['TorchEmbedding', 'TensorFlowEmbedding', 'JaxArrayEmbedding']:
import docarray.typing.tensor.embedding as lib
elif name in ['ImageTorchTensor', 'ImageTensorFlowTensor', 'ImageJaxArray']:
import docarray.typing.tensor.image as lib
elif name in ['AudioTorchTensor', 'AudioTensorFlowTensor', 'AudioJaxArray']:
import docarray.typing.tensor.audio as lib
elif name in ['VideoTorchTensor', 'VideoTensorFlowTensor', 'VideoJaxArray']:
import docarray.typing.tensor.video as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
tensor_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return tensor_cls
|
import types
from typing_extensions import TYPE_CHECKING
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.image import ImageNdArray, ImageTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.typing.tensor.audio import AudioTensorFlowTensor # noqa: F401
from docarray.typing.tensor.audio import AudioTorchTensor # noqa: F401
from docarray.typing.tensor.embedding import TensorFlowEmbedding # noqa: F401
from docarray.typing.tensor.embedding import TorchEmbedding # noqa: F401
from docarray.typing.tensor.image import ImageTensorFlowTensor # noqa: F401
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
from docarray.typing.tensor.video import VideoTensorFlowTensor # noqa: F401
from docarray.typing.tensor.video import VideoTorchTensor # noqa: F401
__all__ = [
'NdArray',
'AnyTensor',
'AnyEmbedding',
'NdArrayEmbedding',
'ImageNdArray',
'ImageTensor',
'AudioNdArray',
'VideoNdArray',
]
def __getattr__(name: str):
if 'Torch' in name:
import_library('torch', raise_error=True)
elif 'TensorFlow' in name:
import_library('tensorflow', raise_error=True)
lib: types.ModuleType
if name == 'TorchTensor':
import docarray.typing.tensor.torch_tensor as lib
elif name == 'TensorFlowTensor':
import docarray.typing.tensor.tensorflow_tensor as lib
elif name in ['TorchEmbedding', 'TensorFlowEmbedding']:
import docarray.typing.tensor.embedding as lib
elif name in ['ImageTorchTensor', 'ImageTensorFlowTensor']:
import docarray.typing.tensor.image as lib
elif name in ['AudioTorchTensor', 'AudioTensorFlowTensor']:
import docarray.typing.tensor.audio as lib
elif name in ['VideoTorchTensor', 'VideoTensorFlowTensor']:
import docarray.typing.tensor.video as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
tensor_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return tensor_cls
|
# Copyright (c) OpenMMLab. All rights reserved.
from .backbones import * # noqa: F401,F403
from .data_preprocessors import * # noqa: F401,F403
from .dense_heads import * # noqa: F401,F403
from .detectors import * # noqa: F401,F403
from .language_models import * # noqa: F401,F403
from .layers import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .mot import * # noqa: F401,F403
from .necks import * # noqa: F401,F403
from .reid import * # noqa: F401,F403
from .roi_heads import * # noqa: F401,F403
from .seg_heads import * # noqa: F401,F403
from .task_modules import * # noqa: F401,F403
from .test_time_augs import * # noqa: F401,F403
from .trackers import * # noqa: F401,F403
from .tracking_heads import * # noqa: F401,F403
from .vis import * # noqa: F401,F403
|
# Copyright (c) OpenMMLab. All rights reserved.
from .backbones import * # noqa: F401,F403
from .data_preprocessors import * # noqa: F401,F403
from .dense_heads import * # noqa: F401,F403
from .detectors import * # noqa: F401,F403
from .layers import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .necks import * # noqa: F401,F403
from .roi_heads import * # noqa: F401,F403
from .seg_heads import * # noqa: F401,F403
from .task_modules import * # noqa: F401,F403
from .test_time_augs import * # noqa: F401,F403
|
import types
from typing import TYPE_CHECKING
from docarray.store.file import FileDocStore
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.store.jac import JACDocStore # noqa: F401
from docarray.store.s3 import S3DocStore # noqa: F401
__all__ = ['FileDocStore']
def __getattr__(name: str):
lib: types.ModuleType
if name == 'JACDocStore':
import_library('hubble', raise_error=True)
import docarray.store.jac as lib
elif name == 'S3DocStore':
import_library('smart_open', raise_error=True)
import_library('botocore', raise_error=True)
import_library('boto3', raise_error=True)
import docarray.store.s3 as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
store_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return store_cls
|
from docarray.store.file import FileDocStore
from docarray.store.jac import JACDocStore
from docarray.store.s3 import S3DocStore
__all__ = ['JACDocStore', 'FileDocStore', 'S3DocStore']
|
import enum
from typing import Any, Optional
import pydantic
from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash
from backend.data.graph import Graph
class WSMethod(enum.Enum):
SUBSCRIBE_GRAPH_EXEC = "subscribe_graph_execution"
UNSUBSCRIBE = "unsubscribe"
GRAPH_EXECUTION_EVENT = "graph_execution_event"
NODE_EXECUTION_EVENT = "node_execution_event"
ERROR = "error"
HEARTBEAT = "heartbeat"
class WSMessage(pydantic.BaseModel):
method: WSMethod
data: Optional[dict[str, Any] | list[Any] | str] = None
success: bool | None = None
channel: str | None = None
error: str | None = None
class WSSubscribeGraphExecutionRequest(pydantic.BaseModel):
graph_exec_id: str
class ExecuteGraphResponse(pydantic.BaseModel):
graph_exec_id: str
class CreateGraph(pydantic.BaseModel):
graph: Graph
class CreateAPIKeyRequest(pydantic.BaseModel):
name: str
permissions: list[APIKeyPermission]
description: Optional[str] = None
class CreateAPIKeyResponse(pydantic.BaseModel):
api_key: APIKeyWithoutHash
plain_text_key: str
class SetGraphActiveVersion(pydantic.BaseModel):
active_graph_version: int
class UpdatePermissionsRequest(pydantic.BaseModel):
permissions: list[APIKeyPermission]
class Pagination(pydantic.BaseModel):
total_items: int = pydantic.Field(
description="Total number of items.", examples=[42]
)
total_pages: int = pydantic.Field(
description="Total number of pages.", examples=[2]
)
current_page: int = pydantic.Field(
description="Current_page page number.", examples=[1]
)
page_size: int = pydantic.Field(
description="Number of items per page.", examples=[25]
)
class RequestTopUp(pydantic.BaseModel):
credit_amount: int
|
import enum
from typing import Any, List, Optional, Union
import pydantic
import backend.data.graph
from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash
class Methods(enum.Enum):
SUBSCRIBE = "subscribe"
UNSUBSCRIBE = "unsubscribe"
EXECUTION_EVENT = "execution_event"
ERROR = "error"
HEARTBEAT = "heartbeat"
class WsMessage(pydantic.BaseModel):
method: Methods
data: Optional[Union[dict[str, Any], list[Any], str]] = None
success: bool | None = None
channel: str | None = None
error: str | None = None
class ExecutionSubscription(pydantic.BaseModel):
graph_id: str
graph_version: int
class ExecuteGraphResponse(pydantic.BaseModel):
graph_exec_id: str
class CreateGraph(pydantic.BaseModel):
graph: backend.data.graph.Graph
class CreateAPIKeyRequest(pydantic.BaseModel):
name: str
permissions: List[APIKeyPermission]
description: Optional[str] = None
class CreateAPIKeyResponse(pydantic.BaseModel):
api_key: APIKeyWithoutHash
plain_text_key: str
class SetGraphActiveVersion(pydantic.BaseModel):
active_graph_version: int
class UpdatePermissionsRequest(pydantic.BaseModel):
permissions: List[APIKeyPermission]
class Pagination(pydantic.BaseModel):
total_items: int = pydantic.Field(
description="Total number of items.", examples=[42]
)
total_pages: int = pydantic.Field(
description="Total number of pages.", examples=[2]
)
current_page: int = pydantic.Field(
description="Current_page page number.", examples=[1]
)
page_size: int = pydantic.Field(
description="Number of items per page.", examples=[25]
)
class RequestTopUp(pydantic.BaseModel):
credit_amount: int
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.utils import bounding_boxes
from keras.api.utils import legacy
from keras.src.backend.common.global_state import clear_session
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.variables import standardize_dtype
from keras.src.layers.preprocessing.feature_space import FeatureSpace
from keras.src.ops.operation_utils import get_source_inputs
from keras.src.saving.object_registration import CustomObjectScope
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import get_custom_objects
from keras.src.saving.object_registration import get_registered_name
from keras.src.saving.object_registration import get_registered_object
from keras.src.saving.object_registration import register_keras_serializable
from keras.src.saving.serialization_lib import deserialize_keras_object
from keras.src.saving.serialization_lib import serialize_keras_object
from keras.src.trainers.data_adapters.data_adapter_utils import (
pack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.data_adapter_utils import (
unpack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset
from keras.src.trainers.data_adapters.py_dataset_adapter import (
PyDataset as Sequence,
)
from keras.src.utils.audio_dataset_utils import audio_dataset_from_directory
from keras.src.utils.config import Config
from keras.src.utils.dataset_utils import split_dataset
from keras.src.utils.file_utils import get_file
from keras.src.utils.image_dataset_utils import image_dataset_from_directory
from keras.src.utils.image_utils import array_to_img
from keras.src.utils.image_utils import img_to_array
from keras.src.utils.image_utils import load_img
from keras.src.utils.image_utils import save_img
from keras.src.utils.io_utils import disable_interactive_logging
from keras.src.utils.io_utils import enable_interactive_logging
from keras.src.utils.io_utils import is_interactive_logging_enabled
from keras.src.utils.model_visualization import model_to_dot
from keras.src.utils.model_visualization import plot_model
from keras.src.utils.numerical_utils import normalize
from keras.src.utils.numerical_utils import to_categorical
from keras.src.utils.progbar import Progbar
from keras.src.utils.rng_utils import set_random_seed
from keras.src.utils.sequence_utils import pad_sequences
from keras.src.utils.text_dataset_utils import text_dataset_from_directory
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.utils import legacy
from keras.src.backend.common.global_state import clear_session
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.variables import standardize_dtype
from keras.src.layers.preprocessing.feature_space import FeatureSpace
from keras.src.ops.operation_utils import get_source_inputs
from keras.src.saving.object_registration import CustomObjectScope
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import get_custom_objects
from keras.src.saving.object_registration import get_registered_name
from keras.src.saving.object_registration import get_registered_object
from keras.src.saving.object_registration import register_keras_serializable
from keras.src.saving.serialization_lib import deserialize_keras_object
from keras.src.saving.serialization_lib import serialize_keras_object
from keras.src.trainers.data_adapters.data_adapter_utils import (
pack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.data_adapter_utils import (
unpack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset
from keras.src.trainers.data_adapters.py_dataset_adapter import (
PyDataset as Sequence,
)
from keras.src.utils.audio_dataset_utils import audio_dataset_from_directory
from keras.src.utils.config import Config
from keras.src.utils.dataset_utils import split_dataset
from keras.src.utils.file_utils import get_file
from keras.src.utils.image_dataset_utils import image_dataset_from_directory
from keras.src.utils.image_utils import array_to_img
from keras.src.utils.image_utils import img_to_array
from keras.src.utils.image_utils import load_img
from keras.src.utils.image_utils import save_img
from keras.src.utils.io_utils import disable_interactive_logging
from keras.src.utils.io_utils import enable_interactive_logging
from keras.src.utils.io_utils import is_interactive_logging_enabled
from keras.src.utils.model_visualization import model_to_dot
from keras.src.utils.model_visualization import plot_model
from keras.src.utils.numerical_utils import normalize
from keras.src.utils.numerical_utils import to_categorical
from keras.src.utils.progbar import Progbar
from keras.src.utils.rng_utils import set_random_seed
from keras.src.utils.sequence_utils import pad_sequences
from keras.src.utils.text_dataset_utils import text_dataset_from_directory
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array,
)
|
import json
import os
from typing import Dict
import torch
from torch import Tensor, nn
from sentence_transformers.util import fullname, import_from_string
class Dense(nn.Module):
"""
Feed-forward function with activiation function.
This layer takes a fixed-sized sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networks (DAN).
Args:
in_features: Size of the input dimension
out_features: Output size
bias: Add a bias vector
activation_function: Pytorch activation function applied on
output
init_weight: Initial value for the matrix of the linear layer
init_bias: Initial value for the bias of the linear layer
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function=nn.Tanh(),
init_weight: Tensor = None,
init_bias: Tensor = None,
):
super(Dense, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: Dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def __repr__(self):
return "Dense({})".format(self.get_config_dict())
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
config["activation_function"] = import_from_string(config["activation_function"])()
model = Dense(**config)
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
import torch
from torch import Tensor
from torch import nn
from typing import Dict
import os
import json
from ..util import fullname, import_from_string
class Dense(nn.Module):
"""
Feed-forward function with activiation function.
This layer takes a fixed-sized sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networks (DAN).
Args:
in_features: Size of the input dimension
out_features: Output size
bias: Add a bias vector
activation_function: Pytorch activation function applied on
output
init_weight: Initial value for the matrix of the linear layer
init_bias: Initial value for the bias of the linear layer
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function=nn.Tanh(),
init_weight: Tensor = None,
init_bias: Tensor = None,
):
super(Dense, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: Dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def __repr__(self):
return "Dense({})".format(self.get_config_dict())
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
config["activation_function"] = import_from_string(config["activation_function"])()
model = Dense(**config)
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.