input | output
---|---
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import mmcv
import numpy as np
from mmengine.utils import is_str
def palette_val(palette: List[tuple]) -> List[tuple]:
"""Convert palette to matplotlib palette.
Args:
palette (List[tuple]): A list of color tuples.
Returns:
List[tuple[float]]: A list of RGB matplotlib color tuples.
"""
new_palette = []
for color in palette:
color = [c / 255 for c in color]
new_palette.append(tuple(color))
return new_palette
def get_palette(palette: Union[List[tuple], str, tuple],
num_classes: int) -> List[Tuple[int]]:
"""Get palette from various inputs.
Args:
palette (list[tuple] | str | tuple): palette inputs.
num_classes (int): the number of classes.
Returns:
list[tuple[int]]: A list of color tuples.
"""
assert isinstance(num_classes, int)
if isinstance(palette, list):
dataset_palette = palette
elif isinstance(palette, tuple):
dataset_palette = [palette] * num_classes
elif palette == 'random' or palette is None:
state = np.random.get_state()
# random color
np.random.seed(42)
palette = np.random.randint(0, 256, size=(num_classes, 3))
np.random.set_state(state)
dataset_palette = [tuple(c) for c in palette]
elif palette == 'coco':
from mmdet.datasets import CocoDataset, CocoPanopticDataset
dataset_palette = CocoDataset.METAINFO['palette']
if len(dataset_palette) < num_classes:
dataset_palette = CocoPanopticDataset.METAINFO['palette']
elif palette == 'citys':
from mmdet.datasets import CityscapesDataset
dataset_palette = CityscapesDataset.METAINFO['palette']
elif palette == 'voc':
from mmdet.datasets import VOCDataset
dataset_palette = VOCDataset.METAINFO['palette']
elif is_str(palette):
dataset_palette = [mmcv.color_val(palette)[::-1]] * num_classes
else:
raise TypeError(f'Invalid type for palette: {type(palette)}')
assert len(dataset_palette) >= num_classes, \
'The length of palette should not be less than `num_classes`.'
return dataset_palette
def _get_adaptive_scales(areas: np.ndarray,
min_area: int = 800,
max_area: int = 30000) -> np.ndarray:
"""Get adaptive scales according to areas.
The scale range is [0.5, 1.0]. When the area is less than
``min_area``, the scale is 0.5; when the area is larger than
``max_area``, the scale is 1.0.
Args:
areas (ndarray): The areas of bboxes or masks with the
shape of (n, ).
min_area (int): Lower bound areas for adaptive scales.
Defaults to 800.
max_area (int): Upper bound areas for adaptive scales.
Defaults to 30000.
Returns:
ndarray: The adaptive scales with the shape of (n, ).
"""
scales = 0.5 + (areas - min_area) // (max_area - min_area)
scales = np.clip(scales, 0.5, 1.0)
return scales
def jitter_color(color: tuple) -> tuple:
"""Randomly jitter the given color in order to better distinguish instances
with the same class.
Args:
color (tuple): The RGB color tuple. Each value is between [0, 255].
Returns:
tuple: The jittered color tuple.
"""
jitter = np.random.rand(3)
jitter = (jitter / np.linalg.norm(jitter) - 0.5) * 0.5 * 255
color = np.clip(jitter + color, 0, 255).astype(np.uint8)
return tuple(color)
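# A minimal usage sketch of the helpers above (a hedged illustration, not part
# of the original module; the 'coco'/'citys'/'voc' branches additionally need an
# importable mmdet installation, while 'random' and plain tuples do not):
# >>> colors = get_palette('random', num_classes=3)   # 3 reproducible RGB tuples
# >>> mpl_colors = palette_val(colors)                # normalized to [0, 1] for matplotlib
# >>> distinct = [jitter_color(c) for c in colors]    # per-instance color jitter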
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import mmcv
import numpy as np
from mmengine.utils import is_str
def palette_val(palette: List[tuple]) -> List[tuple]:
"""Convert palette to matplotlib palette.
Args:
palette (List[tuple]): A list of color tuples.
Returns:
List[tuple[float]]: A list of RGB matplotlib color tuples.
"""
new_palette = []
for color in palette:
color = [c / 255 for c in color]
new_palette.append(tuple(color))
return new_palette
def get_palette(palette: Union[List[tuple], str, tuple],
num_classes: int) -> List[Tuple[int]]:
"""Get palette from various inputs.
Args:
palette (list[tuple] | str | tuple): palette inputs.
num_classes (int): the number of classes.
Returns:
list[tuple[int]]: A list of color tuples.
"""
assert isinstance(num_classes, int)
if isinstance(palette, list):
dataset_palette = palette
elif isinstance(palette, tuple):
dataset_palette = [palette] * num_classes
elif palette == 'random' or palette is None:
state = np.random.get_state()
# random color
np.random.seed(42)
palette = np.random.randint(0, 256, size=(num_classes, 3))
np.random.set_state(state)
dataset_palette = [tuple(c) for c in palette]
elif palette == 'coco':
from mmdet.datasets import CocoDataset, CocoPanopticDataset
dataset_palette = CocoDataset.METAINFO['palette']
if len(dataset_palette) < num_classes:
dataset_palette = CocoPanopticDataset.METAINFO['palette']
elif palette == 'citys':
from mmdet.datasets import CityscapesDataset
dataset_palette = CityscapesDataset.METAINFO['palette']
elif palette == 'voc':
from mmdet.datasets import VOCDataset
dataset_palette = VOCDataset.METAINFO['palette']
elif is_str(palette):
dataset_palette = [mmcv.color_val(palette)[::-1]] * num_classes
else:
raise TypeError(f'Invalid type for palette: {type(palette)}')
assert len(dataset_palette) >= num_classes, \
'The length of palette should not be less than `num_classes`.'
return dataset_palette
def _get_adaptive_scales(areas: np.ndarray,
min_area: int = 800,
max_area: int = 30000) -> np.ndarray:
"""Get adaptive scales according to areas.
The scale range is [0.5, 1.0]. When the area is less than
``min_area``, the scale is 0.5; when the area is larger than
``max_area``, the scale is 1.0.
Args:
areas (ndarray): The areas of bboxes or masks with the
shape of (n, ).
min_area (int): Lower bound areas for adaptive scales.
Defaults to 800.
max_area (int): Upper bound areas for adaptive scales.
Defaults to 30000.
Returns:
ndarray: The adaptive scales with the shape of (n, ).
"""
scales = 0.5 + (areas - min_area) / (max_area - min_area)
scales = np.clip(scales, 0.5, 1.0)
return scales
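# A short worked example of the linear mapping above: areas at or below
# ``min_area`` clip to 0.5, and the scale saturates at 1.0 once the area
# exceeds ``min_area + 0.5 * (max_area - min_area)`` (15400 with the defaults).
# >>> _get_adaptive_scales(np.array([500., 8100., 20000.]))   # -> [0.5, 0.75, 1.0]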
def jitter_color(color: tuple) -> tuple:
"""Randomly jitter the given color in order to better distinguish instances
with the same class.
Args:
color (tuple): The RGB color tuple. Each value is between [0, 255].
Returns:
tuple: The jittered color tuple.
"""
jitter = np.random.rand(3)
jitter = (jitter / np.linalg.norm(jitter) - 0.5) * 0.5 * 255
color = np.clip(jitter + color, 0, 255).astype(np.uint8)
return tuple(color)
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.utils import SELayer
def test_se_layer():
with pytest.raises(AssertionError):
# act_cfg sequence length must equal 2
SELayer(channels=32, act_cfg=(dict(type='ReLU'), ))
with pytest.raises(AssertionError):
# act_cfg sequence must be a tuple of dict
SELayer(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
# Test SELayer forward
layer = SELayer(channels=32)
layer.init_weights()
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
assert x_out.shape == torch.Size((1, 32, 10, 10))
|
import pytest
import torch
from mmdet.models.utils import SELayer
def test_se_layer():
with pytest.raises(AssertionError):
# act_cfg sequence length must equal 2
SELayer(channels=32, act_cfg=(dict(type='ReLU'), ))
with pytest.raises(AssertionError):
# act_cfg sequence must be a tuple of dict
SELayer(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
# Test SELayer forward
layer = SELayer(channels=32)
layer.init_weights()
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
assert x_out.shape == torch.Size((1, 32, 10, 10))
|
import logging
import os
import pytest
from dotenv import load_dotenv
from backend.util.logging import configure_logging
load_dotenv()
# NOTE: You can run tests with --log-cli-level=INFO to see the logs
# Set up logging
configure_logging()
logger = logging.getLogger(__name__)
# Reduce Prisma log spam unless PRISMA_DEBUG is set
if not os.getenv("PRISMA_DEBUG"):
prisma_logger = logging.getLogger("prisma")
prisma_logger.setLevel(logging.INFO)
@pytest.fixture(scope="session")
async def server():
from backend.util.test import SpinTestServer
async with SpinTestServer() as server:
yield server
@pytest.fixture(scope="session", autouse=True)
async def graph_cleanup(server):
created_graph_ids = []
original_create_graph = server.agent_server.test_create_graph
async def create_graph_wrapper(*args, **kwargs):
created_graph = await original_create_graph(*args, **kwargs)
# Extract user_id correctly
user_id = kwargs.get("user_id", args[2] if len(args) > 2 else None)
created_graph_ids.append((created_graph.id, user_id))
return created_graph
try:
server.agent_server.test_create_graph = create_graph_wrapper
yield # This runs the test function
finally:
server.agent_server.test_create_graph = original_create_graph
# Delete the created graphs and assert they were deleted
for graph_id, user_id in created_graph_ids:
if user_id:
resp = await server.agent_server.test_delete_graph(graph_id, user_id)
num_deleted = resp["version_counts"]
assert num_deleted > 0, f"Graph {graph_id} was not deleted."
|
import logging
import os
import pytest
from backend.util.logging import configure_logging
# NOTE: You can run tests with --log-cli-level=INFO to see the logs
# Set up logging
configure_logging()
logger = logging.getLogger(__name__)
# Reduce Prisma log spam unless PRISMA_DEBUG is set
if not os.getenv("PRISMA_DEBUG"):
prisma_logger = logging.getLogger("prisma")
prisma_logger.setLevel(logging.INFO)
@pytest.fixture(scope="session")
async def server():
from backend.util.test import SpinTestServer
async with SpinTestServer() as server:
yield server
@pytest.fixture(scope="session", autouse=True)
async def graph_cleanup(server):
created_graph_ids = []
original_create_graph = server.agent_server.test_create_graph
async def create_graph_wrapper(*args, **kwargs):
created_graph = await original_create_graph(*args, **kwargs)
# Extract user_id correctly
user_id = kwargs.get("user_id", args[2] if len(args) > 2 else None)
created_graph_ids.append((created_graph.id, user_id))
return created_graph
try:
server.agent_server.test_create_graph = create_graph_wrapper
yield # This runs the test function
finally:
server.agent_server.test_create_graph = original_create_graph
# Delete the created graphs and assert they were deleted
for graph_id, user_id in created_graph_ids:
if user_id:
resp = await server.agent_server.test_delete_graph(graph_id, user_id)
num_deleted = resp["version_counts"]
assert num_deleted > 0, f"Graph {graph_id} was not deleted."
|
import os
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import (
AudioNdArray,
NdArray,
VideoBytes,
VideoNdArray,
VideoTorchTensor,
VideoUrl,
)
from docarray.typing.url.mimetypes import (
AUDIO_MIMETYPE,
IMAGE_MIMETYPE,
OBJ_MIMETYPE,
TEXT_MIMETYPE,
VIDEO_MIMETYPE,
)
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.video import VideoTensorFlowTensor
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load(file_url):
url = parse_obj_as(VideoUrl, file_url)
video, audio, indices = url.load()
assert isinstance(audio, np.ndarray)
assert isinstance(audio, AudioNdArray)
assert isinstance(video, np.ndarray)
assert isinstance(video, VideoNdArray)
assert isinstance(indices, np.ndarray)
assert isinstance(indices, NdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
@pytest.mark.parametrize(
'field, attr_cls',
[
('video', VideoNdArray),
('audio', AudioNdArray),
('key_frame_indices', NdArray),
],
)
def test_load_one_of_named_tuple_results(file_url, field, attr_cls):
url = parse_obj_as(VideoUrl, file_url)
result = getattr(url.load(), field)
assert isinstance(result, np.ndarray)
assert isinstance(result, attr_cls)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_torch_tensor_field(file_url):
class MyVideoDoc(BaseDoc):
video_url: VideoUrl
tensor: Optional[VideoTorchTensor] = None
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, VideoTorchTensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_tensorflow_tensor_field(file_url):
class MyVideoDoc(BaseDoc):
video_url: VideoUrl
tensor: Optional[VideoTensorFlowTensor] = None
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, VideoTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
def test_json_schema():
schema_json_of(VideoUrl)
def test_dump_json():
url = parse_obj_as(VideoUrl, REMOTE_VIDEO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(VideoUrl, path_to_file)
assert isinstance(url, VideoUrl)
assert isinstance(url, str)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_proto_video_url(file_url):
uri = parse_obj_as(VideoUrl, file_url)
proto = uri._to_node_protobuf()
assert 'video_url' in str(proto)
def test_load_bytes():
file_url = LOCAL_VIDEO_FILE
uri = parse_obj_as(VideoUrl, file_url)
video_bytes = uri.load_bytes()
assert isinstance(video_bytes, bytes)
assert isinstance(video_bytes, VideoBytes)
assert len(video_bytes) > 0
@pytest.mark.parametrize(
'file_type, file_source',
[
(VIDEO_MIMETYPE, LOCAL_VIDEO_FILE),
(VIDEO_MIMETYPE, REMOTE_VIDEO_FILE),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.aac')),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.mp3')),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.ogg')),
(IMAGE_MIMETYPE, os.path.join(TOYDATA_DIR, 'test.png')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'test' 'test.html')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'test' 'test.md')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'penal_colony.txt')),
(OBJ_MIMETYPE, os.path.join(TOYDATA_DIR, 'test.glb')),
],
)
def test_file_validation(file_type, file_source):
if file_type != VideoUrl.mime_type():
with pytest.raises(ValueError):
parse_obj_as(VideoUrl, file_source)
else:
parse_obj_as(VideoUrl, file_source)
|
import os
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import (
AudioNdArray,
NdArray,
VideoBytes,
VideoNdArray,
VideoTorchTensor,
VideoUrl,
)
from docarray.typing.url.mimetypes import (
OBJ_MIMETYPE,
AUDIO_MIMETYPE,
VIDEO_MIMETYPE,
IMAGE_MIMETYPE,
TEXT_MIMETYPE,
)
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.video import VideoTensorFlowTensor
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load(file_url):
url = parse_obj_as(VideoUrl, file_url)
video, audio, indices = url.load()
assert isinstance(audio, np.ndarray)
assert isinstance(audio, AudioNdArray)
assert isinstance(video, np.ndarray)
assert isinstance(video, VideoNdArray)
assert isinstance(indices, np.ndarray)
assert isinstance(indices, NdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
@pytest.mark.parametrize(
'field, attr_cls',
[
('video', VideoNdArray),
('audio', AudioNdArray),
('key_frame_indices', NdArray),
],
)
def test_load_one_of_named_tuple_results(file_url, field, attr_cls):
url = parse_obj_as(VideoUrl, file_url)
result = getattr(url.load(), field)
assert isinstance(result, np.ndarray)
assert isinstance(result, attr_cls)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_torch_tensor_field(file_url):
class MyVideoDoc(BaseDoc):
video_url: VideoUrl
tensor: Optional[VideoTorchTensor]
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, VideoTorchTensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_tensorflow_tensor_field(file_url):
class MyVideoDoc(BaseDoc):
video_url: VideoUrl
tensor: Optional[VideoTensorFlowTensor]
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, VideoTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
def test_json_schema():
schema_json_of(VideoUrl)
def test_dump_json():
url = parse_obj_as(VideoUrl, REMOTE_VIDEO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(VideoUrl, path_to_file)
assert isinstance(url, VideoUrl)
assert isinstance(url, str)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_proto_video_url(file_url):
uri = parse_obj_as(VideoUrl, file_url)
proto = uri._to_node_protobuf()
assert 'video_url' in str(proto)
def test_load_bytes():
file_url = LOCAL_VIDEO_FILE
uri = parse_obj_as(VideoUrl, file_url)
video_bytes = uri.load_bytes()
assert isinstance(video_bytes, bytes)
assert isinstance(video_bytes, VideoBytes)
assert len(video_bytes) > 0
@pytest.mark.parametrize(
'file_type, file_source',
[
(VIDEO_MIMETYPE, LOCAL_VIDEO_FILE),
(VIDEO_MIMETYPE, REMOTE_VIDEO_FILE),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.aac')),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.mp3')),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.ogg')),
(IMAGE_MIMETYPE, os.path.join(TOYDATA_DIR, 'test.png')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'test' 'test.html')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'test' 'test.md')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'penal_colony.txt')),
(OBJ_MIMETYPE, os.path.join(TOYDATA_DIR, 'test.glb')),
],
)
def test_file_validation(file_type, file_source):
if file_type != VideoUrl.mime_type():
with pytest.raises(ValueError):
parse_obj_as(VideoUrl, file_source)
else:
parse_obj_as(VideoUrl, file_source)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, List, Optional, Tuple, Type, Union
import cv2
import matplotlib
import numpy as np
import torch
def tensor2ndarray(value: Union[np.ndarray, torch.Tensor]) -> np.ndarray:
"""If the type of value is torch.Tensor, convert the value to np.ndarray.
Args:
value (np.ndarray, torch.Tensor): value.
Returns:
np.ndarray: The converted value.
"""
if isinstance(value, torch.Tensor):
value = value.detach().cpu().numpy()
return value
def value2list(value: Any, valid_type: Union[Type, Tuple[Type, ...]],
expand_dim: int) -> List[Any]:
"""If the type of ``value`` is ``valid_type``, convert the value to list
and expand to ``expand_dim``.
Args:
value (Any): value.
valid_type (Union[Type, Tuple[Type, ...]): valid type.
expand_dim (int): expand dim.
Returns:
List[Any]: value.
"""
if isinstance(value, valid_type):
value = [value] * expand_dim
return value
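# For example, value2list('red', str, 3) returns ['red', 'red', 'red'], while a
# value that is not an instance of ``valid_type`` (e.g. an existing list) is
# returned unchanged.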
def check_type(name: str, value: Any,
valid_type: Union[Type, Tuple[Type, ...]]) -> None:
"""Check whether the type of value is in ``valid_type``.
Args:
name (str): value name.
value (Any): value.
valid_type (Type, Tuple[Type, ...]): expected type.
"""
if not isinstance(value, valid_type):
raise TypeError(f'`{name}` should be {valid_type} '
f'but got {type(value)}')
def check_length(name: str, value: Any, valid_length: int) -> None:
"""If type of the ``value`` is list, check whether its length is equal with
or greater than ``valid_length``.
Args:
name (str): value name.
value (Any): value.
valid_length (int): expected length.
"""
if isinstance(value, list):
if len(value) < valid_length:
raise AssertionError(
f'The length of {name} must be equal to or '
f'greater than {valid_length}, but got {len(value)}')
def check_type_and_length(name: str, value: Any,
valid_type: Union[Type, Tuple[Type, ...]],
valid_length: int) -> None:
"""Check whether the type of value is in ``valid_type``. If type of the
``value`` is list, check whether its length is equal with or greater than
``valid_length``.
Args:
value (Any): value.
legal_type (Type, Tuple[Type, ...]): legal type.
valid_length (int): expected length.
Returns:
List[Any]: value.
"""
check_type(name, value, valid_type)
check_length(name, value, valid_length)
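# A small illustration of the combined check: check_type_and_length('colors',
# ['red', 'green'], (list, str), 2) passes silently, a one-element list raises
# AssertionError, and an int raises TypeError.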
def color_val_matplotlib(colors):
"""Convert various input in RGB order to normalized RGB matplotlib color
tuples,
Args:
color (:obj:`mmcv.Color`/str/tuple/int/ndarray): Color inputs
Returns:
tuple[float]: A tuple of 3 normalized floats indicating RGB channels.
"""
if isinstance(colors, str):
return colors
elif isinstance(colors, tuple):
assert len(colors) == 3
for channel in colors:
assert 0 <= channel <= 255
colors = [channel / 255 for channel in colors]
return tuple(colors)
elif isinstance(colors, list):
colors = [color_val_matplotlib(color) for color in colors]
return colors
else:
raise TypeError(f'Invalid type for color: {type(colors)}')
def str_color_to_rgb(color):
"""Convert a matplotlib color string to an ``(R, G, B)`` tuple in [0, 255]."""
color = matplotlib.colors.to_rgb(color)
color = tuple([int(c * 255) for c in color])
return color
def convert_overlay_heatmap(feat_map: Union[np.ndarray, torch.Tensor],
img: Optional[np.ndarray] = None,
alpha: float = 0.5) -> np.ndarray:
"""Convert feat_map to heatmap and overlay on image, if image is not None.
Args:
feat_map (np.ndarray, torch.Tensor): The feature map to convert,
with shape (H, W), where H is the image height and W is
the image width.
img (np.ndarray, optional): The original image, in RGB format.
Defaults to None.
alpha (float): The transparency of the original image. Defaults to 0.5.
Returns:
np.ndarray: The heatmap, overlaid on ``img`` if ``img`` is not None.
"""
if isinstance(feat_map, torch.Tensor):
feat_map = feat_map.detach().cpu().numpy()
norm_img = np.zeros(feat_map.shape)
norm_img = cv2.normalize(feat_map, norm_img, 0, 255, cv2.NORM_MINMAX)
norm_img = np.asarray(norm_img, dtype=np.uint8)
heat_img = cv2.applyColorMap(norm_img, cv2.COLORMAP_JET)
heat_img = cv2.cvtColor(heat_img, cv2.COLOR_BGR2RGB)
if img is not None:
heat_img = cv2.addWeighted(img, alpha, heat_img, 1 - alpha, 0)
return heat_img
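# A minimal usage sketch (the shapes below are illustrative assumptions):
# >>> feat = torch.rand(32, 48)                                     # (H, W) feature map
# >>> img = np.random.randint(0, 256, (32, 48, 3), dtype=np.uint8)  # RGB image
# >>> convert_overlay_heatmap(feat, img, alpha=0.5).shape
# (32, 48, 3)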
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, List, Tuple, Type, Union
import numpy as np
import torch
def tensor2ndarray(value: Union[np.ndarray, torch.Tensor]) -> np.ndarray:
"""If the type of value is torch.Tensor, convert the value to np.ndarray.
Args:
value (np.ndarray, torch.Tensor): value.
Returns:
np.ndarray: The converted value.
"""
if isinstance(value, torch.Tensor):
value = value.detach().cpu().numpy()
return value
def value2list(value: Any, valid_type: Union[Type, Tuple[Type, ...]],
expand_dim: int) -> List[Any]:
"""If the type of ``value`` is ``valid_type``, convert the value to list
and expand to ``expand_dim``.
Args:
value (Any): value.
valid_type (Union[Type, Tuple[Type, ...]): valid type.
expand_dim (int): expand dim.
Returns:
List[Any]: value.
"""
if isinstance(value, valid_type):
value = [value] * expand_dim
return value
def check_type(name: str, value: Any,
valid_type: Union[Type, Tuple[Type, ...]]) -> None:
"""Check whether the type of value is in ``valid_type``.
Args:
name (str): value name.
value (Any): value.
valid_type (Type, Tuple[Type, ...]): expected type.
"""
if not isinstance(value, valid_type):
raise TypeError(f'`{name}` should be {valid_type} '
f'but got {type(value)}')
def check_length(name: str, value: Any, valid_length: int) -> None:
"""If type of the ``value`` is list, check whether its length is equal with
or greater than ``valid_length``.
Args:
name (str): value name.
value (Any): value.
valid_length (int): expected length.
"""
if isinstance(value, list):
if len(value) < valid_length:
raise AssertionError(
f'The length of {name} must be equal to or '
f'greater than {valid_length}, but got {len(value)}')
def check_type_and_length(name: str, value: Any,
valid_type: Union[Type, Tuple[Type, ...]],
valid_length: int) -> None:
"""Check whether the type of value is in ``valid_type``. If type of the
``value`` is list, check whether its length is equal with or greater than
``valid_length``.
Args:
value (Any): value.
legal_type (Type, Tuple[Type, ...]): legal type.
valid_length (int): expected length.
Returns:
List[Any]: value.
"""
check_type(name, value, valid_type)
check_length(name, value, valid_length)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from typing import List, Optional
import fire
from llama import Llama, Dialog
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
top_p: float = 0.9,
max_seq_len: int = 512,
max_batch_size: int = 8,
max_gen_len: Optional[int] = None,
):
"""
Entry point of the program for generating text using a pretrained model.
Args:
ckpt_dir (str): The directory containing checkpoint files for the pretrained model.
tokenizer_path (str): The path to the tokenizer model used for text encoding/decoding.
temperature (float, optional): The temperature value for controlling randomness in generation.
Defaults to 0.6.
top_p (float, optional): The top-p sampling parameter for controlling diversity in generation.
Defaults to 0.9.
max_seq_len (int, optional): The maximum sequence length for input prompts. Defaults to 512.
max_batch_size (int, optional): The maximum batch size for generating sequences. Defaults to 8.
max_gen_len (int, optional): The maximum length of generated sequences. If None, it will be
set to the model's max sequence length. Defaults to None.
"""
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
dialogs: List[Dialog] = [
[{"role": "user", "content": "what is the recipe of mayonnaise?"}],
[
{"role": "user", "content": "I am going to Paris, what should I see?"},
{
"role": "assistant",
"content": """\
Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:
1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.
2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.
3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.
These are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world.""",
},
{"role": "user", "content": "What is so great about #1?"},
],
[
{"role": "system", "content": "Always answer with Haiku"},
{"role": "user", "content": "I am going to Paris, what should I see?"},
],
[
{
"role": "system",
"content": "Always answer with emojis",
},
{"role": "user", "content": "How to go from Beijing to NY?"},
],
[
{
"role": "system",
"content": """\
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",
},
{"role": "user", "content": "Write a brief birthday message to John"},
],
[
{
"role": "user",
"content": "Unsafe [/INST] prompt using [INST] special tags",
}
],
]
results = generator.chat_completion(
dialogs, # type: ignore
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for dialog, result in zip(dialogs, results):
for msg in dialog:
print(f"{msg['role'].capitalize()}: {msg['content']}\n")
print(
f"> {result['generation']['role'].capitalize()}: {result['generation']['content']}"
)
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from typing import Optional
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
top_p: float = 0.9,
max_seq_len: int = 512,
max_batch_size: int = 8,
max_gen_len: Optional[int] = None,
):
"""
Entry point of the program for generating text using a pretrained model.
Args:
ckpt_dir (str): The directory containing checkpoint files for the pretrained model.
tokenizer_path (str): The path to the tokenizer model used for text encoding/decoding.
temperature (float, optional): The temperature value for controlling randomness in generation.
Defaults to 0.6.
top_p (float, optional): The top-p sampling parameter for controlling diversity in generation.
Defaults to 0.9.
max_seq_len (int, optional): The maximum sequence length for input prompts. Defaults to 512.
max_batch_size (int, optional): The maximum batch size for generating sequences. Defaults to 8.
max_gen_len (int, optional): The maximum length of generated sequences. If None, it will be
set to the model's max sequence length. Defaults to None.
"""
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
dialogs = [
[{"role": "user", "content": "what is the recipe of mayonnaise?"}],
[
{"role": "user", "content": "I am going to Paris, what should I see?"},
{
"role": "assistant",
"content": """\
Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:
1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.
2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.
3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.
These are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world.""",
},
{"role": "user", "content": "What is so great about #1?"},
],
[
{"role": "system", "content": "Always answer with Haiku"},
{"role": "user", "content": "I am going to Paris, what should I see?"},
],
[
{
"role": "system",
"content": "Always answer with emojis",
},
{"role": "user", "content": "How to go from Beijing to NY?"},
],
[
{
"role": "system",
"content": """\
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",
},
{"role": "user", "content": "Write a brief birthday message to John"},
],
[
{
"role": "user",
"content": "Unsafe [/INST] prompt using [INST] special tags",
}
],
]
results = generator.chat_completion(
dialogs, # type: ignore
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for dialog, result in zip(dialogs, results):
for msg in dialog:
print(f"{msg['role'].capitalize()}: {msg['content']}\n")
print(
f"> {result['generation']['role'].capitalize()}: {result['generation']['content']}"
)
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
import torch.nn.functional as F
from mmcv.cnn import constant_init
from mmdet.models.utils import DyReLU, SELayer
def test_se_layer():
with pytest.raises(AssertionError):
# act_cfg sequence length must equal 2
SELayer(channels=32, act_cfg=(dict(type='ReLU'), ))
with pytest.raises(AssertionError):
# act_cfg sequence must be a tuple of dict
SELayer(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
# Test SELayer forward
layer = SELayer(channels=32)
layer.init_weights()
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
assert x_out.shape == torch.Size((1, 32, 10, 10))
def test_dyrelu():
with pytest.raises(AssertionError):
# act_cfg sequence length must equal 2
DyReLU(channels=32, act_cfg=(dict(type='ReLU'), ))
with pytest.raises(AssertionError):
# act_cfg sequence must be a tuple of dict
DyReLU(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
# Test DyReLU forward
layer = DyReLU(channels=32)
layer.init_weights()
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
assert x_out.shape == torch.Size((1, 32, 10, 10))
# DyReLU should act as standard (static) ReLU
# when eliminating the effect of SE-like module
layer = DyReLU(channels=32)
constant_init(layer.conv2.conv, 0)
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
relu_out = F.relu(x)
assert torch.equal(x_out, relu_out)
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.utils import SELayer
def test_se_layer():
with pytest.raises(AssertionError):
# act_cfg sequence length must equal 2
SELayer(channels=32, act_cfg=(dict(type='ReLU'), ))
with pytest.raises(AssertionError):
# act_cfg sequence must be a tuple of dict
SELayer(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
# Test SELayer forward
layer = SELayer(channels=32)
layer.init_weights()
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
assert x_out.shape == torch.Size((1, 32, 10, 10))
|
from pathlib import Path
from typing import List, Tuple, Union
import torch
import torchaudio
from torch.utils.data import Dataset
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
class LibriMix(Dataset):
r"""Create the *LibriMix* [:footcite:`cosentino2020librimix`] dataset.
Args:
root (str or Path): The path to the directory where the directory ``Libri2Mix`` or
``Libri3Mix`` is stored.
subset (str, optional): The subset to use. Options: [``train-360``, ``train-100``,
``dev``, and ``test``] (Default: ``train-360``).
num_speakers (int, optional): The number of speakers, which determines the directories
to traverse. The Dataset will traverse ``s1`` to ``sN`` directories to collect
N source audios. (Default: 2)
sample_rate (int, optional): Sample rate of the audio files. The ``sample_rate`` determines
from which subdirectory the audio files are fetched. If any audio file has a different sample
rate, a ``ValueError`` is raised. Options: [8000, 16000] (Default: 8000)
task (str, optional): the task of LibriMix.
Options: [``enh_single``, ``enh_both``, ``sep_clean``, ``sep_noisy``]
(Default: ``sep_clean``)
Note:
The LibriMix dataset needs to be manually generated. Please check https://github.com/JorisCos/LibriMix
"""
def __init__(
self,
root: Union[str, Path],
subset: str = "train-360",
num_speakers: int = 2,
sample_rate: int = 8000,
task: str = "sep_clean",
):
self.root = Path(root) / f"Libri{num_speakers}Mix"
if sample_rate == 8000:
self.root = self.root / "wav8k/min" / subset
elif sample_rate == 16000:
self.root = self.root / "wav16k/min" / subset
else:
raise ValueError(f"Unsupported sample rate. Found {sample_rate}.")
self.sample_rate = sample_rate
self.task = task
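# The mixture directory name follows the second half of ``task``,
# e.g. task="sep_clean" -> "mix_clean", task="enh_both" -> "mix_both".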
self.mix_dir = (self.root / f"mix_{task.split('_')[1]}").resolve()
self.src_dirs = [(self.root / f"s{i+1}").resolve() for i in range(num_speakers)]
self.files = [p.name for p in self.mix_dir.glob("*wav")]
self.files.sort()
def _load_audio(self, path) -> torch.Tensor:
waveform, sample_rate = torchaudio.load(path)
if sample_rate != self.sample_rate:
raise ValueError(
f"The dataset contains audio file of sample rate {sample_rate}, "
f"but the requested sample rate is {self.sample_rate}."
)
return waveform
def _load_sample(self, filename) -> SampleType:
mixed = self._load_audio(str(self.mix_dir / filename))
srcs = []
for i, dir_ in enumerate(self.src_dirs):
src = self._load_audio(str(dir_ / filename))
if mixed.shape != src.shape:
raise ValueError(f"Different waveform shapes. mixed: {mixed.shape}, src[{i}]: {src.shape}")
srcs.append(src)
return self.sample_rate, mixed, srcs
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, key: int) -> SampleType:
"""Load the n-th sample from the dataset.
Args:
key (int): The index of the sample to be loaded
Returns:
(int, Tensor, List[Tensor]): ``(sample_rate, mix_waveform, list_of_source_waveforms)``
"""
return self._load_sample(self.files[key])
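# A minimal usage sketch (the root path is an illustrative assumption and must
# point at a locally generated LibriMix tree):
# >>> dataset = LibriMix("/data/LibriMix", subset="dev", num_speakers=2)
# >>> sample_rate, mixture, sources = dataset[0]
# >>> len(sources)
# 2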
|
from pathlib import Path
from typing import Union, Tuple, List
import torch
import torchaudio
from torch.utils.data import Dataset
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
class LibriMix(Dataset):
r"""Create the *LibriMix* [:footcite:`cosentino2020librimix`] dataset.
Args:
root (str or Path): The path to the directory where the directory ``Libri2Mix`` or
``Libri3Mix`` is stored.
subset (str, optional): The subset to use. Options: [``train-360``, ``train-100``,
``dev``, and ``test``] (Default: ``train-360``).
num_speakers (int, optional): The number of speakers, which determines the directories
to traverse. The Dataset will traverse ``s1`` to ``sN`` directories to collect
N source audios. (Default: 2)
sample_rate (int, optional): Sample rate of the audio files. The ``sample_rate`` determines
from which subdirectory the audio files are fetched. If any audio file has a different sample
rate, a ``ValueError`` is raised. Options: [8000, 16000] (Default: 8000)
task (str, optional): the task of LibriMix.
Options: [``enh_single``, ``enh_both``, ``sep_clean``, ``sep_noisy``]
(Default: ``sep_clean``)
Note:
The LibriMix dataset needs to be manually generated. Please check https://github.com/JorisCos/LibriMix
"""
def __init__(
self,
root: Union[str, Path],
subset: str = "train-360",
num_speakers: int = 2,
sample_rate: int = 8000,
task: str = "sep_clean",
):
self.root = Path(root) / f"Libri{num_speakers}Mix"
if sample_rate == 8000:
self.root = self.root / "wav8k/min" / subset
elif sample_rate == 16000:
self.root = self.root / "wav16k/min" / subset
else:
raise ValueError(f"Unsupported sample rate. Found {sample_rate}.")
self.sample_rate = sample_rate
self.task = task
self.mix_dir = (self.root / f"mix_{task.split('_')[1]}").resolve()
self.src_dirs = [(self.root / f"s{i+1}").resolve() for i in range(num_speakers)]
self.files = [p.name for p in self.mix_dir.glob("*wav")]
self.files.sort()
def _load_audio(self, path) -> torch.Tensor:
waveform, sample_rate = torchaudio.load(path)
if sample_rate != self.sample_rate:
raise ValueError(
f"The dataset contains audio file of sample rate {sample_rate}, "
f"but the requested sample rate is {self.sample_rate}."
)
return waveform
def _load_sample(self, filename) -> SampleType:
mixed = self._load_audio(str(self.mix_dir / filename))
srcs = []
for i, dir_ in enumerate(self.src_dirs):
src = self._load_audio(str(dir_ / filename))
if mixed.shape != src.shape:
raise ValueError(f"Different waveform shapes. mixed: {mixed.shape}, src[{i}]: {src.shape}")
srcs.append(src)
return self.sample_rate, mixed, srcs
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, key: int) -> SampleType:
"""Load the n-th sample from the dataset.
Args:
key (int): The index of the sample to be loaded
Returns:
(int, Tensor, List[Tensor]): ``(sample_rate, mix_waveform, list_of_source_waveforms)``
"""
return self._load_sample(self.files[key])
|
import contextlib
import logging
import typing
import fastapi
import fastapi.responses
import starlette.middleware.cors
import uvicorn
from autogpt_libs.feature_flag.client import (
initialize_launchdarkly,
shutdown_launchdarkly,
)
import backend.data.block
import backend.data.db
import backend.data.graph
import backend.data.user
import backend.server.routers.v1
import backend.server.v2.library.routes
import backend.server.v2.store.routes
import backend.util.service
import backend.util.settings
from backend.server.external.api import external_app
settings = backend.util.settings.Settings()
logger = logging.getLogger(__name__)
logging.getLogger("autogpt_libs").setLevel(logging.INFO)
@contextlib.contextmanager
def launch_darkly_context():
if settings.config.app_env != backend.util.settings.AppEnvironment.LOCAL:
initialize_launchdarkly()
try:
yield
finally:
shutdown_launchdarkly()
else:
yield
@contextlib.asynccontextmanager
async def lifespan_context(app: fastapi.FastAPI):
await backend.data.db.connect()
await backend.data.block.initialize_blocks()
await backend.data.user.migrate_and_encrypt_user_integrations()
await backend.data.graph.fix_llm_provider_credentials()
with launch_darkly_context():
yield
await backend.data.db.disconnect()
docs_url = (
"/docs"
if settings.config.app_env == backend.util.settings.AppEnvironment.LOCAL
else None
)
app = fastapi.FastAPI(
title="AutoGPT Agent Server",
description=(
"This server is used to execute agents that are created by the "
"AutoGPT system."
),
summary="AutoGPT Agent Server",
version="0.1",
lifespan=lifespan_context,
docs_url=docs_url,
)
def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
def handler(request: fastapi.Request, exc: Exception):
if log_error:
logger.exception(f"{request.method} {request.url.path} failed: {exc}")
return fastapi.responses.JSONResponse(
content={
"message": f"{request.method} {request.url.path} failed",
"detail": str(exc),
},
status_code=status_code,
)
return handler
app.add_exception_handler(ValueError, handle_internal_http_error(400))
app.add_exception_handler(Exception, handle_internal_http_error(500))
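# A hedged illustration (not part of the original app): any other exception type
# can be routed the same way, e.g. returning 404 without a stack-trace log:
# app.add_exception_handler(KeyError, handle_internal_http_error(404, log_error=False))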
app.include_router(backend.server.routers.v1.v1_router, tags=["v1"], prefix="/api")
app.include_router(
backend.server.v2.store.routes.router, tags=["v2"], prefix="/api/store"
)
app.include_router(
backend.server.v2.library.routes.router, tags=["v2"], prefix="/api/library"
)
app.mount("/external-api", external_app)
@app.get(path="/health", tags=["health"], dependencies=[])
async def health():
return {"status": "healthy"}
class AgentServer(backend.util.service.AppProcess):
def run(self):
server_app = starlette.middleware.cors.CORSMiddleware(
app=app,
allow_origins=settings.config.backend_cors_allow_origins,
allow_credentials=True,
allow_methods=["*"], # Allows all methods
allow_headers=["*"], # Allows all headers
)
uvicorn.run(
server_app,
host=backend.util.settings.Config().agent_api_host,
port=backend.util.settings.Config().agent_api_port,
)
@staticmethod
async def test_execute_graph(
graph_id: str, node_input: dict[typing.Any, typing.Any], user_id: str
):
return backend.server.routers.v1.execute_graph(graph_id, node_input, user_id)
@staticmethod
async def test_create_graph(
create_graph: backend.server.routers.v1.CreateGraph,
user_id: str,
):
return await backend.server.routers.v1.create_new_graph(create_graph, user_id)
@staticmethod
async def test_get_graph_run_status(graph_exec_id: str, user_id: str):
execution = await backend.data.graph.get_execution(
user_id=user_id, execution_id=graph_exec_id
)
if not execution:
raise ValueError(f"Execution {graph_exec_id} not found")
return execution.status
@staticmethod
async def test_get_graph_run_node_execution_results(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_node_execution_results(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_delete_graph(graph_id: str, user_id: str):
return await backend.server.routers.v1.delete_graph(graph_id, user_id)
def set_test_dependency_overrides(self, overrides: dict):
app.dependency_overrides.update(overrides)
|
import contextlib
import logging
import typing
import fastapi
import fastapi.responses
import starlette.middleware.cors
import uvicorn
from autogpt_libs.feature_flag.client import (
initialize_launchdarkly,
shutdown_launchdarkly,
)
import backend.data.block
import backend.data.db
import backend.data.graph
import backend.data.user
import backend.server.routers.v1
import backend.server.v2.library.routes
import backend.server.v2.store.routes
import backend.util.service
import backend.util.settings
settings = backend.util.settings.Settings()
logger = logging.getLogger(__name__)
logging.getLogger("autogpt_libs").setLevel(logging.INFO)
@contextlib.contextmanager
def launch_darkly_context():
if settings.config.app_env != backend.util.settings.AppEnvironment.LOCAL:
initialize_launchdarkly()
try:
yield
finally:
shutdown_launchdarkly()
else:
yield
@contextlib.asynccontextmanager
async def lifespan_context(app: fastapi.FastAPI):
await backend.data.db.connect()
await backend.data.block.initialize_blocks()
await backend.data.user.migrate_and_encrypt_user_integrations()
await backend.data.graph.fix_llm_provider_credentials()
with launch_darkly_context():
yield
await backend.data.db.disconnect()
docs_url = (
"/docs"
if settings.config.app_env == backend.util.settings.AppEnvironment.LOCAL
else None
)
app = fastapi.FastAPI(
title="AutoGPT Agent Server",
description=(
"This server is used to execute agents that are created by the "
"AutoGPT system."
),
summary="AutoGPT Agent Server",
version="0.1",
lifespan=lifespan_context,
docs_url=docs_url,
)
def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
def handler(request: fastapi.Request, exc: Exception):
if log_error:
logger.exception(f"{request.method} {request.url.path} failed: {exc}")
return fastapi.responses.JSONResponse(
content={
"message": f"{request.method} {request.url.path} failed",
"detail": str(exc),
},
status_code=status_code,
)
return handler
app.add_exception_handler(ValueError, handle_internal_http_error(400))
app.add_exception_handler(Exception, handle_internal_http_error(500))
app.include_router(backend.server.routers.v1.v1_router, tags=["v1"], prefix="/api")
app.include_router(
backend.server.v2.store.routes.router, tags=["v2"], prefix="/api/store"
)
app.include_router(
backend.server.v2.library.routes.router, tags=["v2"], prefix="/api/library"
)
@app.get(path="/health", tags=["health"], dependencies=[])
async def health():
return {"status": "healthy"}
class AgentServer(backend.util.service.AppProcess):
def run(self):
server_app = starlette.middleware.cors.CORSMiddleware(
app=app,
allow_origins=settings.config.backend_cors_allow_origins,
allow_credentials=True,
allow_methods=["*"], # Allows all methods
allow_headers=["*"], # Allows all headers
)
uvicorn.run(
server_app,
host=backend.util.settings.Config().agent_api_host,
port=backend.util.settings.Config().agent_api_port,
)
@staticmethod
async def test_execute_graph(
graph_id: str, node_input: dict[typing.Any, typing.Any], user_id: str
):
return backend.server.routers.v1.execute_graph(graph_id, node_input, user_id)
@staticmethod
async def test_create_graph(
create_graph: backend.server.routers.v1.CreateGraph,
user_id: str,
):
return await backend.server.routers.v1.create_new_graph(create_graph, user_id)
@staticmethod
async def test_get_graph_run_status(graph_exec_id: str, user_id: str):
execution = await backend.data.graph.get_execution(
user_id=user_id, execution_id=graph_exec_id
)
if not execution:
raise ValueError(f"Execution {graph_exec_id} not found")
return execution.status
@staticmethod
async def test_get_graph_run_node_execution_results(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_node_execution_results(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_delete_graph(graph_id: str, user_id: str):
return await backend.server.routers.v1.delete_graph(graph_id, user_id)
def set_test_dependency_overrides(self, overrides: dict):
app.dependency_overrides.update(overrides)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, patch
import pytest
import torch
import torch.nn as nn
from mmengine.model.wrappers import (MMDataParallel, MMDistributedDataParallel,
is_model_wrapper)
from mmengine.registry import MODEL_WRAPPERS
def mock(*args, **kwargs):
pass
@patch('torch.distributed._broadcast_coalesced', mock)
@patch('torch.distributed.broadcast', mock)
@patch('torch.nn.parallel.DistributedDataParallel._ddp_init_helper', mock)
def test_is_model_wrapper():
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(2, 2, 1)
def forward(self, x):
return self.conv(x)
# _verify_model_across_ranks is added in torch 1.9.0 so we should check
# whether _verify_model_across_ranks is a member of torch.distributed
# before mocking
if hasattr(torch.distributed, '_verify_model_across_ranks'):
torch.distributed._verify_model_across_ranks = mock
model = Model()
assert not is_model_wrapper(model)
mmdp = MMDataParallel(model)
assert is_model_wrapper(mmdp)
mmddp = MMDistributedDataParallel(model, process_group=MagicMock())
assert is_model_wrapper(mmddp)
# test model wrapper registry
@MODEL_WRAPPERS.register_module()
class ModelWrapper(object):
def __init__(self, module):
self.module = module
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
model_wrapper = ModelWrapper(model)
assert is_model_wrapper(model_wrapper)
class TestMMDataParallel(TestCase):
def setUp(self):
"""Setup the demo image in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 2, 1)
def forward(self, x):
return self.conv(x)
def train_step(self, x):
return self.forward(x)
def val_step(self, x):
return self.forward(x)
self.model = Model()
def test_train_step(self):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 2, 1)
def forward(self, x):
return self.conv(x)
model = Model()
mmdp = MMDataParallel(model)
# test without train_step attribute
with pytest.raises(AssertionError):
mmdp.train_step(torch.zeros([1, 1, 3, 3]))
out = self.model.train_step(torch.zeros([1, 1, 3, 3]))
assert out.shape == (1, 2, 3, 3)
def test_val_step(self):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 2, 1)
def forward(self, x):
return self.conv(x)
model = Model()
mmdp = MMDataParallel(model)
# test without val_step attribute
with pytest.raises(AssertionError):
mmdp.val_step(torch.zeros([1, 1, 3, 3]))
out = self.model.val_step(torch.zeros([1, 1, 3, 3]))
assert out.shape == (1, 2, 3, 3)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import MagicMock, patch
import pytest
import torch
import torch.nn as nn
from mmengine.model.wrappers import (MMDataParallel, MMDistributedDataParallel,
is_model_wrapper)
from mmengine.registry import MODEL_WRAPPERS
def mock(*args, **kwargs):
pass
@patch('torch.distributed._broadcast_coalesced', mock)
@patch('torch.distributed.broadcast', mock)
@patch('torch.nn.parallel.DistributedDataParallel._ddp_init_helper', mock)
def test_is_model_wrapper():
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(2, 2, 1)
def forward(self, x):
return self.conv(x)
# _verify_model_across_ranks is added in torch 1.9.0 so we should check
# whether _verify_model_across_ranks is a member of torch.distributed
# before mocking
if hasattr(torch.distributed, '_verify_model_across_ranks'):
torch.distributed._verify_model_across_ranks = mock
model = Model()
assert not is_model_wrapper(model)
mmdp = MMDataParallel(model)
assert is_model_wrapper(mmdp)
mmddp = MMDistributedDataParallel(model, process_group=MagicMock())
assert is_model_wrapper(mmddp)
# test model wrapper registry
@MODEL_WRAPPERS.register_module()
class ModelWrapper(object):
def __init__(self, module):
self.module = module
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
model_wrapper = ModelWrapper(model)
assert is_model_wrapper(model_wrapper)
class TestMMDataParallel:
def setUp(self):
"""Setup the demo image in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(2, 2, 1)
def forward(self, x):
return self.conv(x)
def train_step(self, x):
return self.forward(x)
def val_step(self, x):
return self.forward(x)
self.model = Model()
def test_train_step(self):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 2, 1)
def forward(self, x):
return self.conv(x)
model = Model()
mmdp = MMDataParallel(model)
# test without train_step attribute
with pytest.raises(AssertionError):
mmdp.train_step(torch.zeros([1, 1, 3, 3]))
out = self.model.train_step([torch.zeros([1, 1, 3, 3])])
assert out.shape == (1, 2, 3, 3)
def test_val_step(self):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 2, 1)
def forward(self, x):
return self.conv(x)
model = Model()
mmdp = MMDataParallel(model)
# test without val_step attribute
with pytest.raises(AssertionError):
mmdp.val_step(torch.zeros([1, 1, 3, 3]))
out = self.model.val_step([torch.zeros([1, 1, 3, 3])])
assert out.shape == (1, 2, 3, 3)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.3.1'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
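# Worked examples of the parsing above: plain numeric parts become ints and a
# release-candidate suffix is kept as a string.
# >>> parse_version_info('0.3.1')
# (0, 3, 1)
# >>> parse_version_info('1.0.0rc2')
# (1, 0, 0, 'rc2')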
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.3.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import pytest
from jina import Document, Flow
from ...video_torch_encoder import VideoTorchEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def kinects_videos():
from torchvision.datasets import Kinetics400
dataset = Kinetics400(
root=os.path.join(cur_dir, '../data/kinetics400'), frames_per_clip=20
)
return [dataset[0][0], dataset[0][0]]
def test_video_torch_encoder(kinects_videos):
f = Flow().add(uses=VideoTorchEncoder)
with f:
resp = f.post(
on='/test',
inputs=[Document(blob=video.detach().numpy()) for video in kinects_videos],
return_results=True,
)
assert resp[0].docs[0].embedding.shape == (512,)
assert resp[0].docs[1].embedding.shape == (512,)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import pytest
from jina import Document, Flow
try:
from video_torch_encoder import VideoTorchEncoder
except:
from ...video_torch_encoder import VideoTorchEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def kinects_videos():
from torchvision.datasets import Kinetics400
dataset = Kinetics400(root=os.path.join(cur_dir, '../data/kinetics400'), frames_per_clip=20)
return [dataset[0][0], dataset[0][0]]
def test_video_torch_encoder(kinects_videos):
f = Flow().add(uses=VideoTorchEncoder)
with f:
resp = f.post(on='/test', inputs=[Document(blob=video.detach().numpy()) for video in kinects_videos], return_results=True)
assert resp[0].docs[0].embedding.shape == (512, )
assert resp[0].docs[1].embedding.shape == (512,)
|
"""Browser tools and toolkit."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
ClickTool,
CurrentWebPageTool,
ExtractHyperlinksTool,
ExtractTextTool,
GetElementsTool,
NavigateBackTool,
NavigateTool,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"NavigateTool": "langchain_community.tools",
"NavigateBackTool": "langchain_community.tools",
"ExtractTextTool": "langchain_community.tools",
"ExtractHyperlinksTool": "langchain_community.tools",
"GetElementsTool": "langchain_community.tools",
"ClickTool": "langchain_community.tools",
"CurrentWebPageTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ClickTool",
"CurrentWebPageTool",
"ExtractHyperlinksTool",
"ExtractTextTool",
"GetElementsTool",
"NavigateBackTool",
"NavigateTool",
]
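# Usage note (illustrative; the import path below is an assumption about where
# this shim lives): attribute access goes through ``__getattr__`` above, so
#
#     from langchain.tools.playwright import ClickTool
#
# resolves ``ClickTool`` lazily from ``langchain_community.tools`` via the
# importer, which also emits the corresponding deprecation warning.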
|
"""Browser tools and toolkit."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
ClickTool,
CurrentWebPageTool,
ExtractHyperlinksTool,
ExtractTextTool,
GetElementsTool,
NavigateBackTool,
NavigateTool,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"NavigateTool": "langchain_community.tools",
"NavigateBackTool": "langchain_community.tools",
"ExtractTextTool": "langchain_community.tools",
"ExtractHyperlinksTool": "langchain_community.tools",
"GetElementsTool": "langchain_community.tools",
"ClickTool": "langchain_community.tools",
"CurrentWebPageTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"NavigateTool",
"NavigateBackTool",
"ExtractTextTool",
"ExtractHyperlinksTool",
"GetElementsTool",
"ClickTool",
"CurrentWebPageTool",
]
|
"""Use a single chain to route an input to one of multiple retrieval qa chains."""
from __future__ import annotations
from collections.abc import Mapping
from typing import Any, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate
from langchain_core.retrievers import BaseRetriever
from langchain.chains import ConversationChain
from langchain.chains.base import Chain
from langchain.chains.conversation.prompt import DEFAULT_TEMPLATE
from langchain.chains.retrieval_qa.base import BaseRetrievalQA, RetrievalQA
from langchain.chains.router.base import MultiRouteChain
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.chains.router.multi_retrieval_prompt import (
MULTI_RETRIEVAL_ROUTER_TEMPLATE,
)
class MultiRetrievalQAChain(MultiRouteChain):
"""A multi-route chain that uses an LLM router chain to choose amongst retrieval
qa chains."""
router_chain: LLMRouterChain
"""Chain for deciding a destination chain and the input to it."""
destination_chains: Mapping[str, BaseRetrievalQA]
"""Map of name to candidate chains that inputs can be routed to."""
default_chain: Chain
"""Default chain to use when router doesn't map input to one of the destinations."""
@property
def output_keys(self) -> list[str]:
return ["result"]
@classmethod
def from_retrievers(
cls,
llm: BaseLanguageModel,
retriever_infos: list[dict[str, Any]],
default_retriever: Optional[BaseRetriever] = None,
default_prompt: Optional[PromptTemplate] = None,
default_chain: Optional[Chain] = None,
*,
default_chain_llm: Optional[BaseLanguageModel] = None,
**kwargs: Any,
) -> MultiRetrievalQAChain:
if default_prompt and not default_retriever:
raise ValueError(
"`default_retriever` must be specified if `default_prompt` is "
"provided. Received only `default_prompt`."
)
destinations = [f"{r['name']}: {r['description']}" for r in retriever_infos]
destinations_str = "\n".join(destinations)
router_template = MULTI_RETRIEVAL_ROUTER_TEMPLATE.format(
destinations=destinations_str
)
router_prompt = PromptTemplate(
template=router_template,
input_variables=["input"],
output_parser=RouterOutputParser(next_inputs_inner_key="query"),
)
router_chain = LLMRouterChain.from_llm(llm, router_prompt)
destination_chains = {}
for r_info in retriever_infos:
prompt = r_info.get("prompt")
retriever = r_info["retriever"]
chain = RetrievalQA.from_llm(llm, prompt=prompt, retriever=retriever)
name = r_info["name"]
destination_chains[name] = chain
if default_chain:
_default_chain = default_chain
elif default_retriever:
_default_chain = RetrievalQA.from_llm(
llm, prompt=default_prompt, retriever=default_retriever
)
else:
prompt_template = DEFAULT_TEMPLATE.replace("input", "query")
prompt = PromptTemplate(
template=prompt_template, input_variables=["history", "query"]
)
if default_chain_llm is None:
raise NotImplementedError(
"conversation_llm must be provided if default_chain is not "
"specified. This API has been changed to avoid instantiating "
"default LLMs on behalf of users."
"You can provide a conversation LLM like so:\n"
"from langchain_openai import ChatOpenAI\n"
"llm = ChatOpenAI()"
)
_default_chain = ConversationChain(
llm=default_chain_llm,
prompt=prompt,
input_key="query",
output_key="result",
)
return cls(
router_chain=router_chain,
destination_chains=destination_chains,
default_chain=_default_chain,
**kwargs,
)
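# Hedged usage sketch (illustrative; ``docs_retriever`` and the langchain-openai
# dependency are assumptions, and running it needs valid credentials):
#
#     from langchain_openai import ChatOpenAI
#
#     chain = MultiRetrievalQAChain.from_retrievers(
#         llm=ChatOpenAI(),
#         retriever_infos=[
#             {"name": "docs", "description": "project documentation",
#              "retriever": docs_retriever},
#         ],
#         default_chain_llm=ChatOpenAI(),
#     )
#     chain.invoke({"input": "How do I install the package?"})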
|
"""Use a single chain to route an input to one of multiple retrieval qa chains."""
from __future__ import annotations
from collections.abc import Mapping
from typing import Any, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate
from langchain_core.retrievers import BaseRetriever
from langchain.chains import ConversationChain
from langchain.chains.base import Chain
from langchain.chains.conversation.prompt import DEFAULT_TEMPLATE
from langchain.chains.retrieval_qa.base import BaseRetrievalQA, RetrievalQA
from langchain.chains.router.base import MultiRouteChain
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.chains.router.multi_retrieval_prompt import (
MULTI_RETRIEVAL_ROUTER_TEMPLATE,
)
class MultiRetrievalQAChain(MultiRouteChain): # type: ignore[override]
"""A multi-route chain that uses an LLM router chain to choose amongst retrieval
qa chains."""
router_chain: LLMRouterChain
"""Chain for deciding a destination chain and the input to it."""
destination_chains: Mapping[str, BaseRetrievalQA]
"""Map of name to candidate chains that inputs can be routed to."""
default_chain: Chain
"""Default chain to use when router doesn't map input to one of the destinations."""
@property
def output_keys(self) -> list[str]:
return ["result"]
@classmethod
def from_retrievers(
cls,
llm: BaseLanguageModel,
retriever_infos: list[dict[str, Any]],
default_retriever: Optional[BaseRetriever] = None,
default_prompt: Optional[PromptTemplate] = None,
default_chain: Optional[Chain] = None,
*,
default_chain_llm: Optional[BaseLanguageModel] = None,
**kwargs: Any,
) -> MultiRetrievalQAChain:
if default_prompt and not default_retriever:
raise ValueError(
"`default_retriever` must be specified if `default_prompt` is "
"provided. Received only `default_prompt`."
)
destinations = [f"{r['name']}: {r['description']}" for r in retriever_infos]
destinations_str = "\n".join(destinations)
router_template = MULTI_RETRIEVAL_ROUTER_TEMPLATE.format(
destinations=destinations_str
)
router_prompt = PromptTemplate(
template=router_template,
input_variables=["input"],
output_parser=RouterOutputParser(next_inputs_inner_key="query"),
)
router_chain = LLMRouterChain.from_llm(llm, router_prompt)
destination_chains = {}
for r_info in retriever_infos:
prompt = r_info.get("prompt")
retriever = r_info["retriever"]
chain = RetrievalQA.from_llm(llm, prompt=prompt, retriever=retriever)
name = r_info["name"]
destination_chains[name] = chain
if default_chain:
_default_chain = default_chain
elif default_retriever:
_default_chain = RetrievalQA.from_llm(
llm, prompt=default_prompt, retriever=default_retriever
)
else:
prompt_template = DEFAULT_TEMPLATE.replace("input", "query")
prompt = PromptTemplate(
template=prompt_template, input_variables=["history", "query"]
)
if default_chain_llm is None:
raise NotImplementedError(
"conversation_llm must be provided if default_chain is not "
"specified. This API has been changed to avoid instantiating "
"default LLMs on behalf of users."
"You can provide a conversation LLM like so:\n"
"from langchain_openai import ChatOpenAI\n"
"llm = ChatOpenAI()"
)
_default_chain = ConversationChain(
llm=default_chain_llm,
prompt=prompt,
input_key="query",
output_key="result",
)
return cls(
router_chain=router_chain,
destination_chains=destination_chains,
default_chain=_default_chain,
**kwargs,
)
|
from io import BytesIO
from pathlib import Path
from typing import Any, List, Tuple, Union
import requests
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class ImageCaptionLoader(BaseLoader):
"""Load image captions.
By default, the loader utilizes the pre-trained
Salesforce BLIP image captioning model.
https://huggingface.co/Salesforce/blip-image-captioning-base
"""
def __init__(
self,
images: Union[str, Path, bytes, List[Union[str, bytes, Path]]],
blip_processor: str = "Salesforce/blip-image-captioning-base",
blip_model: str = "Salesforce/blip-image-captioning-base",
):
"""Initialize with a list of image data (bytes) or file paths
Args:
images: Either a single image or a list of images. Accepts
image data (bytes) or file paths to images.
blip_processor: The name of the pre-trained BLIP processor.
blip_model: The name of the pre-trained BLIP model.
"""
if isinstance(images, (str, Path, bytes)):
self.images = [images]
else:
self.images = images
self.blip_processor = blip_processor
self.blip_model = blip_model
def load(self) -> List[Document]:
"""Load from a list of image data or file paths"""
try:
from transformers import BlipForConditionalGeneration, BlipProcessor
except ImportError:
raise ImportError(
"`transformers` package not found, please install with "
"`pip install transformers`."
)
processor = BlipProcessor.from_pretrained(self.blip_processor)
model = BlipForConditionalGeneration.from_pretrained(self.blip_model)
results = []
for image in self.images:
caption, metadata = self._get_captions_and_metadata(
model=model, processor=processor, image=image
)
doc = Document(page_content=caption, metadata=metadata)
results.append(doc)
return results
def _get_captions_and_metadata(
self, model: Any, processor: Any, image: Union[str, Path, bytes]
) -> Tuple[str, dict]:
"""Helper function for getting the captions and metadata of an image."""
try:
from PIL import Image
except ImportError:
raise ImportError(
"`PIL` package not found, please install with `pip install pillow`"
)
image_source = image # Save the original source for later reference
try:
if isinstance(image, bytes):
image = Image.open(BytesIO(image)).convert("RGB")
elif isinstance(image, str) and (
image.startswith("http://") or image.startswith("https://")
):
image = Image.open(requests.get(image, stream=True).raw).convert("RGB")
else:
image = Image.open(image).convert("RGB")
except Exception:
if isinstance(image_source, bytes):
msg = "Could not get image data from bytes"
else:
msg = f"Could not get image data for {image_source}"
raise ValueError(msg)
inputs = processor(image, "an image of", return_tensors="pt")
output = model.generate(**inputs)
caption: str = processor.decode(output[0])
if isinstance(image_source, bytes):
metadata: dict = {"image_source": "Image bytes provided"}
else:
metadata = {"image_path": str(image_source)}
return caption, metadata
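# Hedged usage sketch (illustrative; the image path is hypothetical and the first
# call downloads the BLIP weights from the Hugging Face Hub):
#
#     loader = ImageCaptionLoader(images=["/path/to/photo.jpg"])
#     docs = loader.load()
#     docs[0].page_content   # generated caption text
#     docs[0].metadata       # {"image_path": "/path/to/photo.jpg"}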
|
from io import BytesIO
from pathlib import Path
from typing import Any, List, Tuple, Union
import requests
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class ImageCaptionLoader(BaseLoader):
"""Load image captions.
By default, the loader utilizes the pre-trained
Salesforce BLIP image captioning model.
https://huggingface.co/Salesforce/blip-image-captioning-base
"""
def __init__(
self,
images: Union[str, Path, bytes, List[Union[str, bytes, Path]]],
blip_processor: str = "Salesforce/blip-image-captioning-base",
blip_model: str = "Salesforce/blip-image-captioning-base",
):
"""Initialize with a list of image data (bytes) or file paths
Args:
images: Either a single image or a list of images. Accepts
image data (bytes) or file paths to images.
blip_processor: The name of the pre-trained BLIP processor.
blip_model: The name of the pre-trained BLIP model.
"""
if isinstance(images, (str, Path, bytes)):
self.images = [images]
else:
self.images = images
self.blip_processor = blip_processor
self.blip_model = blip_model
def load(self) -> List[Document]:
"""Load from a list of image data or file paths"""
try:
from transformers import BlipForConditionalGeneration, BlipProcessor
except ImportError:
raise ImportError(
"`transformers` package not found, please install with "
"`pip install transformers`."
)
processor = BlipProcessor.from_pretrained(self.blip_processor)
model = BlipForConditionalGeneration.from_pretrained(self.blip_model)
results = []
for image in self.images:
caption, metadata = self._get_captions_and_metadata(
model=model, processor=processor, image=image
)
doc = Document(page_content=caption, metadata=metadata)
results.append(doc)
return results
def _get_captions_and_metadata(
self, model: Any, processor: Any, image: Union[str, Path, bytes]
) -> Tuple[str, dict]:
"""Helper function for getting the captions and metadata of an image."""
try:
from PIL import Image
except ImportError:
raise ImportError(
"`PIL` package not found, please install with `pip install pillow`"
)
image_source = image # Save the original source for later reference
try:
if isinstance(image, bytes):
image = Image.open(BytesIO(image)).convert("RGB") # type: ignore[assignment]
elif isinstance(image, str) and (
image.startswith("http://") or image.startswith("https://")
):
image = Image.open(requests.get(image, stream=True).raw).convert("RGB") # type: ignore[assignment, arg-type]
else:
image = Image.open(image).convert("RGB") # type: ignore[assignment]
except Exception:
if isinstance(image_source, bytes):
msg = "Could not get image data from bytes"
else:
msg = f"Could not get image data for {image_source}"
raise ValueError(msg)
inputs = processor(image, "an image of", return_tensors="pt")
output = model.generate(**inputs)
caption: str = processor.decode(output[0])
if isinstance(image_source, bytes):
metadata: dict = {"image_source": "Image bytes provided"}
else:
metadata = {"image_path": str(image_source)}
return caption, metadata
|
import numpy as np
import pytest
import torch
from docarray.typing import (
AudioNdArray,
AudioTorchTensor,
NdArrayEmbedding,
TorchEmbedding,
)
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing import AudioTensorFlowTensor, TensorFlowEmbedding
def test_torch_tensors_interop():
t1 = AudioTorchTensor(torch.rand(128))
t2 = TorchEmbedding(torch.rand(128))
t_result = t1 + t2
assert isinstance(t_result, AudioTorchTensor)
assert isinstance(t_result, torch.Tensor)
assert t_result.shape == (128,)
@pytest.mark.tensorflow
def test_tensorflow_tensors_interop():
t1 = AudioTensorFlowTensor(tf.random.normal((128,)))
t2 = TensorFlowEmbedding(tf.random.normal((128,)))
t_result = t1.tensor + t2.tensor
assert isinstance(t_result, tf.Tensor)
assert t_result.shape == (128,)
def test_np_arrays_interop():
t1 = AudioNdArray((128,))
t2 = NdArrayEmbedding((128,))
t_result = t1 + t2
assert isinstance(t_result, AudioNdArray)
assert isinstance(t_result, np.ndarray)
assert t_result.shape == (128,)
|
import numpy as np
import pytest
import torch
from docarray.typing import (
AudioNdArray,
AudioTorchTensor,
NdArrayEmbedding,
TorchEmbedding,
)
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing import AudioTensorFlowTensor, TensorFlowEmbedding
def test_torch_tensors_interop():
t1 = AudioTorchTensor(torch.rand(128))
t2 = TorchEmbedding(torch.rand(128))
t_result = t1 + t2
assert isinstance(t_result, AudioTorchTensor)
assert isinstance(t_result, torch.Tensor)
assert t_result.shape == (128,)
@pytest.mark.tensorflow
def test_tensorflow_tensors_interop():
t1 = AudioTensorFlowTensor(tf.random.normal((128,)))
t2 = TensorFlowEmbedding(tf.random.normal((128,)))
t_result = t1.tensor + t2.tensor
assert isinstance(t_result, tf.Tensor)
assert t_result.shape == (128,)
def test_np_arrays_interop():
t1 = AudioNdArray((128,))
t2 = NdArrayEmbedding((128,))
t_result = t1 + t2
assert isinstance(t_result, AudioNdArray)
assert isinstance(t_result, np.ndarray)
assert t_result.shape == (128,)
|
"""Support vector machine algorithms."""
# See http://scikit-learn.sourceforge.net/modules/svm.html for complete
# documentation.
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._bounds import l1_min_c
from ._classes import SVC, SVR, LinearSVC, LinearSVR, NuSVC, NuSVR, OneClassSVM
__all__ = [
"SVC",
"SVR",
"LinearSVC",
"LinearSVR",
"NuSVC",
"NuSVR",
"OneClassSVM",
"l1_min_c",
]
|
"""Support vector machine algorithms."""
# See http://scikit-learn.sourceforge.net/modules/svm.html for complete
# documentation.
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._bounds import l1_min_c
from ._classes import SVC, SVR, LinearSVC, LinearSVR, NuSVC, NuSVR, OneClassSVM
__all__ = [
"LinearSVC",
"LinearSVR",
"NuSVC",
"NuSVR",
"OneClassSVM",
"SVC",
"SVR",
"l1_min_c",
]
|
"""Document loaders."""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.document_loaders.base import BaseBlobParser, BaseLoader
from langchain_core.document_loaders.blob_loaders import Blob, BlobLoader, PathLike
from langchain_core.document_loaders.langsmith import LangSmithLoader
__all__ = (
"BaseBlobParser",
"BaseLoader",
"Blob",
"BlobLoader",
"PathLike",
"LangSmithLoader",
)
_dynamic_imports = {
"BaseBlobParser": "base",
"BaseLoader": "base",
"Blob": "blob_loaders",
"BlobLoader": "blob_loaders",
"PathLike": "blob_loaders",
"LangSmithLoader": "langsmith",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
"""Document loaders."""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.document_loaders.base import BaseBlobParser, BaseLoader
from langchain_core.document_loaders.blob_loaders import Blob, BlobLoader, PathLike
from langchain_core.document_loaders.langsmith import LangSmithLoader
__all__ = [
"BaseBlobParser",
"BaseLoader",
"Blob",
"BlobLoader",
"PathLike",
"LangSmithLoader",
]
_dynamic_imports = {
"BaseBlobParser": "base",
"BaseLoader": "base",
"Blob": "blob_loaders",
"BlobLoader": "blob_loaders",
"PathLike": "blob_loaders",
"LangSmithLoader": "langsmith",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"LargeList",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, LargeList, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
|
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
|
from sentence_transformers import SentenceTransformer, losses, util
class AnglELoss(losses.CoSENTLoss):
def __init__(self, model: SentenceTransformer, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized) loss.
This is a modification of :class:`CoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SentenceTransformerModel
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`CoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than ``CoSENTLoss`` or ``AnglELoss``.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.AnglELoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
@property
def citation(self) -> str:
return """
@misc{li2023angleoptimized,
title={AnglE-optimized Text Embeddings},
author={Xianming Li and Jing Li},
year={2023},
eprint={2309.12871},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
|
from sentence_transformers import SentenceTransformer, losses, util
class AnglELoss(losses.CoSENTLoss):
def __init__(self, model: SentenceTransformer, scale: float = 20.0):
"""
This class implements AnglE (Angle Optimized) loss.
This is a modification of :class:`CoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SentenceTransformerModel
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`CoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than ``CoSENTLoss`` or ``AnglELoss``.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.AnglELoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
@property
def citation(self) -> str:
return """
@misc{li2023angleoptimized,
title={AnglE-optimized Text Embeddings},
author={Xianming Li and Jing Li},
year={2023},
eprint={2309.12871},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
|
__version__ = '0.13.21'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.13.20'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS, though it seems to have no effect; the variable must be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; it is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.33.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096. This is useful when running matplotlib/seaborn
with many parallel plot generators, where the Ubuntu default of ``ulimit -n 1024`` or the
macOS (El Capitan) default of 256 is too low; the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST-CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS, though it seems to have no effect; the variable must be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; it is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.32.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096. This is useful when running matplotlib/seaborn
with many parallel plot generators, where the Ubuntu default of ``ulimit -n 1024`` or the
macOS (El Capitan) default of 256 is too low; the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST-CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Union
from mmcv.cnn import ConvModule
from torch import Tensor
from mmdet.registry import MODELS
from .fcn_mask_head import FCNMaskHead
@MODELS.register_module()
class HTCMaskHead(FCNMaskHead):
"""Mask head for HTC.
Args:
with_conv_res (bool): Whether to add a conv layer for ``res_feat``.
Defaults to True.
"""
def __init__(self, with_conv_res: bool = True, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.with_conv_res = with_conv_res
if self.with_conv_res:
self.conv_res = ConvModule(
self.conv_out_channels,
self.conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
def forward(self,
x: Tensor,
res_feat: Optional[Tensor] = None,
return_logits: bool = True,
return_feat: bool = True) -> Union[Tensor, List[Tensor]]:
"""
Args:
x (Tensor): Feature map.
res_feat (Tensor, optional): Feature for residual connection.
Defaults to None.
return_logits (bool): Whether to return mask logits. Defaults to True.
return_feat (bool): Whether to return the feature map. Defaults to True.
Returns:
Union[Tensor, List[Tensor]]: The return result is one of three
results: res_feat, logits, or [logits, res_feat].
"""
assert not (not return_logits and not return_feat)
if res_feat is not None:
assert self.with_conv_res
res_feat = self.conv_res(res_feat)
x = x + res_feat
for conv in self.convs:
x = conv(x)
res_feat = x
outs = []
if return_logits:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_preds = self.conv_logits(x)
outs.append(mask_preds)
if return_feat:
outs.append(res_feat)
return outs if len(outs) > 1 else outs[0]
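# Usage note (illustrative, variable names are placeholders): in HTC the
# ``res_feat`` returned by one cascade stage is fed to the next stage, roughly:
#
#     logits_0, feat = mask_head_0(x_0)                  # returns [logits, res_feat]
#     logits_1, feat = mask_head_1(x_1, res_feat=feat)   # reuses the residual feature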
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Union
from mmcv.cnn import ConvModule
from torch import Tensor
from mmdet.registry import MODELS
from .fcn_mask_head import FCNMaskHead
@MODELS.register_module()
class HTCMaskHead(FCNMaskHead):
"""Mask head for HTC.
Args:
with_conv_res (bool): Whether to add a conv layer for ``res_feat``.
Defaults to True.
"""
def __init__(self, with_conv_res: bool = True, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.with_conv_res = with_conv_res
if self.with_conv_res:
self.conv_res = ConvModule(
self.conv_out_channels,
self.conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
def forward(self,
x: Tensor,
res_feat: Optional[Tensor] = None,
return_logits: bool = True,
return_feat: bool = True) -> Union[Tensor, List[Tensor]]:
"""
Args:
x (Tensor): Feature map.
res_feat (Tensor, optional): Feature for residual connection.
Defaults to None.
return_logits (bool): Whether to return mask logits. Defaults to True.
return_feat (bool): Whether to return the feature map. Defaults to True.
Returns:
Union[Tensor, List[Tensor]]: The return result is one of three
results: res_feat, logits, or [logits, res_feat].
"""
assert not (not return_logits and not return_feat)
if res_feat is not None:
assert self.with_conv_res
res_feat = self.conv_res(res_feat)
x = x + res_feat
for conv in self.convs:
x = conv(x)
res_feat = x
outs = []
if return_logits:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_pred = self.conv_logits(x)
outs.append(mask_pred)
if return_feat:
outs.append(res_feat)
return outs if len(outs) > 1 else outs[0]
|
from typing import Any
def get_prompt_input_key(inputs: dict[str, Any], memory_variables: list[str]) -> str:
"""
Get the prompt input key.
Args:
inputs: Dict[str, Any]
memory_variables: List[str]
Returns:
A prompt input key.
"""
# "stop" is a special key that can be passed as input but is not used to
# format the prompt.
prompt_input_keys = list(set(inputs).difference(memory_variables + ["stop"]))
if len(prompt_input_keys) != 1:
raise ValueError(f"One input key expected got {prompt_input_keys}")
return prompt_input_keys[0]
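# Illustrative check: "history" is a memory variable and "stop" is always
# excluded, so "question" is the only remaining prompt input key.
assert get_prompt_input_key(
    {"question": "hi", "history": [], "stop": None}, ["history"]
) == "question"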
|
from typing import Any, Dict, List
def get_prompt_input_key(inputs: Dict[str, Any], memory_variables: List[str]) -> str:
"""
Get the prompt input key.
Args:
inputs: Dict[str, Any]
memory_variables: List[str]
Returns:
A prompt input key.
"""
# "stop" is a special key that can be passed as input but is not used to
# format the prompt.
prompt_input_keys = list(set(inputs).difference(memory_variables + ["stop"]))
if len(prompt_input_keys) != 1:
raise ValueError(f"One input key expected got {prompt_input_keys}")
return prompt_input_keys[0]
|
__version__ = '0.32.0'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
__version__ = '0.31.2'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
_base_ = './cascade-mask-rcnn_r50_fpn_instaboost-4x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = './cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import shutil
import time
from unittest import TestCase
from unittest.mock import Mock
import torch
from mmengine.data import InstanceData
from mmdet.core import DetDataSample
from mmdet.core.hook import DetVisualizationHook
from mmdet.core.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
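"""Generate ``num_boxes`` random boxes in (x1, y1, x2, y2) format, clipped to an h x w image."""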
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clip(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clip(0, h)
br_x = ((cx * w) + (w * bw / 2)).clip(0, w)
br_y = ((cy * h) + (h * bh / 2)).clip(0, h)
bboxes = torch.vstack([tl_x, tl_y, br_x, br_y]).T
return bboxes
class TestVisualizationHook(TestCase):
def setUp(self) -> None:
DetLocalVisualizer.get_instance('visualizer')
data_sample = DetDataSample()
data_sample.set_metainfo({
'img_path':
osp.join(osp.dirname(__file__), '../../data/color.jpg')
})
self.data_batch = [{'data_sample': data_sample}] * 2
pred_instances = InstanceData()
pred_instances.bboxes = _rand_bboxes(5, 10, 12)
pred_instances.labels = torch.randint(0, 2, (5, ))
pred_instances.scores = torch.rand((5, ))
pred_det_data_sample = DetDataSample()
pred_det_data_sample.pred_instances = pred_instances
self.outputs = [pred_det_data_sample] * 2
def test_after_val_iter(self):
runner = Mock()
runner.iter = 1
hook = DetVisualizationHook()
hook.after_val_iter(runner, 1, self.data_batch, self.outputs)
def test_after_test_iter(self):
runner = Mock()
runner.iter = 1
hook = DetVisualizationHook(draw=True)
hook.after_test_iter(runner, 1, self.data_batch, self.outputs)
self.assertEqual(hook._test_index, 2)
# test saving the drawn results to test_out_dir
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
test_out_dir = timestamp + '1'
runner.work_dir = timestamp
runner.timestamp = '1'
hook = DetVisualizationHook(draw=False, test_out_dir=test_out_dir)
hook.after_test_iter(runner, 1, self.data_batch, self.outputs)
self.assertTrue(not osp.exists(f'{timestamp}/1/{test_out_dir}'))
hook = DetVisualizationHook(draw=True, test_out_dir=test_out_dir)
hook.after_test_iter(runner, 1, self.data_batch, self.outputs)
self.assertTrue(osp.exists(f'{timestamp}/1/{test_out_dir}'))
shutil.rmtree(f'{timestamp}')
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import shutil
import time
from unittest import TestCase
from unittest.mock import Mock
import torch
from mmengine.data import InstanceData
from mmdet.core import DetDataSample
from mmdet.core.hook import DetVisualizationHook
from mmdet.core.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clip(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clip(0, h)
br_x = ((cx * w) + (w * bw / 2)).clip(0, w)
br_y = ((cy * h) + (h * bh / 2)).clip(0, h)
bboxes = torch.vstack([tl_x, tl_y, br_x, br_y]).T
return bboxes
class TestVisualizationHook(TestCase):
def setUp(self) -> None:
DetLocalVisualizer.get_instance('visualizer')
data_sample = DetDataSample()
data_sample.set_metainfo({'img_path': 'tests/data/color.jpg'})
self.data_batch = [{'data_sample': data_sample}] * 2
pred_instances = InstanceData()
pred_instances.bboxes = _rand_bboxes(5, 10, 12)
pred_instances.labels = torch.randint(0, 2, (5, ))
pred_instances.scores = torch.rand((5, ))
pred_det_data_sample = DetDataSample()
pred_det_data_sample.pred_instances = pred_instances
self.outputs = [pred_det_data_sample] * 2
def test_after_val_iter(self):
runner = Mock()
runner.iter = 1
hook = DetVisualizationHook()
hook.after_val_iter(runner, 1, self.data_batch, self.outputs)
def test_after_test_iter(self):
runner = Mock()
runner.iter = 1
hook = DetVisualizationHook(draw=True)
hook.after_test_iter(runner, 1, self.data_batch, self.outputs)
self.assertEqual(hook._test_index, 2)
# test saving the drawn results to test_out_dir
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
test_out_dir = timestamp + '1'
runner.work_dir = timestamp
runner.timestamp = '1'
hook = DetVisualizationHook(draw=False, test_out_dir=test_out_dir)
hook.after_test_iter(runner, 1, self.data_batch, self.outputs)
self.assertTrue(not os.path.exists(f'{timestamp}/1/{test_out_dir}'))
hook = DetVisualizationHook(draw=True, test_out_dir=test_out_dir)
hook.after_test_iter(runner, 1, self.data_batch, self.outputs)
self.assertTrue(os.path.exists(f'{timestamp}/1/{test_out_dir}'))
shutil.rmtree(f'{timestamp}/1/{test_out_dir}')
|
from __future__ import annotations
from abc import abstractmethod
from typing import Any
import torch
from tokenizers import Tokenizer
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from sentence_transformers.models.Module import Module
class InputModule(Module):
"""
Subclass of :class:`sentence_transformers.models.Module`, base class for all input modules in the Sentence
Transformers library, i.e. modules that are used to process inputs and optionally also perform processing
in the forward pass.
This class provides a common interface for all input modules, including methods for loading and saving the module's
configuration and weights, as well as input processing. It also provides a method for performing the forward pass
of the module.
Three abstract methods are defined in this class, which must be implemented by subclasses:
- :meth:`sentence_transformers.models.Module.forward`: The forward pass of the module.
- :meth:`sentence_transformers.models.Module.save`: Save the module to disk.
- :meth:`sentence_transformers.models.InputModule.tokenize`: Tokenize the input texts and return a dictionary of tokenized features.
Optionally, you may also have to override:
- :meth:`sentence_transformers.models.Module.load`: Load the module from disk.
To assist with loading and saving the module, several utility methods are provided:
- :meth:`sentence_transformers.models.Module.load_config`: Load the module's configuration from a JSON file.
- :meth:`sentence_transformers.models.Module.load_file_path`: Load a file from the module's directory, regardless of whether the module is saved locally or on Hugging Face.
- :meth:`sentence_transformers.models.Module.load_dir_path`: Load a directory from the module's directory, regardless of whether the module is saved locally or on Hugging Face.
- :meth:`sentence_transformers.models.Module.load_torch_weights`: Load the PyTorch weights of the module, regardless of whether the module is saved locally or on Hugging Face.
- :meth:`sentence_transformers.models.Module.save_config`: Save the module's configuration to a JSON file.
- :meth:`sentence_transformers.models.Module.save_torch_weights`: Save the PyTorch weights of the module.
- :meth:`sentence_transformers.models.InputModule.save_tokenizer`: Save the tokenizer used by the module.
- :meth:`sentence_transformers.models.Module.get_config_dict`: Get the module's configuration as a dictionary.
And several class variables are defined to assist with loading and saving the module:
- :attr:`sentence_transformers.models.Module.config_file_name`: The name of the configuration file used to save the module's configuration.
- :attr:`sentence_transformers.models.Module.config_keys`: A list of keys used to save the module's configuration.
- :attr:`sentence_transformers.models.InputModule.save_in_root`: Whether to save the module's configuration in the root directory of the model or in a subdirectory named after the module.
- :attr:`sentence_transformers.models.InputModule.tokenizer`: The tokenizer used by the module.
"""
save_in_root: bool = True
tokenizer: PreTrainedTokenizerBase | Tokenizer
"""
The tokenizer used for tokenizing the input texts. It can be either a
:class:`transformers.PreTrainedTokenizerBase` subclass or a Tokenizer from the
``tokenizers`` library.
"""
@abstractmethod
def tokenize(self, texts: list[str], **kwargs) -> dict[str, torch.Tensor | Any]:
"""
Tokenizes the input texts and returns a dictionary of tokenized features.
Args:
texts (list[str]): List of input texts to tokenize.
**kwargs: Additional keyword arguments for tokenization, e.g. ``task``.
Returns:
dict[str, torch.Tensor | Any]: Dictionary containing tokenized features, e.g.
``{"input_ids": ..., "attention_mask": ...}``
"""
def save_tokenizer(self, output_path: str, **kwargs) -> None:
"""
Saves the tokenizer to the specified output path.
Args:
output_path (str): Path to save the tokenizer.
**kwargs: Additional keyword arguments for saving the tokenizer.
Returns:
None
"""
if not hasattr(self, "tokenizer"):
return
if isinstance(self.tokenizer, PreTrainedTokenizerBase):
self.tokenizer.save_pretrained(output_path, **kwargs)
elif isinstance(self.tokenizer, Tokenizer):
self.tokenizer.save(output_path, **kwargs)
return
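# Hedged sketch (class name and bodies are illustrative assumptions): a concrete
# input module implements the three abstract methods named in the docstring above.
#
#     class MyTextModule(InputModule):
#         def forward(self, features):
#             ...  # produce token/sentence embeddings from tokenized features
#
#         def tokenize(self, texts, **kwargs):
#             ...  # return e.g. {"input_ids": ..., "attention_mask": ...}
#
#         def save(self, output_path, **kwargs):
#             ...  # persist configuration and weights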
|
from __future__ import annotations
from abc import abstractmethod
from typing import Any
import torch
from tokenizers import Tokenizer
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from sentence_transformers.models.Module import Module
class InputModule(Module):
"""
Subclass of :class:`sentence_transformers.models.Module`, base class for all input modules in the Sentence
Transformers library, i.e. modules that are used to process inputs and optionally also perform processing
in the forward pass.
This class provides a common interface for all input modules, including methods for loading and saving the module's
configuration and weights, as well as input processing. It also provides a method for performing the forward pass
of the module.
Three abstract methods are defined in this class, which must be implemented by subclasses:
- :meth:`sentence_transformers.models.Module.forward`: The forward pass of the module.
- :meth:`sentence_transformers.models.Module.save`: Save the module to disk.
- :meth:`sentence_transformers.models.InputModule.tokenize`: Tokenize the input texts and return a dictionary of tokenized features.
Optionally, you may also have to override:
- :meth:`sentence_transformers.models.Module.load`: Load the module from disk.
To assist with loading and saving the module, several utility methods are provided:
- :meth:`sentence_transformers.models.Module.load_config`: Load the module's configuration from a JSON file.
- :meth:`sentence_transformers.models.Module.load_file_path`: Load a file from the module's directory, regardless of whether the module is saved locally or on Hugging Face.
- :meth:`sentence_transformers.models.Module.load_dir_path`: Load a directory from the module's directory, regardless of whether the module is saved locally or on Hugging Face.
- :meth:`sentence_transformers.models.Module.load_torch_weights`: Load the PyTorch weights of the module, regardless of whether the module is saved locally or on Hugging Face.
- :meth:`sentence_transformers.models.Module.save_config`: Save the module's configuration to a JSON file.
- :meth:`sentence_transformers.models.Module.save_torch_weights`: Save the PyTorch weights of the module.
- :meth:`sentence_transformers.models.InputModule.save_tokenizer`: Save the tokenizer used by the module.
- :meth:`sentence_transformers.models.Module.get_config_dict`: Get the module's configuration as a dictionary.
And several class variables are defined to assist with loading and saving the module:
- :attr:`sentence_transformers.models.Module.config_file_name`: The name of the configuration file used to save the module's configuration.
- :attr:`sentence_transformers.models.Module.config_keys`: A list of keys used to save the module's configuration.
- :attr:`sentence_transformers.models.InputModule.save_in_root`: Whether to save the module's configuration in the root directory of the model or in a subdirectory named after the module.
- :attr:`sentence_transformers.models.InputModule.tokenizer`: The tokenizer used by the module.
"""
save_in_root: bool = True
tokenizer: PreTrainedTokenizerBase | Tokenizer
"""
The tokenizer used for tokenizing the input texts. It can be either a
:class:`transformers.PreTrainedTokenizerBase` subclass or a Tokenizer from the
``tokenizers`` library.
"""
@abstractmethod
def tokenize(self, texts: list[str], **kwargs) -> dict[str, torch.Tensor | Any]:
"""
Tokenizes the input texts and returns a dictionary of tokenized features.
Args:
texts (list[str]): List of input texts to tokenize.
**kwargs: Additional keyword arguments for tokenization.
Returns:
dict[str, torch.Tensor | Any]: Dictionary containing tokenized features, e.g.
``{"input_ids": ..., "attention_mask": ...}``
"""
def save_tokenizer(self, output_path: str, **kwargs) -> None:
"""
Saves the tokenizer to the specified output path.
Args:
output_path (str): Path to save the tokenizer.
**kwargs: Additional keyword arguments for saving the tokenizer.
Returns:
None
"""
if not hasattr(self, "tokenizer"):
return
if isinstance(self.tokenizer, PreTrainedTokenizerBase):
self.tokenizer.save_pretrained(output_path, **kwargs)
elif isinstance(self.tokenizer, Tokenizer):
self.tokenizer.save(output_path, **kwargs)
return
|
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.jax import core
from keras.src.backend.jax import distribution_lib
from keras.src.backend.jax import image
from keras.src.backend.jax import linalg
from keras.src.backend.jax import math
from keras.src.backend.jax import nn
from keras.src.backend.jax import numpy
from keras.src.backend.jax import random
from keras.src.backend.jax.core import IS_THREAD_SAFE
from keras.src.backend.jax.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.jax.core import Variable
from keras.src.backend.jax.core import cast
from keras.src.backend.jax.core import compute_output_spec
from keras.src.backend.jax.core import cond
from keras.src.backend.jax.core import convert_to_numpy
from keras.src.backend.jax.core import convert_to_tensor
from keras.src.backend.jax.core import device_scope
from keras.src.backend.jax.core import is_tensor
from keras.src.backend.jax.core import random_seed_dtype
from keras.src.backend.jax.core import scatter
from keras.src.backend.jax.core import shape
from keras.src.backend.jax.core import stop_gradient
from keras.src.backend.jax.core import vectorized_map
from keras.src.backend.jax.rnn import cudnn_ok
from keras.src.backend.jax.rnn import gru
from keras.src.backend.jax.rnn import lstm
from keras.src.backend.jax.rnn import rnn
|
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.jax import core
from keras.src.backend.jax import distribution_lib
from keras.src.backend.jax import image
from keras.src.backend.jax import linalg
from keras.src.backend.jax import math
from keras.src.backend.jax import nn
from keras.src.backend.jax import numpy
from keras.src.backend.jax import random
from keras.src.backend.jax.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.jax.core import Variable
from keras.src.backend.jax.core import cast
from keras.src.backend.jax.core import compute_output_spec
from keras.src.backend.jax.core import cond
from keras.src.backend.jax.core import convert_to_numpy
from keras.src.backend.jax.core import convert_to_tensor
from keras.src.backend.jax.core import device_scope
from keras.src.backend.jax.core import is_tensor
from keras.src.backend.jax.core import random_seed_dtype
from keras.src.backend.jax.core import scatter
from keras.src.backend.jax.core import shape
from keras.src.backend.jax.core import stop_gradient
from keras.src.backend.jax.core import vectorized_map
from keras.src.backend.jax.rnn import cudnn_ok
from keras.src.backend.jax.rnn import gru
from keras.src.backend.jax.rnn import lstm
from keras.src.backend.jax.rnn import rnn
|
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='FasterRCNN',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
type='RPNHead',
in_channels=1024,
feat_channels=1024,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
shared_head=dict(
type='ResLayer',
depth=50,
stage=3,
stride=2,
dilation=1,
style='caffe',
norm_cfg=norm_cfg,
norm_eval=True,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=1024,
featmap_strides=[16]),
bbox_head=dict(
type='BBoxHead',
with_avg_pool=True,
roi_feat_size=7,
in_channels=2048,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=6000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
|
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
preprocess_cfg=preprocess_cfg,
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
type='RPNHead',
in_channels=1024,
feat_channels=1024,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
shared_head=dict(
type='ResLayer',
depth=50,
stage=3,
stride=2,
dilation=1,
style='caffe',
norm_cfg=norm_cfg,
norm_eval=True,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=1024,
featmap_strides=[16]),
bbox_head=dict(
type='BBoxHead',
with_avg_pool=True,
roi_feat_size=7,
in_channels=2048,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=6000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
|
"""
In this example we train a semantic search model to search through Wikipedia
articles about programming languages & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C, C Sharp, C++, Go, Java, JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natural Language Toolkit, NumPy, pandas (software), Perl, PHP, PostgreSQL, Python, PyTorch, R, React, Rust, Scala, scikit-learn, SciPy, Swift, TensorFlow, Vue.js
This example consists of three scripts:
1_programming_query_generation.py - We generate queries for all paragraphs from these articles
2_programming_train_bi-encoder.py - We train a SentenceTransformer bi-encoder with these generated queries. This results in a model we can then use for semantic search (for the given Wikipedia articles).
3_programming_semantic_search.py - Shows how the trained model can be used for semantic search
"""
import gzip
import json
import os
from sentence_transformers import SentenceTransformer, util
# Load the model we trained in 2_programming_train_bi-encoder.py
model = SentenceTransformer("output/programming-model")
# Load the corpus
docs = []
corpus_filepath = "wiki-programmming-20210101.jsonl.gz"
if not os.path.exists(corpus_filepath):
util.http_get("https://sbert.net/datasets/wiki-programmming-20210101.jsonl.gz", corpus_filepath)
with gzip.open(corpus_filepath, "rt") as fIn:
for line in fIn:
data = json.loads(line.strip())
title = data["title"]
for p in data["paragraphs"]:
if len(p) > 100: # Only take paragraphs with at least 100 chars
docs.append((title, p))
paragraph_emb = model.encode([d[1] for d in docs], convert_to_tensor=True)
print("Available Wikipedia Articles:")
print(", ".join(sorted(list(set([d[0] for d in docs])))))
# Example for semantic search
while True:
query = input("Query: ")
query_emb = model.encode(query, convert_to_tensor=True)
hits = util.semantic_search(query_emb, paragraph_emb, top_k=3)[0]
for hit in hits:
doc = docs[hit["corpus_id"]]
print("{:.2f}\t{}\t\t{}".format(hit["score"], doc[0], doc[1]))
print("\n=================\n")
|
"""
In this example we train a semantic search model to search through Wikipedia
articles about programming languages & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C, C Sharp, C++, Go, Java, JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natural Language Toolkit, NumPy, pandas (software), Perl, PHP, PostgreSQL, Python, PyTorch, R, React, Rust, Scala, scikit-learn, SciPy, Swift, TensorFlow, Vue.js
This example consists of three scripts:
1_programming_query_generation.py - We generate queries for all paragraphs from these articles
2_programming_train_bi-encoder.py - We train a SentenceTransformer bi-encoder with these generated queries. This results in a model we can then use for semantic search (for the given Wikipedia articles).
3_programming_semantic_search.py - Shows how the trained model can be used for semantic search
"""
from sentence_transformers import SentenceTransformer, util
import gzip
import json
import os
# Load the model we trained in 2_programming_train_bi-encoder.py
model = SentenceTransformer('output/programming-model')
# Load the corpus
docs = []
corpus_filepath = 'wiki-programmming-20210101.jsonl.gz'
if not os.path.exists(corpus_filepath):
util.http_get('https://sbert.net/datasets/wiki-programmming-20210101.jsonl.gz', corpus_filepath)
with gzip.open(corpus_filepath, 'rt') as fIn:
for line in fIn:
data = json.loads(line.strip())
title = data['title']
for p in data['paragraphs']:
if len(p) > 100: #Only take paragraphs with at least 100 chars
docs.append((title, p))
paragraph_emb = model.encode([d[1] for d in docs], convert_to_tensor=True)
print("Available Wikipedia Articles:")
print(", ".join(sorted(list(set([d[0] for d in docs])))))
# Example for semantic search
while True:
query = input("Query: ")
query_emb = model.encode(query, convert_to_tensor=True)
hits = util.semantic_search(query_emb, paragraph_emb, top_k=3)[0]
for hit in hits:
doc = docs[hit['corpus_id']]
print("{:.2f}\t{}\t\t{}".format(hit['score'], doc[0], doc[1]))
print("\n=================\n")
|
from prisma.models import User
from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock
from backend.blocks.text import FillTextTemplateBlock
from backend.data import graph
from backend.data.graph import create_graph
from backend.data.user import get_or_create_user
from backend.util.test import SpinTestServer, wait_execution
async def create_test_user(alt_user: bool = False) -> User:
if alt_user:
test_user_data = {
"sub": "3e53486c-cf57-477e-ba2a-cb02dc828e1b",
"email": "testuser2@example.com",
"name": "Test User 2",
}
else:
test_user_data = {
"sub": "ef3b97d7-1161-4eb4-92b2-10c24fb154c1",
"email": "testuser@example.com",
"name": "Test User",
}
user = await get_or_create_user(test_user_data)
return user
def create_test_graph() -> graph.Graph:
"""
InputBlock
\
---- FillTextTemplateBlock ---- PrintToConsoleBlock
/
InputBlock
"""
nodes = [
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_1"},
),
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_2"},
),
graph.Node(
block_id=FillTextTemplateBlock().id,
input_default={
"format": "{{a}}, {{b}}{{c}}",
"values_#_c": "!!!",
},
),
graph.Node(block_id=PrintToConsoleBlock().id),
]
links = [
graph.Link(
source_id=nodes[0].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_a",
),
graph.Link(
source_id=nodes[1].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_b",
),
graph.Link(
source_id=nodes[2].id,
sink_id=nodes[3].id,
source_name="output",
sink_name="text",
),
]
return graph.Graph(
name="TestGraph",
description="Test graph",
nodes=nodes,
links=links,
)
async def sample_agent():
async with SpinTestServer() as server:
test_user = await create_test_user()
test_graph = await create_graph(create_test_graph(), test_user.id)
input_data = {"input_1": "Hello", "input_2": "World"}
response = await server.agent_server.test_execute_graph(
graph_id=test_graph.id,
user_id=test_user.id,
node_input=input_data,
)
print(response)
result = await wait_execution(
test_user.id, test_graph.id, response.graph_exec_id, 10
)
print(result)
if __name__ == "__main__":
import asyncio
asyncio.run(sample_agent())
|
from prisma.models import User
from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock
from backend.blocks.text import FillTextTemplateBlock
from backend.data import graph
from backend.data.graph import create_graph
from backend.data.user import get_or_create_user
from backend.util.test import SpinTestServer, wait_execution
async def create_test_user(alt_user: bool = False) -> User:
if alt_user:
test_user_data = {
"sub": "3e53486c-cf57-477e-ba2a-cb02dc828e1b",
"email": "testuser2@example.com",
"name": "Test User 2",
}
else:
test_user_data = {
"sub": "ef3b97d7-1161-4eb4-92b2-10c24fb154c1",
"email": "testuser@example.com",
"name": "Test User",
}
user = await get_or_create_user(test_user_data)
return user
def create_test_graph() -> graph.Graph:
"""
InputBlock
\
---- FillTextTemplateBlock ---- PrintToConsoleBlock
/
InputBlock
"""
nodes = [
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_1"},
),
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_2"},
),
graph.Node(
block_id=FillTextTemplateBlock().id,
input_default={
"format": "{{a}}, {{b}}{{c}}",
"values_#_c": "!!!",
},
),
graph.Node(block_id=PrintToConsoleBlock().id),
]
links = [
graph.Link(
source_id=nodes[0].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_a",
),
graph.Link(
source_id=nodes[1].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_b",
),
graph.Link(
source_id=nodes[2].id,
sink_id=nodes[3].id,
source_name="output",
sink_name="text",
),
]
return graph.Graph(
name="TestGraph",
description="Test graph",
nodes=nodes,
links=links,
)
async def sample_agent():
async with SpinTestServer() as server:
test_user = await create_test_user()
test_graph = await create_graph(create_test_graph(), test_user.id)
input_data = {"input_1": "Hello", "input_2": "World"}
response = await server.agent_server.test_execute_graph(
test_graph.id, input_data, test_user.id
)
print(response)
result = await wait_execution(
test_user.id, test_graph.id, response.graph_exec_id, 10
)
print(result)
if __name__ == "__main__":
import asyncio
asyncio.run(sample_agent())
|
import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.proto import jina_pb2, jina_pb2_grpc
class DummyResponseModel(BaseModel):
protocol: str
class MultiProtocolGateway(Gateway):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.http_port = self.runtime_args.port[0]
self.grpc_port = self.runtime_args.port[1]
self.health_servicer = health.HealthServicer(experimental_non_blocking=True)
async def _setup_http_server(self):
from fastapi import FastAPI
app = FastAPI(
title='HTTP Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {'protocol': 'http'}
self.http_server = Server(
Config(app, host=__default_host__, port=self.http_port)
)
async def _setup_grpc_server(self):
self.grpc_server = grpc.aio.server()
jina_pb2_grpc.add_JinaRPCServicer_to_server(
self.streamer._streamer, self.grpc_server
)
service_names = (
jina_pb2.DESCRIPTOR.services_by_name['JinaRPC'].full_name,
reflection.SERVICE_NAME,
)
# Mark all services as healthy.
health_pb2_grpc.add_HealthServicer_to_server(
self.health_servicer, self.grpc_server
)
for service in service_names:
self.health_servicer.set(service, health_pb2.HealthCheckResponse.SERVING)
reflection.enable_server_reflection(service_names, self.grpc_server)
self.grpc_server.add_insecure_port(f'{__default_host__}:{self.grpc_port}')
await self.grpc_server.start()
async def setup_server(self):
await self._setup_http_server()
await self._setup_grpc_server()
async def run_server(self):
await self.http_server.serve()
await self.grpc_server.wait_for_termination()
async def shutdown(self):
self.http_server.should_exit = True
await self.grpc_server.stop(0)
await self.http_server.shutdown()
self.health_servicer.enter_graceful_shutdown()
@property
def _should_exit(self) -> bool:
return self.http_server.should_exit
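# --- Hedged usage sketch (not part of the original file) ---
# A custom gateway like the one above is typically attached to a Flow with one
# port per protocol. The port numbers below are arbitrary placeholders, and the
# exact `config_gateway` arguments are an assumption based on Jina 3.x usage.
if __name__ == '__main__':
    from jina import Flow

    flow = Flow().config_gateway(
        uses=MultiProtocolGateway,
        port=[12345, 12346],
        protocol=['http', 'grpc'],
    )
    with flow:
        flow.block()  # serve both protocols until interrupted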
|
import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.proto import jina_pb2, jina_pb2_grpc
class DummyResponseModel(BaseModel):
protocol: str
class MultiProtocolGateway(Gateway):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.http_port = self.runtime_args.port[0]
self.grpc_port = self.runtime_args.port[1]
self.health_servicer = health.HealthServicer(experimental_non_blocking=True)
async def _setup_http_server(self):
from fastapi import FastAPI
app = FastAPI(
title='HTTP Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {'protocol': 'http'}
self.http_server = Server(
Config(app, host=__default_host__, port=self.http_port)
)
async def _setup_grpc_server(self):
self.grpc_server = grpc.aio.server()
jina_pb2_grpc.add_JinaRPCServicer_to_server(
self.streamer._streamer, self.grpc_server
)
service_names = (
jina_pb2.DESCRIPTOR.services_by_name['JinaRPC'].full_name,
reflection.SERVICE_NAME,
)
# Mark all services as healthy.
health_pb2_grpc.add_HealthServicer_to_server(
self.health_servicer, self.grpc_server
)
for service in service_names:
self.health_servicer.set(service, health_pb2.HealthCheckResponse.SERVING)
reflection.enable_server_reflection(service_names, self.grpc_server)
self.grpc_server.add_insecure_port(f'{__default_host__}:{self.grpc_port}')
await self.grpc_server.start()
async def setup_server(self):
await self._setup_http_server()
await self._setup_grpc_server()
async def run_server(self):
await self.http_server.serve()
await self.grpc_server.wait_for_termination()
async def teardown(self):
await super().teardown()
await self.http_server.shutdown()
self.health_servicer.enter_graceful_shutdown()
async def stop_server(self):
self.http_server.should_exit = True
await self.grpc_server.stop(0)
@property
def should_exit(self) -> bool:
return self.http_server.should_exit
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for docs in self.streamer.stream_docs(
docs=DocumentArray([Document(text=text)]),
exec_endpoint='/',
):
doc = docs[0]
return {'text': doc.text, 'tags': doc.tags}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
from typing import Any, Optional, Union, cast
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers import BaseLLMOutputParser
from langchain_core.output_parsers.openai_functions import (
OutputFunctionsParser,
PydanticOutputFunctionsParser,
)
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import get_llm_kwargs
class AnswerWithSources(BaseModel):
"""An answer to the question, with sources."""
answer: str = Field(..., description="Answer to the question that was asked")
sources: list[str] = Field(
..., description="List of sources used to answer the question"
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with structured responses: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
),
)
def create_qa_with_structure_chain(
llm: BaseLanguageModel,
schema: Union[dict, type[BaseModel]],
output_parser: str = "base",
prompt: Optional[Union[PromptTemplate, ChatPromptTemplate]] = None,
verbose: bool = False,
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources
based on schema.
Args:
llm: Language model to use for the chain.
schema: Pydantic schema to use for the output.
output_parser: Output parser to use. Should be one of `pydantic` or `base`.
Default to `base`.
prompt: Optional prompt to use for the chain.
Returns:
"""
if output_parser == "pydantic":
if not (isinstance(schema, type) and is_basemodel_subclass(schema)):
raise ValueError(
"Must provide a pydantic class for schema when output_parser is "
"'pydantic'."
)
_output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser(
pydantic_schema=schema
)
elif output_parser == "base":
_output_parser = OutputFunctionsParser()
else:
raise ValueError(
f"Got unexpected output_parser: {output_parser}. "
f"Should be one of `pydantic` or `base`."
)
if isinstance(schema, type) and is_basemodel_subclass(schema):
if hasattr(schema, "model_json_schema"):
schema_dict = cast(dict, schema.model_json_schema())
else:
schema_dict = cast(dict, schema.schema())
else:
schema_dict = cast(dict, schema)
function = {
"name": schema_dict["title"],
"description": schema_dict["description"],
"parameters": schema_dict,
}
llm_kwargs = get_llm_kwargs(function)
messages = [
SystemMessage(
content=(
"You are a world class algorithm to answer "
"questions in a specific format."
)
),
HumanMessage(content="Answer question using the following context"),
HumanMessagePromptTemplate.from_template("{context}"),
HumanMessagePromptTemplate.from_template("Question: {question}"),
HumanMessage(content="Tips: Make sure to answer in the correct format"),
]
prompt = prompt or ChatPromptTemplate(messages=messages) # type: ignore[arg-type, call-arg]
chain = LLMChain(
llm=llm,
prompt=prompt,
llm_kwargs=llm_kwargs,
output_parser=_output_parser,
verbose=verbose,
)
return chain
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
),
)
def create_qa_with_sources_chain(
llm: BaseLanguageModel, verbose: bool = False, **kwargs: Any
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources.
Args:
llm: Language model to use for the chain.
verbose: Whether to print the details of the chain
**kwargs: Keyword arguments to pass to `create_qa_with_structure_chain`.
Returns:
Chain (LLMChain) that can be used to answer questions with citations.
"""
return create_qa_with_structure_chain(
llm, AnswerWithSources, verbose=verbose, **kwargs
)
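# --- Hedged usage sketch (not part of the original module) ---
# Illustrates how the (deprecated) `create_qa_with_sources_chain` might be
# wired up. It assumes `langchain_openai` is installed and an OpenAI API key is
# configured in the environment; neither is required by this module itself.
if __name__ == "__main__":
    from langchain_openai import ChatOpenAI

    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
    chain = create_qa_with_sources_chain(llm)
    answer = chain.run(
        context="LangChain is a framework for building LLM applications.",
        question="What is LangChain?",
    )
    # With the default "base" output parser this is a JSON string matching
    # the AnswerWithSources schema defined above.
    print(answer)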
|
from typing import Any, List, Optional, Type, Union, cast
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers import BaseLLMOutputParser
from langchain_core.output_parsers.openai_functions import (
OutputFunctionsParser,
PydanticOutputFunctionsParser,
)
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import get_llm_kwargs
class AnswerWithSources(BaseModel):
"""An answer to the question, with sources."""
answer: str = Field(..., description="Answer to the question that was asked")
sources: List[str] = Field(
..., description="List of sources used to answer the question"
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with structured responses: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
),
)
def create_qa_with_structure_chain(
llm: BaseLanguageModel,
schema: Union[dict, Type[BaseModel]],
output_parser: str = "base",
prompt: Optional[Union[PromptTemplate, ChatPromptTemplate]] = None,
verbose: bool = False,
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources
based on schema.
Args:
llm: Language model to use for the chain.
schema: Pydantic schema to use for the output.
output_parser: Output parser to use. Should be one of `pydantic` or `base`.
Default to `base`.
prompt: Optional prompt to use for the chain.
Returns:
"""
if output_parser == "pydantic":
if not (isinstance(schema, type) and is_basemodel_subclass(schema)):
raise ValueError(
"Must provide a pydantic class for schema when output_parser is "
"'pydantic'."
)
_output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser(
pydantic_schema=schema
)
elif output_parser == "base":
_output_parser = OutputFunctionsParser()
else:
raise ValueError(
f"Got unexpected output_parser: {output_parser}. "
f"Should be one of `pydantic` or `base`."
)
if isinstance(schema, type) and is_basemodel_subclass(schema):
if hasattr(schema, "model_json_schema"):
schema_dict = cast(dict, schema.model_json_schema())
else:
schema_dict = cast(dict, schema.schema())
else:
schema_dict = cast(dict, schema)
function = {
"name": schema_dict["title"],
"description": schema_dict["description"],
"parameters": schema_dict,
}
llm_kwargs = get_llm_kwargs(function)
messages = [
SystemMessage(
content=(
"You are a world class algorithm to answer "
"questions in a specific format."
)
),
HumanMessage(content="Answer question using the following context"),
HumanMessagePromptTemplate.from_template("{context}"),
HumanMessagePromptTemplate.from_template("Question: {question}"),
HumanMessage(content="Tips: Make sure to answer in the correct format"),
]
prompt = prompt or ChatPromptTemplate(messages=messages) # type: ignore[arg-type, call-arg]
chain = LLMChain(
llm=llm,
prompt=prompt,
llm_kwargs=llm_kwargs,
output_parser=_output_parser,
verbose=verbose,
)
return chain
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
),
)
def create_qa_with_sources_chain(
llm: BaseLanguageModel, verbose: bool = False, **kwargs: Any
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources.
Args:
llm: Language model to use for the chain.
verbose: Whether to print the details of the chain
**kwargs: Keyword arguments to pass to `create_qa_with_structure_chain`.
Returns:
Chain (LLMChain) that can be used to answer questions with citations.
"""
return create_qa_with_structure_chain(
llm, AnswerWithSources, verbose=verbose, **kwargs
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.datasets import boston_housing as boston_housing
from keras.datasets import california_housing as california_housing
from keras.datasets import cifar10 as cifar10
from keras.datasets import cifar100 as cifar100
from keras.datasets import fashion_mnist as fashion_mnist
from keras.datasets import imdb as imdb
from keras.datasets import mnist as mnist
from keras.datasets import reuters as reuters
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.datasets import boston_housing
from keras.api.datasets import california_housing
from keras.api.datasets import cifar10
from keras.api.datasets import cifar100
from keras.api.datasets import fashion_mnist
from keras.api.datasets import imdb
from keras.api.datasets import mnist
from keras.api.datasets import reuters
|
"""Internal utilities for the in memory implementation of VectorStore.
These are part of a private API, and users should not use them directly
as they can change without notice.
"""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Union
if TYPE_CHECKING:
import numpy as np
Matrix = Union[list[list[float]], list[np.ndarray], np.ndarray]
logger = logging.getLogger(__name__)
def _cosine_similarity(x: Matrix, y: Matrix) -> np.ndarray:
"""Row-wise cosine similarity between two equal-width matrices.
Args:
x: A matrix of shape (n, m).
y: A matrix of shape (k, m).
Returns:
A matrix of shape (n, k) where each element (i, j) is the cosine similarity
between the ith row of X and the jth row of Y.
Raises:
ValueError: If the number of columns in X and Y are not the same.
ImportError: If numpy is not installed.
"""
try:
import numpy as np
except ImportError as e:
msg = (
"cosine_similarity requires numpy to be installed. "
"Please install numpy with `pip install numpy`."
)
raise ImportError(msg) from e
if len(x) == 0 or len(y) == 0:
return np.array([[]])
x = np.array(x)
y = np.array(y)
if x.shape[1] != y.shape[1]:
msg = (
f"Number of columns in X and Y must be the same. X has shape {x.shape} "
f"and Y has shape {y.shape}."
)
raise ValueError(msg)
try:
import simsimd as simd # type: ignore[import-not-found]
except ImportError:
logger.debug(
"Unable to import simsimd, defaulting to NumPy implementation. If you want "
"to use simsimd please install with `pip install simsimd`."
)
x_norm = np.linalg.norm(x, axis=1)
y_norm = np.linalg.norm(y, axis=1)
# Ignore divide by zero errors run time warnings as those are handled below.
with np.errstate(divide="ignore", invalid="ignore"):
similarity = np.dot(x, y.T) / np.outer(x_norm, y_norm)
similarity[np.isnan(similarity) | np.isinf(similarity)] = 0.0
return similarity
x = np.array(x, dtype=np.float32)
y = np.array(y, dtype=np.float32)
return 1 - np.array(simd.cdist(x, y, metric="cosine"))
def maximal_marginal_relevance(
query_embedding: np.ndarray,
embedding_list: list,
lambda_mult: float = 0.5,
k: int = 4,
) -> list[int]:
"""Calculate maximal marginal relevance.
Args:
query_embedding: The query embedding.
embedding_list: A list of embeddings.
lambda_mult: The lambda parameter for MMR. Default is 0.5.
k: The number of embeddings to return. Default is 4.
Returns:
A list of indices of the embeddings to return.
Raises:
ImportError: If numpy is not installed.
"""
try:
import numpy as np
except ImportError as e:
msg = (
"maximal_marginal_relevance requires numpy to be installed. "
"Please install numpy with `pip install numpy`."
)
raise ImportError(msg) from e
if min(k, len(embedding_list)) <= 0:
return []
if query_embedding.ndim == 1:
query_embedding = np.expand_dims(query_embedding, axis=0)
similarity_to_query = _cosine_similarity(query_embedding, embedding_list)[0]
most_similar = int(np.argmax(similarity_to_query))
idxs = [most_similar]
selected = np.array([embedding_list[most_similar]])
while len(idxs) < min(k, len(embedding_list)):
best_score = -np.inf
idx_to_add = -1
similarity_to_selected = _cosine_similarity(embedding_list, selected)
for i, query_score in enumerate(similarity_to_query):
if i in idxs:
continue
redundant_score = max(similarity_to_selected[i])
equation_score = (
lambda_mult * query_score - (1 - lambda_mult) * redundant_score
)
if equation_score > best_score:
best_score = equation_score
idx_to_add = i
idxs.append(idx_to_add)
selected = np.append(selected, [embedding_list[idx_to_add]], axis=0)
return idxs
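# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates maximal_marginal_relevance on a few hand-made 2-D embeddings;
# the vectors are invented purely for illustration.
if __name__ == "__main__":
    import numpy as np

    query = np.array([1.0, 0.0])
    candidates = [
        [0.95, 0.30],   # most similar to the query
        [0.94, 0.31],   # near-duplicate of the first candidate
        [0.85, -0.45],  # still relevant, but adds diversity
    ]
    # With lambda_mult=0.5 the second pick trades relevance for diversity,
    # so the diverse third vector is chosen ahead of the near-duplicate.
    print(maximal_marginal_relevance(query, candidates, lambda_mult=0.5, k=2))
    # Expected: [0, 2]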
|
"""Internal utilities for the in memory implementation of VectorStore.
These are part of a private API, and users should not use them directly
as they can change without notice.
"""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Union
if TYPE_CHECKING:
import numpy as np
Matrix = Union[list[list[float]], list[np.ndarray], np.ndarray]
logger = logging.getLogger(__name__)
def _cosine_similarity(x: Matrix, y: Matrix) -> np.ndarray:
"""Row-wise cosine similarity between two equal-width matrices.
Args:
x: A matrix of shape (n, m).
y: A matrix of shape (k, m).
Returns:
A matrix of shape (n, k) where each element (i, j) is the cosine similarity
between the ith row of X and the jth row of Y.
Raises:
ValueError: If the number of columns in X and Y are not the same.
ImportError: If numpy is not installed.
"""
try:
import numpy as np
except ImportError as e:
msg = (
"cosine_similarity requires numpy to be installed. "
"Please install numpy with `pip install numpy`."
)
raise ImportError(msg) from e
if len(x) == 0 or len(y) == 0:
return np.array([])
x = np.array(x)
y = np.array(y)
if x.shape[1] != y.shape[1]:
msg = (
f"Number of columns in X and Y must be the same. X has shape {x.shape} "
f"and Y has shape {y.shape}."
)
raise ValueError(msg)
try:
import simsimd as simd # type: ignore[import-not-found]
except ImportError:
logger.debug(
"Unable to import simsimd, defaulting to NumPy implementation. If you want "
"to use simsimd please install with `pip install simsimd`."
)
x_norm = np.linalg.norm(x, axis=1)
y_norm = np.linalg.norm(y, axis=1)
# Ignore divide by zero errors run time warnings as those are handled below.
with np.errstate(divide="ignore", invalid="ignore"):
similarity = np.dot(x, y.T) / np.outer(x_norm, y_norm)
similarity[np.isnan(similarity) | np.isinf(similarity)] = 0.0
return similarity
x = np.array(x, dtype=np.float32)
y = np.array(y, dtype=np.float32)
return 1 - np.array(simd.cdist(x, y, metric="cosine"))
def maximal_marginal_relevance(
query_embedding: np.ndarray,
embedding_list: list,
lambda_mult: float = 0.5,
k: int = 4,
) -> list[int]:
"""Calculate maximal marginal relevance.
Args:
query_embedding: The query embedding.
embedding_list: A list of embeddings.
lambda_mult: The lambda parameter for MMR. Default is 0.5.
k: The number of embeddings to return. Default is 4.
Returns:
A list of indices of the embeddings to return.
Raises:
ImportError: If numpy is not installed.
"""
try:
import numpy as np
except ImportError as e:
msg = (
"maximal_marginal_relevance requires numpy to be installed. "
"Please install numpy with `pip install numpy`."
)
raise ImportError(msg) from e
if min(k, len(embedding_list)) <= 0:
return []
if query_embedding.ndim == 1:
query_embedding = np.expand_dims(query_embedding, axis=0)
similarity_to_query = _cosine_similarity(query_embedding, embedding_list)[0]
most_similar = int(np.argmax(similarity_to_query))
idxs = [most_similar]
selected = np.array([embedding_list[most_similar]])
while len(idxs) < min(k, len(embedding_list)):
best_score = -np.inf
idx_to_add = -1
similarity_to_selected = _cosine_similarity(embedding_list, selected)
for i, query_score in enumerate(similarity_to_query):
if i in idxs:
continue
redundant_score = max(similarity_to_selected[i])
equation_score = (
lambda_mult * query_score - (1 - lambda_mult) * redundant_score
)
if equation_score > best_score:
best_score = equation_score
idx_to_add = i
idxs.append(idx_to_add)
selected = np.append(selected, [embedding_list[idx_to_add]], axis=0)
return idxs
|
"""Base classes for chain routing."""
from __future__ import annotations
from abc import ABC
from collections.abc import Mapping
from typing import Any, NamedTuple, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
Callbacks,
)
from pydantic import ConfigDict
from langchain.chains.base import Chain
class Route(NamedTuple):
destination: Optional[str]
next_inputs: dict[str, Any]
class RouterChain(Chain, ABC):
"""Chain that outputs the name of a destination chain and the inputs to it."""
@property
def output_keys(self) -> list[str]:
return ["destination", "next_inputs"]
def route(self, inputs: dict[str, Any], callbacks: Callbacks = None) -> Route:
"""
Route inputs to a destination chain.
Args:
inputs: inputs to the chain
callbacks: callbacks to use for the chain
Returns:
a Route object
"""
result = self(inputs, callbacks=callbacks)
return Route(result["destination"], result["next_inputs"])
async def aroute(
self,
inputs: dict[str, Any],
callbacks: Callbacks = None,
) -> Route:
result = await self.acall(inputs, callbacks=callbacks)
return Route(result["destination"], result["next_inputs"])
class MultiRouteChain(Chain):
"""Use a single chain to route an input to one of multiple candidate chains."""
router_chain: RouterChain
"""Chain that routes inputs to destination chains."""
destination_chains: Mapping[str, Chain]
"""Chains that return final answer to inputs."""
default_chain: Chain
"""Default chain to use when none of the destination chains are suitable."""
silent_errors: bool = False
"""If True, use default_chain when an invalid destination name is provided.
Defaults to False."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> list[str]:
"""Will be whatever keys the router chain prompt expects.
:meta private:
"""
return self.router_chain.input_keys
@property
def output_keys(self) -> list[str]:
"""Will always return text key.
:meta private:
"""
return []
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
route = self.router_chain.route(inputs, callbacks=callbacks)
_run_manager.on_text(
str(route.destination) + ": " + str(route.next_inputs),
verbose=self.verbose,
)
if not route.destination:
return self.default_chain(route.next_inputs, callbacks=callbacks)
if route.destination in self.destination_chains:
return self.destination_chains[route.destination](
route.next_inputs,
callbacks=callbacks,
)
if self.silent_errors:
return self.default_chain(route.next_inputs, callbacks=callbacks)
msg = f"Received invalid destination chain name '{route.destination}'"
raise ValueError(msg)
async def _acall(
self,
inputs: dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> dict[str, Any]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
route = await self.router_chain.aroute(inputs, callbacks=callbacks)
await _run_manager.on_text(
str(route.destination) + ": " + str(route.next_inputs),
verbose=self.verbose,
)
if not route.destination:
return await self.default_chain.acall(
route.next_inputs,
callbacks=callbacks,
)
if route.destination in self.destination_chains:
return await self.destination_chains[route.destination].acall(
route.next_inputs,
callbacks=callbacks,
)
if self.silent_errors:
return await self.default_chain.acall(
route.next_inputs,
callbacks=callbacks,
)
msg = f"Received invalid destination chain name '{route.destination}'"
raise ValueError(msg)
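# --- Hedged sketch (not part of the original module) ---
# A toy RouterChain that routes on a keyword in the "input" value. The class
# and key names are invented for illustration; a real router would usually be
# an LLMRouterChain or EmbeddingRouterChain.
class KeywordRouterChain(RouterChain):
    """Route to the "cat" destination whenever the input mentions cats."""

    @property
    def input_keys(self) -> list[str]:
        return ["input"]

    def _call(
        self,
        inputs: dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> dict[str, Any]:
        destination = "cat" if "cat" in inputs["input"].lower() else None
        return {"destination": destination, "next_inputs": inputs}


if __name__ == "__main__":
    router = KeywordRouterChain()
    print(router.route({"input": "Tell me about cats"}))
    # -> Route(destination='cat', next_inputs={'input': 'Tell me about cats'})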
|
"""Base classes for chain routing."""
from __future__ import annotations
from abc import ABC
from collections.abc import Mapping
from typing import Any, NamedTuple, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
Callbacks,
)
from pydantic import ConfigDict
from langchain.chains.base import Chain
class Route(NamedTuple):
destination: Optional[str]
next_inputs: dict[str, Any]
class RouterChain(Chain, ABC):
"""Chain that outputs the name of a destination chain and the inputs to it."""
@property
def output_keys(self) -> list[str]:
return ["destination", "next_inputs"]
def route(self, inputs: dict[str, Any], callbacks: Callbacks = None) -> Route:
"""
Route inputs to a destination chain.
Args:
inputs: inputs to the chain
callbacks: callbacks to use for the chain
Returns:
a Route object
"""
result = self(inputs, callbacks=callbacks)
return Route(result["destination"], result["next_inputs"])
async def aroute(
self, inputs: dict[str, Any], callbacks: Callbacks = None
) -> Route:
result = await self.acall(inputs, callbacks=callbacks)
return Route(result["destination"], result["next_inputs"])
class MultiRouteChain(Chain):
"""Use a single chain to route an input to one of multiple candidate chains."""
router_chain: RouterChain
"""Chain that routes inputs to destination chains."""
destination_chains: Mapping[str, Chain]
"""Chains that return final answer to inputs."""
default_chain: Chain
"""Default chain to use when none of the destination chains are suitable."""
silent_errors: bool = False
"""If True, use default_chain when an invalid destination name is provided.
Defaults to False."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> list[str]:
"""Will be whatever keys the router chain prompt expects.
:meta private:
"""
return self.router_chain.input_keys
@property
def output_keys(self) -> list[str]:
"""Will always return text key.
:meta private:
"""
return []
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
route = self.router_chain.route(inputs, callbacks=callbacks)
_run_manager.on_text(
str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose
)
if not route.destination:
return self.default_chain(route.next_inputs, callbacks=callbacks)
if route.destination in self.destination_chains:
return self.destination_chains[route.destination](
route.next_inputs, callbacks=callbacks
)
if self.silent_errors:
return self.default_chain(route.next_inputs, callbacks=callbacks)
msg = f"Received invalid destination chain name '{route.destination}'"
raise ValueError(msg)
async def _acall(
self,
inputs: dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> dict[str, Any]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
route = await self.router_chain.aroute(inputs, callbacks=callbacks)
await _run_manager.on_text(
str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose
)
if not route.destination:
return await self.default_chain.acall(
route.next_inputs, callbacks=callbacks
)
if route.destination in self.destination_chains:
return await self.destination_chains[route.destination].acall(
route.next_inputs, callbacks=callbacks
)
if self.silent_errors:
return await self.default_chain.acall(
route.next_inputs, callbacks=callbacks
)
msg = f"Received invalid destination chain name '{route.destination}'"
raise ValueError(msg)
|
from __future__ import annotations
from .Asym import Asym
from .BoW import BoW
from .CLIPModel import CLIPModel
from .CNN import CNN
from .Dense import Dense
from .Dropout import Dropout
from .InputModule import InputModule
from .LayerNorm import LayerNorm
from .LSTM import LSTM
from .Module import Module
from .Normalize import Normalize
from .Pooling import Pooling
from .StaticEmbedding import StaticEmbedding
from .Transformer import Transformer
from .WeightedLayerPooling import WeightedLayerPooling
from .WordEmbeddings import WordEmbeddings
from .WordWeights import WordWeights
__all__ = [
"Transformer",
"StaticEmbedding",
"Asym",
"BoW",
"CNN",
"Dense",
"Dropout",
"LayerNorm",
"LSTM",
"Normalize",
"Pooling",
"WeightedLayerPooling",
"WordEmbeddings",
"WordWeights",
"CLIPModel",
"Module",
"InputModule",
]
|
from __future__ import annotations
from .Asym import Asym
from .BoW import BoW
from .CLIPModel import CLIPModel
from .CNN import CNN
from .Dense import Dense
from .Dropout import Dropout
from .LayerNorm import LayerNorm
from .LSTM import LSTM
from .Normalize import Normalize
from .Pooling import Pooling
from .StaticEmbedding import StaticEmbedding
from .Transformer import Transformer
from .WeightedLayerPooling import WeightedLayerPooling
from .WordEmbeddings import WordEmbeddings
from .WordWeights import WordWeights
__all__ = [
"Transformer",
"StaticEmbedding",
"Asym",
"BoW",
"CNN",
"Dense",
"Dropout",
"LayerNorm",
"LSTM",
"Normalize",
"Pooling",
"WeightedLayerPooling",
"WordEmbeddings",
"WordWeights",
"CLIPModel",
]
|
from typing import Any, Literal, Optional, Union
from exa_py import Exa # type: ignore[untyped-import]
from exa_py.api import (
HighlightsContentsOptions, # type: ignore[untyped-import]
TextContentsOptions, # type: ignore[untyped-import]
)
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import Field, SecretStr, model_validator
from langchain_exa._utilities import initialize_client
def _get_metadata(result: Any) -> dict[str, Any]:
"""Get the metadata from a result object."""
metadata = {
"title": result.title,
"url": result.url,
"id": result.id,
"score": result.score,
"published_date": result.published_date,
"author": result.author,
}
if getattr(result, "highlights"):
metadata["highlights"] = result.highlights
if getattr(result, "highlight_scores"):
metadata["highlight_scores"] = result.highlight_scores
if getattr(result, "summary"):
metadata["summary"] = result.summary
return metadata
class ExaSearchRetriever(BaseRetriever):
"""Exa Search retriever."""
k: int = 10 # num_results
"""The number of search results to return (1 to 100)."""
include_domains: Optional[list[str]] = None
"""A list of domains to include in the search."""
exclude_domains: Optional[list[str]] = None
"""A list of domains to exclude from the search."""
start_crawl_date: Optional[str] = None
"""The start date for the crawl (in YYYY-MM-DD format)."""
end_crawl_date: Optional[str] = None
"""The end date for the crawl (in YYYY-MM-DD format)."""
start_published_date: Optional[str] = None
"""The start date for when the document was published (in YYYY-MM-DD format)."""
end_published_date: Optional[str] = None
"""The end date for when the document was published (in YYYY-MM-DD format)."""
use_autoprompt: Optional[bool] = None
"""Whether to use autoprompt for the search."""
type: str = "neural"
"""The type of search, 'keyword', 'neural', or 'auto'. Default: neural"""
highlights: Optional[Union[HighlightsContentsOptions, bool]] = None
"""Whether to set the page content to the highlights of the results."""
text_contents_options: Union[TextContentsOptions, dict[str, Any], Literal[True]] = (
True
)
"""How to set the page content of the results. Can be True or a dict with options
like max_characters."""
livecrawl: Optional[Literal["always", "fallback", "never"]] = None
"""Option to crawl live webpages if content is not in the index. Options: "always",
"fallback", "never"."""
summary: Optional[Union[bool, dict[str, str]]] = None
"""Whether to include a summary of the content. Can be a boolean or a dict with a
custom query."""
client: Exa = Field(default=None)
exa_api_key: SecretStr = Field(default=None)
exa_base_url: Optional[str] = None
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: dict) -> Any:
"""Validate the environment."""
values = initialize_client(values)
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> list[Document]:
response = self.client.search_and_contents( # type: ignore[misc]
query,
num_results=self.k,
text=self.text_contents_options,
highlights=self.highlights, # type: ignore
include_domains=self.include_domains,
exclude_domains=self.exclude_domains,
start_crawl_date=self.start_crawl_date,
end_crawl_date=self.end_crawl_date,
start_published_date=self.start_published_date,
end_published_date=self.end_published_date,
use_autoprompt=self.use_autoprompt,
livecrawl=self.livecrawl,
summary=self.summary,
type=self.type,
)
results = response.results
return [
Document(
page_content=(result.text),
metadata=_get_metadata(result),
)
for result in results
]
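# --- Hedged usage sketch (not part of the original module) ---
# Shows how the retriever might be instantiated. The API key is a placeholder
# and the query is invented; supply real values to actually run a search.
if __name__ == "__main__":
    retriever = ExaSearchRetriever(
        k=3,
        type="auto",
        text_contents_options={"max_characters": 500},
        exa_api_key="YOUR_EXA_API_KEY",  # placeholder, not a real key
    )
    for doc in retriever.invoke("retrieval augmented generation best practices"):
        print(doc.metadata["title"], doc.metadata["url"])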
|
from typing import Any, Literal, Optional, Union
from exa_py import Exa # type: ignore[untyped-import]
from exa_py.api import (
HighlightsContentsOptions, # type: ignore[untyped-import]
TextContentsOptions, # type: ignore[untyped-import]
)
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import Field, SecretStr, model_validator
from langchain_exa._utilities import initialize_client
def _get_metadata(result: Any) -> dict[str, Any]:
"""Get the metadata from a result object."""
metadata = {
"title": result.title,
"url": result.url,
"id": result.id,
"score": result.score,
"published_date": result.published_date,
"author": result.author,
}
if getattr(result, "highlights"):
metadata["highlights"] = result.highlights
if getattr(result, "highlight_scores"):
metadata["highlight_scores"] = result.highlight_scores
return metadata
class ExaSearchRetriever(BaseRetriever):
"""Exa Search retriever."""
k: int = 10 # num_results
"""The number of search results to return."""
include_domains: Optional[list[str]] = None
"""A list of domains to include in the search."""
exclude_domains: Optional[list[str]] = None
"""A list of domains to exclude from the search."""
start_crawl_date: Optional[str] = None
"""The start date for the crawl (in YYYY-MM-DD format)."""
end_crawl_date: Optional[str] = None
"""The end date for the crawl (in YYYY-MM-DD format)."""
start_published_date: Optional[str] = None
"""The start date for when the document was published (in YYYY-MM-DD format)."""
end_published_date: Optional[str] = None
"""The end date for when the document was published (in YYYY-MM-DD format)."""
use_autoprompt: Optional[bool] = None
"""Whether to use autoprompt for the search."""
type: str = "neural"
"""The type of search, 'keyword' or 'neural'. Default: neural"""
highlights: Optional[Union[HighlightsContentsOptions, bool]] = None
"""Whether to set the page content to the highlights of the results."""
text_contents_options: Union[TextContentsOptions, Literal[True]] = True
"""How to set the page content of the results"""
client: Exa = Field(default=None)
exa_api_key: SecretStr = Field(default=None)
exa_base_url: Optional[str] = None
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: dict) -> Any:
"""Validate the environment."""
values = initialize_client(values)
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> list[Document]:
response = self.client.search_and_contents( # type: ignore[misc]
query,
num_results=self.k,
text=self.text_contents_options,
highlights=self.highlights, # type: ignore
include_domains=self.include_domains,
exclude_domains=self.exclude_domains,
start_crawl_date=self.start_crawl_date,
end_crawl_date=self.end_crawl_date,
start_published_date=self.start_published_date,
end_published_date=self.end_published_date,
use_autoprompt=self.use_autoprompt,
)
results = response.results
return [
Document(
page_content=(result.text),
metadata=_get_metadata(result),
)
for result in results
]
|
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union
from docarray.computation import AbstractComputationalBackend
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = TypeVar('ShapeT')
class AbstractTensor(AbstractType, Generic[ShapeT], ABC):
__parametrized_meta__ = type
_PROTO_FIELD_NAME: str
@classmethod
@abc.abstractmethod
def __docarray_validate_shape__(cls, t: T, shape: Tuple[int]) -> T:
"""Every tensor has to implement this method in order to
enable syntax of the form AnyTensor[shape].
It is called when a tensor is assigned to a field of this type.
i.e. when a tensor is passed to a Document field of type AnyTensor[shape].
The intended behaviour is as follows:
- If the shape of `t` is equal to `shape`, return `t`.
- If the shape of `t` is not equal to `shape`,
but can be reshaped to `shape`, return `t` reshaped to `shape`.
- If the shape of `t` is not equal to `shape`
and cannot be reshaped to `shape`, raise a ValueError.
:param t: The tensor to validate.
:param shape: The shape to validate against.
:return: The validated tensor.
"""
...
@classmethod
def __docarray_validate_getitem__(cls, item: Any) -> Tuple[int]:
"""This method validates the input to __class_getitem__.
It is called at "class creation time",
i.e. when a class is created with syntax of the form AnyTensor[shape].
The default implementation tries to cast any `item` to a tuple of ints.
A subclass can override this method to implement custom validation logic.
The output of this is eventually passed to
{ref}`AbstractTensor.__validate_shape__` as its `shape` argument.
Raises `ValueError` if the input `item` does not pass validation.
:param item: The item to validate, passed to __class_getitem__ (`Tensor[item]`).
:return: The validated item == the target shape of this tensor.
"""
if isinstance(item, int):
item = (item,)
try:
item = tuple(item)
except TypeError:
raise TypeError(f'{item} is not a valid tensor shape.')
return item
@classmethod
def _docarray_create_parametrized_type(cls: Type[T], shape: Tuple[int]):
shape_str = ', '.join([str(s) for s in shape])
class _ParametrizedTensor(
cls, # type: ignore
metaclass=cls.__parametrized_meta__, # type: ignore
):
_docarray_target_shape = shape
@classmethod
def validate(
_cls,
value: Any,
field: 'ModelField',
config: 'BaseConfig',
):
t = super().validate(value, field, config)
return _cls.__docarray_validate_shape__(t, _cls._docarray_target_shape)
_ParametrizedTensor.__name__ = f'{cls.__name__}[{shape_str}]'
_ParametrizedTensor.__qualname__ = f'{cls.__qualname__}[{shape_str}]'
return _ParametrizedTensor
def __class_getitem__(cls, item: Any):
target_shape = cls.__docarray_validate_getitem__(item)
return cls._docarray_create_parametrized_type(target_shape)
@classmethod
def __docarray_stack__(cls: Type[T], seq: Union[List[T], Tuple[T]]) -> T:
"""Stack a sequence of tensors into a single tensor."""
comp_backend = cls.get_comp_backend()
# at runtime, 'T' is always the correct input type for .stack()
# but mypy doesn't know that, so we ignore it here
return cls.__docarray_from_native__(comp_backend.stack(seq)) # type: ignore
@classmethod
@abc.abstractmethod
def __docarray_from_native__(cls: Type[T], value: Any) -> T:
"""
Create a DocArray tensor from a tensor that is native to the given framework,
e.g. from numpy.ndarray or torch.Tensor.
"""
...
@staticmethod
@abc.abstractmethod
def get_comp_backend() -> Type[AbstractComputationalBackend]:
"""The computational backend compatible with this tensor type."""
...
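# A minimal, self-contained sketch of the parametrization pattern implemented
# above (illustration only, not docarray's API): MyTensor[2, 3] produces a
# subclass whose validate() accepts, reshapes, or rejects arrays.
import numpy as np
class MyTensor:
    _target_shape = None
    def __class_getitem__(cls, item):
        shape = (item,) if isinstance(item, int) else tuple(item)
        return type(f'MyTensor[{shape}]', (cls,), {'_target_shape': shape})
    @classmethod
    def validate(cls, arr):
        # mirrors __docarray_validate_shape__: accept, reshape, or let numpy raise
        if cls._target_shape is None or arr.shape == cls._target_shape:
            return arr
        return arr.reshape(cls._target_shape)
print(MyTensor[2, 3].validate(np.arange(6)).shape)  # (2, 3)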
|
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union
from docarray.computation import AbstractComputationalBackend
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = TypeVar('ShapeT')
class AbstractTensor(AbstractType, Generic[ShapeT], ABC):
__parametrized_meta__ = type
@classmethod
@abc.abstractmethod
def __docarray_validate_shape__(cls, t: T, shape: Tuple[int]) -> T:
"""Every tensor has to implement this method in order to
enable syntax of the form AnyTensor[shape].
It is called when a tensor is assigned to a field of this type.
i.e. when a tensor is passed to a Document field of type AnyTensor[shape].
The intended behaviour is as follows:
- If the shape of `t` is equal to `shape`, return `t`.
- If the shape of `t` is not equal to `shape`,
but can be reshaped to `shape`, return `t` reshaped to `shape`.
- If the shape of `t` is not equal to `shape`
and cannot be reshaped to `shape`, raise a ValueError.
:param t: The tensor to validate.
:param shape: The shape to validate against.
:return: The validated tensor.
"""
...
@classmethod
def __docarray_validate_getitem__(cls, item: Any) -> Tuple[int]:
"""This method validates the input to __class_getitem__.
It is called at "class creation time",
i.e. when a class is created with syntax of the form AnyTensor[shape].
The default implementation tries to cast any `item` to a tuple of ints.
A subclass can override this method to implement custom validation logic.
The output of this is eventually passed to
        {ref}`AbstractTensor.__docarray_validate_shape__` as its `shape` argument.
        Raises `TypeError` if the input `item` does not pass validation.
:param item: The item to validate, passed to __class_getitem__ (`Tensor[item]`).
:return: The validated item == the target shape of this tensor.
"""
if isinstance(item, int):
item = (item,)
try:
item = tuple(item)
except TypeError:
raise TypeError(f'{item} is not a valid tensor shape.')
return item
@classmethod
def _docarray_create_parametrized_type(cls: Type[T], shape: Tuple[int]):
shape_str = ', '.join([str(s) for s in shape])
class _ParametrizedTensor(
cls, # type: ignore
metaclass=cls.__parametrized_meta__, # type: ignore
):
_docarray_target_shape = shape
@classmethod
def validate(
_cls,
value: Any,
field: 'ModelField',
config: 'BaseConfig',
):
t = super().validate(value, field, config)
return _cls.__docarray_validate_shape__(t, _cls._docarray_target_shape)
_ParametrizedTensor.__name__ = f'{cls.__name__}[{shape_str}]'
_ParametrizedTensor.__qualname__ = f'{cls.__qualname__}[{shape_str}]'
return _ParametrizedTensor
def __class_getitem__(cls, item: Any):
target_shape = cls.__docarray_validate_getitem__(item)
return cls._docarray_create_parametrized_type(target_shape)
@classmethod
def __docarray_stack__(cls: Type[T], seq: Union[List[T], Tuple[T]]) -> T:
"""Stack a sequence of tensors into a single tensor."""
comp_backend = cls.get_comp_backend()
# at runtime, 'T' is always the correct input type for .stack()
# but mypy doesn't know that, so we ignore it here
return cls.__docarray_from_native__(comp_backend.stack(seq)) # type: ignore
@classmethod
@abc.abstractmethod
def __docarray_from_native__(cls: Type[T], value: Any) -> T:
"""
Create a DocArray tensor from a tensor that is native to the given framework,
e.g. from numpy.ndarray or torch.Tensor.
"""
...
@staticmethod
@abc.abstractmethod
def get_comp_backend() -> Type[AbstractComputationalBackend]:
"""The computational backend compatible with this tensor type."""
...
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
affine_transform as affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
clip_to_image_size as clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
convert_format as convert_format,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
crop as crop,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
decode_deltas_to_boxes as decode_deltas_to_boxes,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
encode_box_to_deltas as encode_box_to_deltas,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
pad as pad,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.iou import (
compute_ciou as compute_ciou,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.iou import (
compute_iou as compute_iou,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
convert_format,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
crop,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
decode_deltas_to_boxes,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
encode_box_to_deltas,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import (
pad,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.iou import (
compute_ciou,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.iou import (
compute_iou,
)
|
from typing import Any, Union
from langchain_core.utils.json import parse_json_markdown
from typing_extensions import override
from langchain.evaluation.schema import StringEvaluator
class JsonSchemaEvaluator(StringEvaluator):
"""An evaluator that validates a JSON prediction against a JSON schema reference.
This evaluator checks if a given JSON prediction conforms to the provided JSON schema.
If the prediction is valid, the score is True (no errors). Otherwise, the score is False (error occurred).
Attributes:
requires_input (bool): Whether the evaluator requires input.
requires_reference (bool): Whether the evaluator requires reference.
evaluation_name (str): The name of the evaluation.
Examples:
evaluator = JsonSchemaEvaluator()
result = evaluator.evaluate_strings(
prediction='{"name": "John", "age": 30}',
reference={
"type": "object",
"properties": {
"name": {"type": "string"},
"age": {"type": "integer"}
}
}
)
assert result["score"] is not None
""" # noqa: E501
def __init__(self, **kwargs: Any) -> None:
"""Initializes the JsonSchemaEvaluator.
Args:
kwargs: Additional keyword arguments.
Raises:
ImportError: If the jsonschema package is not installed.
"""
super().__init__()
try:
import jsonschema # noqa: F401
except ImportError:
msg = (
"The JsonSchemaEvaluator requires the jsonschema package."
" Please install it with `pip install jsonschema`."
)
raise ImportError(msg)
@property
def requires_input(self) -> bool:
"""Returns whether the evaluator requires input."""
return False
@property
def requires_reference(self) -> bool:
"""Returns whether the evaluator requires reference."""
return True
@property
def evaluation_name(self) -> str:
"""Returns the name of the evaluation."""
return "json_schema_validation"
def _parse_json(self, node: Any) -> Union[dict, list, None, float, bool, int, str]:
if isinstance(node, str):
return parse_json_markdown(node)
if hasattr(node, "schema") and callable(getattr(node, "schema")):
# Pydantic model
return getattr(node, "schema")()
return node
def _validate(self, prediction: Any, schema: Any) -> dict:
from jsonschema import ValidationError, validate
try:
validate(instance=prediction, schema=schema)
return {
"score": True,
}
except ValidationError as e:
return {"score": False, "reasoning": repr(e)}
@override
def _evaluate_strings(
self,
prediction: Union[str, Any],
input: Union[str, Any] = None,
reference: Union[str, Any] = None,
**kwargs: Any,
) -> dict:
parsed_prediction = self._parse_json(prediction)
schema = self._parse_json(reference)
return self._validate(parsed_prediction, schema)
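# A standalone sketch of the jsonschema call wrapped by _validate above, with
# illustrative (not source-provided) schema and instance values.
from jsonschema import ValidationError, validate
_schema = {"type": "object", "properties": {"age": {"type": "integer"}}}
try:
    validate(instance={"age": "thirty"}, schema=_schema)
    _result = {"score": True}
except ValidationError as err:
    _result = {"score": False, "reasoning": repr(err)}  # "thirty" is not an integer
print(_result["score"])  # False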
|
from typing import Any, Union
from langchain_core.utils.json import parse_json_markdown
from typing_extensions import override
from langchain.evaluation.schema import StringEvaluator
class JsonSchemaEvaluator(StringEvaluator):
"""An evaluator that validates a JSON prediction against a JSON schema reference.
This evaluator checks if a given JSON prediction conforms to the provided JSON schema.
If the prediction is valid, the score is True (no errors). Otherwise, the score is False (error occurred).
Attributes:
requires_input (bool): Whether the evaluator requires input.
requires_reference (bool): Whether the evaluator requires reference.
evaluation_name (str): The name of the evaluation.
Examples:
evaluator = JsonSchemaEvaluator()
result = evaluator.evaluate_strings(
prediction='{"name": "John", "age": 30}',
reference={
"type": "object",
"properties": {
"name": {"type": "string"},
"age": {"type": "integer"}
}
}
)
assert result["score"] is not None
""" # noqa: E501
def __init__(self, **kwargs: Any) -> None:
"""Initializes the JsonSchemaEvaluator.
Args:
kwargs: Additional keyword arguments.
Raises:
ImportError: If the jsonschema package is not installed.
"""
super().__init__()
try:
import jsonschema # noqa: F401
except ImportError:
msg = (
"The JsonSchemaEvaluator requires the jsonschema package."
" Please install it with `pip install jsonschema`."
)
raise ImportError(msg)
@property
def requires_input(self) -> bool:
"""Returns whether the evaluator requires input."""
return False
@property
def requires_reference(self) -> bool:
"""Returns whether the evaluator requires reference."""
return True
@property
def evaluation_name(self) -> str:
"""Returns the name of the evaluation."""
return "json_schema_validation"
def _parse_json(self, node: Any) -> Union[dict, list, None, float, bool, int, str]:
if isinstance(node, str):
return parse_json_markdown(node)
elif hasattr(node, "schema") and callable(getattr(node, "schema")):
# Pydantic model
return getattr(node, "schema")()
return node
def _validate(self, prediction: Any, schema: Any) -> dict:
from jsonschema import ValidationError, validate
try:
validate(instance=prediction, schema=schema)
return {
"score": True,
}
except ValidationError as e:
return {"score": False, "reasoning": repr(e)}
@override
def _evaluate_strings(
self,
prediction: Union[str, Any],
input: Union[str, Any] = None,
reference: Union[str, Any] = None,
**kwargs: Any,
) -> dict:
parsed_prediction = self._parse_json(prediction)
schema = self._parse_json(reference)
return self._validate(parsed_prediction, schema)
|
"""
This script is identical to examples/training/sts/training_stsbenchmark.py with seed optimization.
We apply early stopping and evaluate the models over the dev set, to find out the best performing seeds.
For more details refer to -
Fine-Tuning Pretrained Language Models:
Weight Initializations, Data Orders, and Early Stopping by Dodge et al. 2020
https://arxiv.org/pdf/2002.06305.pdf
Why Seed Optimization?
Dodge et al. (2020) show a high dependence on the random seed for transformer based models like BERT,
as it converges to different minima that generalize differently to unseen data. This is especially the
case for small training datasets.
Citation: https://arxiv.org/abs/2010.08240
Usage:
python train_sts_seed_optimization.py
OR
python train_sts_seed_optimization.py pretrained_transformer_model_name seed_count stop_after
python train_sts_seed_optimization.py bert-base-uncased 10 0.3
"""
import csv
import gzip
import logging
import math
import os
import random
import sys
import numpy as np
import torch
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
seed_count = int(sys.argv[2]) if len(sys.argv) > 2 else 10
stop_after = float(sys.argv[3]) if len(sys.argv) > 3 else 0.3
logging.info(f"Train and Evaluate: {seed_count} Random Seeds")
for seed in range(seed_count):
# Setting seed for all random initializations
logging.info(f"##### Seed {seed} #####")
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# Read the dataset
train_batch_size = 16
num_epochs = 1
model_save_path = "output/bi-encoder/training_stsbenchmark_" + model_name + "/seed-" + str(seed)
# Use Hugging Face/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training. We skip evaluation in this example
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
# Stopping and Evaluating after 30% of training data (less than 1 epoch)
# We find from (Dodge et al.) that 20-30% is often ideal for convergence of random seed
steps_per_epoch = math.ceil(len(train_dataloader) * stop_after)
logging.info(f"Warmup-steps: {warmup_steps}")
logging.info(f"Early-stopping: {int(stop_after * 100)}% of the training-data")
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
steps_per_epoch=steps_per_epoch,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
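# Worked example of the schedule arithmetic above, with hypothetical numbers
# (~5,700 STSb train pairs, batch_size=16):
#   len(train_dataloader) = ceil(5700 / 16)     = 357
#   warmup_steps          = ceil(357 * 1 * 0.1) = 36   (10% of one epoch)
#   steps_per_epoch       = ceil(357 * 0.3)     = 108  (stop after 30% of the data)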
|
"""
This script is identical to examples/training/sts/training_stsbenchmark.py with seed optimization.
We apply early stopping and evaluate the models over the dev set, to find out the best performing seeds.
For more details refer to -
Fine-Tuning Pretrained Language Models:
Weight Initializations, Data Orders, and Early Stopping by Dodge et al. 2020
https://arxiv.org/pdf/2002.06305.pdf
Why Seed Optimization?
Dodge et al. (2020) show a high dependence on the random seed for transformer based models like BERT,
as it converges to different minima that generalize differently to unseen data. This is especially the
case for small training datasets.
Citation: https://arxiv.org/abs/2010.08240
Usage:
python train_sts_seed_optimization.py
OR
python train_sts_seed_optimization.py pretrained_transformer_model_name seed_count stop_after
python train_sts_seed_optimization.py bert-base-uncased 10 0.3
"""
import csv
import gzip
import logging
import math
import os
import random
import sys
import numpy as np
import torch
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
seed_count = int(sys.argv[2]) if len(sys.argv) > 2 else 10
stop_after = float(sys.argv[3]) if len(sys.argv) > 3 else 0.3
logging.info(f"Train and Evaluate: {seed_count} Random Seeds")
for seed in range(seed_count):
# Setting seed for all random initializations
logging.info(f"##### Seed {seed} #####")
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# Read the dataset
train_batch_size = 16
num_epochs = 1
model_save_path = "output/bi-encoder/training_stsbenchmark_" + model_name + "/seed-" + str(seed)
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training. We skip evaluation in this example
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
# Stopping and Evaluating after 30% of training data (less than 1 epoch)
# We find from (Dodge et al.) that 20-30% is often ideal for convergence of random seed
steps_per_epoch = math.ceil(len(train_dataloader) * stop_after)
logging.info(f"Warmup-steps: {warmup_steps}")
logging.info(f"Early-stopping: {int(stop_after * 100)}% of the training-data")
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
steps_per_epoch=steps_per_epoch,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
|
__version__ = '0.18.2'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.18.1'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .optimizer_hook import OptimizerHook
from .param_scheduler_hook import ParamSchedulerHook
from .sampler_seed_hook import DistSamplerSeedHook
from .sync_buffer_hook import SyncBuffersHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'OptimizerHook', 'SyncBuffersHook', 'EmptyCacheHook', 'CheckpointHook',
'LoggerHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .optimizer_hook import OptimizerHook
from .param_scheduler_hook import ParamSchedulerHook
from .sampler_seed_hook import DistSamplerSeedHook
from .sync_buffer_hook import SyncBuffersHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'OptimizerHook', 'SyncBuffersHook', 'EmptyCacheHook', 'CheckpointHook'
]
|
import asyncio
import logging
import os
import threading
from functools import wraps
from uuid import uuid4
from tenacity import retry, stop_after_attempt, wait_exponential
from backend.util.process import get_service_name
logger = logging.getLogger(__name__)
def _log_prefix(resource_name: str, conn_id: str):
"""
Returns a prefix string for logging purposes.
This needs to be called on the fly to get the current process ID & service name,
not the parent process ID & service name.
"""
return f"[PID-{os.getpid()}|THREAD-{threading.get_native_id()}|{get_service_name()}|{resource_name}-{conn_id}]"
def conn_retry(
resource_name: str,
action_name: str,
max_retry: int = 5,
multiplier: int = 1,
min_wait: float = 1,
max_wait: float = 30,
):
conn_id = str(uuid4())
def on_retry(retry_state):
prefix = _log_prefix(resource_name, conn_id)
exception = retry_state.outcome.exception()
logger.warning(f"{prefix} {action_name} failed: {exception}. Retrying now...")
def decorator(func):
is_coroutine = asyncio.iscoroutinefunction(func)
retry_decorator = retry(
stop=stop_after_attempt(max_retry + 1),
wait=wait_exponential(multiplier=multiplier, min=min_wait, max=max_wait),
before_sleep=on_retry,
reraise=True,
)
wrapped_func = retry_decorator(func)
@wraps(func)
def sync_wrapper(*args, **kwargs):
prefix = _log_prefix(resource_name, conn_id)
logger.info(f"{prefix} {action_name} started...")
try:
result = wrapped_func(*args, **kwargs)
logger.info(f"{prefix} {action_name} completed successfully.")
return result
except Exception as e:
logger.error(f"{prefix} {action_name} failed after retries: {e}")
raise
@wraps(func)
async def async_wrapper(*args, **kwargs):
prefix = _log_prefix(resource_name, conn_id)
logger.info(f"{prefix} {action_name} started...")
try:
result = await wrapped_func(*args, **kwargs)
logger.info(f"{prefix} {action_name} completed successfully.")
return result
except Exception as e:
logger.error(f"{prefix} {action_name} failed after retries: {e}")
raise
return async_wrapper if is_coroutine else sync_wrapper
return decorator
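# A minimal usage sketch (the resource names and function bodies below are
# placeholders, not from the source): decorating sync and async connection
# helpers with the factory above.
@conn_retry("Redis", "Acquiring connection", max_retry=3)
def _connect() -> str:
    # raising here would trigger tenacity's exponential backoff and the
    # on_retry warning defined above
    return "connected"
@conn_retry("Database", "Opening async session")
async def _connect_async() -> str:
    return "connected"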
|
import asyncio
import logging
import os
import threading
from functools import wraps
from uuid import uuid4
from tenacity import retry, stop_after_attempt, wait_exponential
from backend.util.process import get_service_name
logger = logging.getLogger(__name__)
def _log_prefix(resource_name: str, conn_id: str):
"""
Returns a prefix string for logging purposes.
This needs to be called on the fly to get the current process ID & service name,
not the parent process ID & service name.
"""
return f"[PID-{os.getpid()}|THREAD-{threading.get_native_id()}|{get_service_name()}|{resource_name}-{conn_id}]"
def conn_retry(
resource_name: str,
action_name: str,
max_retry: int = 5,
multiplier: int = 1,
min_wait: float = 1,
max_wait: float = 30,
):
conn_id = str(uuid4())
def on_retry(retry_state):
prefix = _log_prefix(resource_name, conn_id)
exception = retry_state.outcome.exception()
logger.error(f"{prefix} {action_name} failed: {exception}. Retrying now...")
def decorator(func):
is_coroutine = asyncio.iscoroutinefunction(func)
retry_decorator = retry(
stop=stop_after_attempt(max_retry + 1),
wait=wait_exponential(multiplier=multiplier, min=min_wait, max=max_wait),
before_sleep=on_retry,
reraise=True,
)
wrapped_func = retry_decorator(func)
@wraps(func)
def sync_wrapper(*args, **kwargs):
prefix = _log_prefix(resource_name, conn_id)
logger.info(f"{prefix} {action_name} started...")
try:
result = wrapped_func(*args, **kwargs)
logger.info(f"{prefix} {action_name} completed successfully.")
return result
except Exception as e:
logger.error(f"{prefix} {action_name} failed after retries: {e}")
raise
@wraps(func)
async def async_wrapper(*args, **kwargs):
prefix = _log_prefix(resource_name, conn_id)
logger.info(f"{prefix} {action_name} started...")
try:
result = await wrapped_func(*args, **kwargs)
logger.info(f"{prefix} {action_name} completed successfully.")
return result
except Exception as e:
logger.error(f"{prefix} {action_name} failed after retries: {e}")
raise
return async_wrapper if is_coroutine else sync_wrapper
return decorator
|
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tempfile
import unittest
from transformers.modelcard import ModelCard, TrainingSummary
class ModelCardTester(unittest.TestCase):
def setUp(self):
self.inputs_dict = {
"model_details": {
"Organization": "testing",
"Model date": "today",
"Model version": "v2.1, Developed by Test Corp in 2019.",
"Architecture": "Convolutional Neural Network.",
},
"metrics": "BLEU and ROUGE-1",
"evaluation_data": {
"Datasets": {"BLEU": "My-great-dataset-v1", "ROUGE-1": "My-short-dataset-v2.1"},
"Preprocessing": "See details on https://huggingface.co/papers/1810.03993",
},
"training_data": {
"Dataset": "English Wikipedia dump dated 2018-12-01",
"Preprocessing": (
"Using SentencePiece vocabulary of size 52k tokens. See details on"
" https://huggingface.co/papers/1810.03993"
),
},
"quantitative_analyses": {"BLEU": 55.1, "ROUGE-1": 76},
}
def test_model_card_common_properties(self):
modelcard = ModelCard.from_dict(self.inputs_dict)
self.assertTrue(hasattr(modelcard, "model_details"))
self.assertTrue(hasattr(modelcard, "intended_use"))
self.assertTrue(hasattr(modelcard, "factors"))
self.assertTrue(hasattr(modelcard, "metrics"))
self.assertTrue(hasattr(modelcard, "evaluation_data"))
self.assertTrue(hasattr(modelcard, "training_data"))
self.assertTrue(hasattr(modelcard, "quantitative_analyses"))
self.assertTrue(hasattr(modelcard, "ethical_considerations"))
self.assertTrue(hasattr(modelcard, "caveats_and_recommendations"))
def test_model_card_to_json_string(self):
modelcard = ModelCard.from_dict(self.inputs_dict)
obj = json.loads(modelcard.to_json_string())
for key, value in self.inputs_dict.items():
self.assertEqual(obj[key], value)
def test_model_card_to_json_file(self):
model_card_first = ModelCard.from_dict(self.inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
filename = os.path.join(tmpdirname, "modelcard.json")
model_card_first.to_json_file(filename)
model_card_second = ModelCard.from_json_file(filename)
self.assertEqual(model_card_second.to_dict(), model_card_first.to_dict())
def test_model_card_from_and_save_pretrained(self):
model_card_first = ModelCard.from_dict(self.inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
model_card_first.save_pretrained(tmpdirname)
model_card_second = ModelCard.from_pretrained(tmpdirname)
self.assertEqual(model_card_second.to_dict(), model_card_first.to_dict())
def test_model_summary_modelcard_base_metadata(self):
metadata = TrainingSummary("Model name").create_metadata()
self.assertTrue("library_name" in metadata)
self.assertTrue(metadata["library_name"] == "transformers")
|
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tempfile
import unittest
from transformers.modelcard import ModelCard, TrainingSummary
class ModelCardTester(unittest.TestCase):
def setUp(self):
self.inputs_dict = {
"model_details": {
"Organization": "testing",
"Model date": "today",
"Model version": "v2.1, Developed by Test Corp in 2019.",
"Architecture": "Convolutional Neural Network.",
},
"metrics": "BLEU and ROUGE-1",
"evaluation_data": {
"Datasets": {"BLEU": "My-great-dataset-v1", "ROUGE-1": "My-short-dataset-v2.1"},
"Preprocessing": "See details on https://arxiv.org/pdf/1810.03993.pdf",
},
"training_data": {
"Dataset": "English Wikipedia dump dated 2018-12-01",
"Preprocessing": (
"Using SentencePiece vocabulary of size 52k tokens. See details on"
" https://arxiv.org/pdf/1810.03993.pdf"
),
},
"quantitative_analyses": {"BLEU": 55.1, "ROUGE-1": 76},
}
def test_model_card_common_properties(self):
modelcard = ModelCard.from_dict(self.inputs_dict)
self.assertTrue(hasattr(modelcard, "model_details"))
self.assertTrue(hasattr(modelcard, "intended_use"))
self.assertTrue(hasattr(modelcard, "factors"))
self.assertTrue(hasattr(modelcard, "metrics"))
self.assertTrue(hasattr(modelcard, "evaluation_data"))
self.assertTrue(hasattr(modelcard, "training_data"))
self.assertTrue(hasattr(modelcard, "quantitative_analyses"))
self.assertTrue(hasattr(modelcard, "ethical_considerations"))
self.assertTrue(hasattr(modelcard, "caveats_and_recommendations"))
def test_model_card_to_json_string(self):
modelcard = ModelCard.from_dict(self.inputs_dict)
obj = json.loads(modelcard.to_json_string())
for key, value in self.inputs_dict.items():
self.assertEqual(obj[key], value)
def test_model_card_to_json_file(self):
model_card_first = ModelCard.from_dict(self.inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
filename = os.path.join(tmpdirname, "modelcard.json")
model_card_first.to_json_file(filename)
model_card_second = ModelCard.from_json_file(filename)
self.assertEqual(model_card_second.to_dict(), model_card_first.to_dict())
def test_model_card_from_and_save_pretrained(self):
model_card_first = ModelCard.from_dict(self.inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
model_card_first.save_pretrained(tmpdirname)
model_card_second = ModelCard.from_pretrained(tmpdirname)
self.assertEqual(model_card_second.to_dict(), model_card_first.to_dict())
def test_model_summary_modelcard_base_metadata(self):
metadata = TrainingSummary("Model name").create_metadata()
self.assertTrue("library_name" in metadata)
self.assertTrue(metadata["library_name"] == "transformers")
|
from typing import TYPE_CHECKING
from .github import GitHubOAuthHandler
from .google import GoogleOAuthHandler
from .notion import NotionOAuthHandler
from .twitter import TwitterOAuthHandler
if TYPE_CHECKING:
from ..providers import ProviderName
from .base import BaseOAuthHandler
# --8<-- [start:HANDLERS_BY_NAMEExample]
HANDLERS_BY_NAME: dict["ProviderName", type["BaseOAuthHandler"]] = {
handler.PROVIDER_NAME: handler
for handler in [
GitHubOAuthHandler,
GoogleOAuthHandler,
NotionOAuthHandler,
TwitterOAuthHandler,
]
}
# --8<-- [end:HANDLERS_BY_NAMEExample]
__all__ = ["HANDLERS_BY_NAME"]
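# A small sketch: the registry above maps each handler's PROVIDER_NAME (a
# ProviderName value) to its class; iteration is shown instead of a direct
# lookup because the enum members are not visible in this file.
for _provider, _handler_cls in HANDLERS_BY_NAME.items():
    print(_provider, "->", _handler_cls.__name__)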
|
from typing import TYPE_CHECKING
from .github import GitHubOAuthHandler
from .google import GoogleOAuthHandler
from .notion import NotionOAuthHandler
if TYPE_CHECKING:
from ..providers import ProviderName
from .base import BaseOAuthHandler
# --8<-- [start:HANDLERS_BY_NAMEExample]
HANDLERS_BY_NAME: dict["ProviderName", type["BaseOAuthHandler"]] = {
handler.PROVIDER_NAME: handler
for handler in [
GitHubOAuthHandler,
GoogleOAuthHandler,
NotionOAuthHandler,
]
}
# --8<-- [end:HANDLERS_BY_NAMEExample]
__all__ = ["HANDLERS_BY_NAME"]
|
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
"""
import logging
from sentence_transformers import LoggingHandler, SentenceTransformer
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
# Important: you need to guard your code with `if __name__ == "__main__":`. Otherwise, CUDA runs into issues when spawning new processes.
if __name__ == "__main__":
# Create a large list of 100k sentences
sentences = ["This is sentence {}".format(i) for i in range(100000)]
# Define the model
model = SentenceTransformer("all-MiniLM-L6-v2")
# Start the multi-process pool on all available CUDA devices
pool = model.start_multi_process_pool()
# Compute the embeddings using the multi-process pool
emb = model.encode_multi_process(sentences, pool)
print("Embeddings computed. Shape:", emb.shape)
# Optional: Stop the processes in the pool
model.stop_multi_process_pool(pool)
|
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
"""
from sentence_transformers import SentenceTransformer, LoggingHandler
import logging
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
# Important: you need to guard your code with `if __name__ == "__main__":`. Otherwise, CUDA runs into issues when spawning new processes.
if __name__ == "__main__":
# Create a large list of 100k sentences
sentences = ["This is sentence {}".format(i) for i in range(100000)]
# Define the model
model = SentenceTransformer("all-MiniLM-L6-v2")
# Start the multi-process pool on all available CUDA devices
pool = model.start_multi_process_pool()
# Compute the embeddings using the multi-process pool
emb = model.encode_multi_process(sentences, pool)
print("Embeddings computed. Shape:", emb.shape)
# Optional: Stop the processes in the pool
model.stop_multi_process_pool(pool)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.utils import Registry, build_from_cfg
TRANSFORMER = Registry('Transformer')
LINEAR_LAYERS = Registry('linear layers')
def build_transformer(cfg, default_args=None):
"""Builder for Transformer."""
return build_from_cfg(cfg, TRANSFORMER, default_args)
LINEAR_LAYERS.register_module('Linear', module=nn.Linear)
def build_linear_layer(cfg, *args, **kwargs):
"""Build linear layer.
Args:
cfg (None or dict): The linear layer config, which should contain:
- type (str): Layer type.
            - layer args: Args needed to instantiate a linear layer.
args (argument list): Arguments passed to the `__init__`
method of the corresponding linear layer.
kwargs (keyword arguments): Keyword arguments passed to the `__init__`
method of the corresponding linear layer.
Returns:
nn.Module: Created linear layer.
"""
if cfg is None:
cfg_ = dict(type='Linear')
else:
if not isinstance(cfg, dict):
raise TypeError('cfg must be a dict')
if 'type' not in cfg:
raise KeyError('the cfg dict must contain the key "type"')
cfg_ = cfg.copy()
layer_type = cfg_.pop('type')
if layer_type not in LINEAR_LAYERS:
raise KeyError(f'Unrecognized linear type {layer_type}')
else:
linear_layer = LINEAR_LAYERS.get(layer_type)
layer = linear_layer(*args, **kwargs, **cfg_)
return layer
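# Usage sketch for build_linear_layer (illustrative sizes): both calls resolve
# the registered 'Linear' entry and construct an nn.Linear.
fc = build_linear_layer(dict(type='Linear'), in_features=256, out_features=81)
print(fc)  # Linear(in_features=256, out_features=81, bias=True)
fc_default = build_linear_layer(None, 256, 81)  # cfg=None falls back to 'Linear'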
|
import torch.nn as nn
from mmcv.utils import Registry, build_from_cfg
TRANSFORMER = Registry('Transformer')
LINEAR_LAYERS = Registry('linear layers')
def build_transformer(cfg, default_args=None):
"""Builder for Transformer."""
return build_from_cfg(cfg, TRANSFORMER, default_args)
LINEAR_LAYERS.register_module('Linear', module=nn.Linear)
def build_linear_layer(cfg, *args, **kwargs):
"""Build linear layer.
Args:
cfg (None or dict): The linear layer config, which should contain:
- type (str): Layer type.
            - layer args: Args needed to instantiate a linear layer.
args (argument list): Arguments passed to the `__init__`
method of the corresponding linear layer.
kwargs (keyword arguments): Keyword arguments passed to the `__init__`
method of the corresponding linear layer.
Returns:
nn.Module: Created linear layer.
"""
if cfg is None:
cfg_ = dict(type='Linear')
else:
if not isinstance(cfg, dict):
raise TypeError('cfg must be a dict')
if 'type' not in cfg:
raise KeyError('the cfg dict must contain the key "type"')
cfg_ = cfg.copy()
layer_type = cfg_.pop('type')
if layer_type not in LINEAR_LAYERS:
raise KeyError(f'Unrecognized linear type {layer_type}')
else:
linear_layer = LINEAR_LAYERS.get(layer_type)
layer = linear_layer(*args, **kwargs, **cfg_)
return layer
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import nn
class CNN(nn.Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"""
def __init__(
self,
in_word_embedding_dimension: int,
out_channels: int = 256,
kernel_sizes: list[int] = [1, 3, 5],
        stride_sizes: list[int] | None = None,
):
nn.Module.__init__(self)
self.config_keys = ["in_word_embedding_dimension", "out_channels", "kernel_sizes"]
self.in_word_embedding_dimension = in_word_embedding_dimension
self.out_channels = out_channels
self.kernel_sizes = kernel_sizes
self.embeddings_dimension = out_channels * len(kernel_sizes)
self.convs = nn.ModuleList()
in_channels = in_word_embedding_dimension
if stride_sizes is None:
stride_sizes = [1] * len(kernel_sizes)
for kernel_size, stride in zip(kernel_sizes, stride_sizes):
padding_size = int((kernel_size - 1) / 2)
conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding_size,
)
self.convs.append(conv)
def forward(self, features):
token_embeddings = features["token_embeddings"]
token_embeddings = token_embeddings.transpose(1, -1)
vectors = [conv(token_embeddings) for conv in self.convs]
out = torch.cat(vectors, 1).transpose(1, -1)
features.update({"token_embeddings": out})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> list[int]:
raise NotImplementedError()
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "cnn_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "cnn_config.json")) as fIn:
config = json.load(fIn)
model = CNN(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
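# Quick shape check (illustrative sizes): with the defaults out_channels=256
# and kernel_sizes=[1, 3, 5], the output dimension is 256 * 3 = 768 and the
# sequence length is preserved by the padding.
if __name__ == "__main__":
    cnn = CNN(in_word_embedding_dimension=300)
    features = {"token_embeddings": torch.rand(2, 10, 300)}  # (batch, seq, dim)
    out = cnn(features)["token_embeddings"]
    print(out.shape, cnn.get_word_embedding_dimension())  # torch.Size([2, 10, 768]) 768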
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import nn
class CNN(nn.Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"""
def __init__(
self,
in_word_embedding_dimension: int,
out_channels: int = 256,
kernel_sizes: list[int] = [1, 3, 5],
        stride_sizes: list[int] | None = None,
):
nn.Module.__init__(self)
self.config_keys = ["in_word_embedding_dimension", "out_channels", "kernel_sizes"]
self.in_word_embedding_dimension = in_word_embedding_dimension
self.out_channels = out_channels
self.kernel_sizes = kernel_sizes
self.embeddings_dimension = out_channels * len(kernel_sizes)
self.convs = nn.ModuleList()
in_channels = in_word_embedding_dimension
if stride_sizes is None:
stride_sizes = [1] * len(kernel_sizes)
for kernel_size, stride in zip(kernel_sizes, stride_sizes):
padding_size = int((kernel_size - 1) / 2)
conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding_size,
)
self.convs.append(conv)
def forward(self, features):
token_embeddings = features["token_embeddings"]
token_embeddings = token_embeddings.transpose(1, -1)
vectors = [conv(token_embeddings) for conv in self.convs]
out = torch.cat(vectors, 1).transpose(1, -1)
features.update({"token_embeddings": out})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> list[int]:
raise NotImplementedError()
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "cnn_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "cnn_config.json"), "r") as fIn:
config = json.load(fIn)
model = CNN(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Any, Optional, Sequence, Tuple
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
    E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def before_epoch(self, runner) -> None:
"""Record time flag before start a epoch.
Args:
runner (Runner): The runner of the training process.
"""
self.t = time.time()
def before_iter(self, runner, data_batch: DATA_BATCH = None) -> None:
"""Logging time for loading data and update the time flag.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
"""
# TODO: update for new logging system
runner.log_buffer.update({'data_time': time.time() - self.t})
def after_iter(self,
runner,
data_batch: DATA_BATCH = None,
outputs: Optional[Sequence[BaseDataSample]] = None) -> None:
"""Logging time for a iteration and update the time flag.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataSample]): Outputs from model.
Defaults to None.
"""
# TODO: update for new logging system
runner.log_buffer.update({'time': time.time() - self.t})
self.t = time.time()
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Optional, Sequence
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
    E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def before_epoch(self, runner: object) -> None:
"""Record time flag before start a epoch.
Args:
runner (object): The runner of the training process.
"""
self.t = time.time()
def before_iter(
self,
runner: object,
data_batch: Optional[Sequence[BaseDataSample]] = None) -> None:
"""Logging time for loading data and update the time flag.
Args:
runner (object): The runner of the training process.
data_batch (Sequence[BaseDataSample]): Data from dataloader.
Defaults to None.
"""
# TODO: update for new logging system
runner.log_buffer.update({ # type: ignore
'data_time': time.time() - self.t
})
def after_iter(self,
runner: object,
data_batch: Optional[Sequence[BaseDataSample]] = None,
outputs: Optional[Sequence[BaseDataSample]] = None) -> None:
"""Logging time for a iteration and update the time flag.
Args:
runner (object): The runner of the training process.
data_batch (Sequence[BaseDataSample]): Data from dataloader.
Defaults to None.
outputs (Sequence[BaseDataSample]): Outputs from model.
Defaults to None.
"""
# TODO: update for new logging system
runner.log_buffer.update({ # type: ignore
'time': time.time() - self.t
})
self.t = time.time()
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import copy
from typing import Dict
from jina import DocumentArray, Executor, requests
from jinahub.indexers.searcher.FaissSearcher import FaissSearcher
from jinahub.indexers.storage.LMDBStorage import LMDBStorage
class FaissLMDBSearcher(Executor):
def __init__(self, dump_path=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self._vec_indexer = FaissSearcher(dump_path=dump_path, *args, **kwargs)
self._kv_indexer = LMDBStorage(dump_path=dump_path, *args, **kwargs)
@requests(on="/search")
def search(self, docs: "DocumentArray", parameters: Dict = None, **kwargs):
self._vec_indexer.search(docs, parameters)
kv_parameters = copy.deepcopy(parameters)
kv_parameters["traversal_paths"] = [
path + "m" for path in kv_parameters.get("traversal_paths", ["r"])
]
self._kv_indexer.search(docs, parameters=kv_parameters)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import copy
from typing import Dict
from jina import requests, DocumentArray, Executor
try:
from jinahub.indexers.searcher.FaissSearcher import FaissSearcher
except ImportError:  # broken import paths in previous release
from jina_executors.indexers.searcher.FaissSearcher.faiss_searcher import FaissSearcher
try:
from jinahub.indexers.storage.LMDBStorage import LMDBStorage
except ImportError:  # broken import paths in previous release
from jina_executors.indexers.storage.LMDBStorage.lmdb_storage import LMDBStorage
class FaissLMDBSearcher(Executor):
def __init__(self, dump_path=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self._vec_indexer = FaissSearcher(dump_path=dump_path, *args, **kwargs)
self._kv_indexer = LMDBStorage(dump_path=dump_path, *args, **kwargs)
@requests(on='/search')
def search(self, docs: 'DocumentArray', parameters: Dict = None, **kwargs):
self._vec_indexer.search(docs, parameters)
kv_parameters = copy.deepcopy(parameters)
kv_parameters['traversal_paths'] = [
path + 'm' for path in kv_parameters.get('traversal_paths', ['r'])
]
self._kv_indexer.search(docs, parameters=kv_parameters)
|
from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.7.0"
@keras_export("keras.version")
def version():
return __version__
|
from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.6.0"
@keras_export("keras.version")
def version():
return __version__
|
import importlib
import pytest
from dirty_equals import IsDict
from fastapi.testclient import TestClient
from ...utils import needs_py39, needs_py310
@pytest.fixture(
name="client",
params=[
"tutorial002",
pytest.param("tutorial002_py310", marks=needs_py310),
"tutorial002_an",
pytest.param("tutorial002_an_py39", marks=needs_py39),
pytest.param("tutorial002_an_py310", marks=needs_py310),
],
)
def get_client(request: pytest.FixtureRequest):
mod = importlib.import_module(f"docs_src.header_params.{request.param}")
client = TestClient(mod.app)
return client
@pytest.mark.parametrize(
"path,headers,expected_status,expected_response",
[
("/items", None, 200, {"strange_header": None}),
("/items", {"X-Header": "notvalid"}, 200, {"strange_header": None}),
(
"/items",
{"strange_header": "FastAPI test"},
200,
{"strange_header": "FastAPI test"},
),
(
"/items",
{"strange-header": "Not really underscore"},
200,
{"strange_header": None},
),
],
)
def test(path, headers, expected_status, expected_response, client: TestClient):
response = client.get(path, headers=headers)
assert response.status_code == expected_status
assert response.json() == expected_response
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code == 200
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Read Items",
"operationId": "read_items_items__get",
"parameters": [
{
"required": False,
"schema": IsDict(
{
"anyOf": [{"type": "string"}, {"type": "null"}],
"title": "Strange Header",
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{"title": "Strange Header", "type": "string"}
),
"name": "strange_header",
"in": "header",
}
],
}
}
},
"components": {
"schemas": {
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
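# A hedged sketch (assumption; the docs_src tutorial module itself is not shown
# here): an app consistent with the assertions above would declare the header
# with convert_underscores=False, so only the literal "strange_header" name
# (underscore included) is read and "strange-header" is ignored.
# from typing import Union
# from fastapi import FastAPI, Header
# app = FastAPI()
# @app.get("/items/")
# async def read_items(
#     strange_header: Union[str, None] = Header(default=None, convert_underscores=False),
# ):
#     return {"strange_header": strange_header}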
|
import pytest
from dirty_equals import IsDict
from fastapi.testclient import TestClient
from docs_src.header_params.tutorial002 import app
client = TestClient(app)
@pytest.mark.parametrize(
"path,headers,expected_status,expected_response",
[
("/items", None, 200, {"strange_header": None}),
("/items", {"X-Header": "notvalid"}, 200, {"strange_header": None}),
(
"/items",
{"strange_header": "FastAPI test"},
200,
{"strange_header": "FastAPI test"},
),
(
"/items",
{"strange-header": "Not really underscore"},
200,
{"strange_header": None},
),
],
)
def test(path, headers, expected_status, expected_response):
response = client.get(path, headers=headers)
assert response.status_code == expected_status
assert response.json() == expected_response
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Read Items",
"operationId": "read_items_items__get",
"parameters": [
{
"required": False,
"schema": IsDict(
{
"anyOf": [{"type": "string"}, {"type": "null"}],
"title": "Strange Header",
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{"title": "Strange Header", "type": "string"}
),
"name": "strange_header",
"in": "header",
}
],
}
}
},
"components": {
"schemas": {
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
|
_base_ = './lsj-100e_coco-detection.py'
# 8x25=200e
train_dataloader = dict(dataset=dict(times=8))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=25,
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
|
_base_ = './lsj_100e_coco_detection.py'
# 8x25=200e
train_dataloader = dict(dataset=dict(times=8))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=25,
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Optional, Union
import torch
from mmengine.device import is_cuda_available, is_musa_available
from mmengine.dist.utils import master_only
from mmengine.logging import MMLogger, print_log
class TimeCounter:
"""A tool that counts the average running time of a function or a method.
Users can use it as a decorator or context manager to calculate the average
running time of code blocks.
Args:
log_interval (int): The interval of logging. Defaults to 1.
warmup_interval (int): The interval of warmup. Defaults to 1.
with_sync (bool): Whether to synchronize cuda. Defaults to True.
tag (str, optional): Function tag. Used to distinguish between
different functions or methods being called. Defaults to None.
logger (MMLogger, optional): Formatted logger used to record messages.
Defaults to None.
Examples:
>>> import time
>>> from mmengine.utils.dl_utils import TimeCounter
>>> @TimeCounter()
... def fun1():
... time.sleep(0.1)
... fun1()
[fun1]-time per run averaged in the past 1 runs: 100.0 ms
>>> @@TimeCounter(log_interval=2, tag='fun')
... def fun2():
... time.sleep(0.2)
>>> for _ in range(3):
... fun2()
[fun]-time per run averaged in the past 2 runs: 200.0 ms
>>> with TimeCounter(tag='fun3'):
... time.sleep(0.3)
[fun3]-time per run averaged in the past 1 runs: 300.0 ms
"""
instance_dict: dict = dict()
log_interval: int
warmup_interval: int
logger: Optional[MMLogger]
__count: int
__pure_inf_time: float
def __new__(cls,
log_interval: int = 1,
warmup_interval: int = 1,
with_sync: bool = True,
tag: Optional[str] = None,
logger: Optional[MMLogger] = None):
assert warmup_interval >= 1
if tag is not None and tag in cls.instance_dict:
return cls.instance_dict[tag]
instance = super().__new__(cls)
cls.instance_dict[tag] = instance
instance.log_interval = log_interval
instance.warmup_interval = warmup_interval
instance.with_sync = with_sync # type: ignore
instance.tag = tag
instance.logger = logger
instance.__count = 0
instance.__pure_inf_time = 0.
instance.__start_time = 0.
return instance
@master_only
def __call__(self, fn):
if self.tag is None:
self.tag = fn.__name__
def wrapper(*args, **kwargs):
self.__count += 1
if self.with_sync:
if is_cuda_available():
torch.cuda.synchronize()
elif is_musa_available():
torch.musa.synchronize()
start_time = time.perf_counter()
result = fn(*args, **kwargs)
if self.with_sync:
if is_cuda_available():
torch.cuda.synchronize()
elif is_musa_available():
torch.musa.synchronize()
elapsed = time.perf_counter() - start_time
self.print_time(elapsed)
return result
return wrapper
@master_only
def __enter__(self):
assert self.tag is not None, 'In order to clearly distinguish ' \
'printing information in different ' \
'contexts, please specify the ' \
'tag parameter'
self.__count += 1
if self.with_sync and torch.cuda.is_available():
torch.cuda.synchronize()
self.__start_time = time.perf_counter()
@master_only
def __exit__(self, exc_type, exc_val, exc_tb):
if self.with_sync and torch.cuda.is_available():
torch.cuda.synchronize()
elapsed = time.perf_counter() - self.__start_time
self.print_time(elapsed)
def print_time(self, elapsed: Union[int, float]) -> None:
"""Print times per count."""
if self.__count >= self.warmup_interval:
self.__pure_inf_time += elapsed
if self.__count % self.log_interval == 0:
times_per_count = 1000 * self.__pure_inf_time / (
self.__count - self.warmup_interval + 1)
print_log(
f'[{self.tag}]-time per run averaged in the past '
f'{self.__count} runs: {times_per_count:.1f} ms',
self.logger)
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Optional, Union
import torch
from mmengine.device import is_cuda_available, is_musa_available
from mmengine.dist.utils import master_only
from mmengine.logging import MMLogger, print_log
class TimeCounter:
"""A tool that counts the average running time of a function or a method.
Users can use it as a decorator or context manager to calculate the average
running time of code blocks.
Args:
log_interval (int): The interval of logging. Defaults to 1.
warmup_interval (int): The interval of warmup. Defaults to 1.
with_sync (bool): Whether to synchronize cuda. Defaults to True.
tag (str, optional): Function tag. Used to distinguish between
different functions or methods being called. Defaults to None.
logger (MMLogger, optional): Formatted logger used to record messages.
Defaults to None.
Examples:
>>> import time
>>> from mmengine.utils.dl_utils import TimeCounter
>>> @TimeCounter()
... def fun1():
... time.sleep(0.1)
        >>> fun1()
[fun1]-time per run averaged in the past 1 runs: 100.0 ms
        >>> @TimeCounter(log_interval=2, tag='fun')
... def fun2():
... time.sleep(0.2)
>>> for _ in range(3):
... fun2()
[fun]-time per run averaged in the past 2 runs: 200.0 ms
>>> with TimeCounter(tag='fun3'):
... time.sleep(0.3)
[fun3]-time per run averaged in the past 1 runs: 300.0 ms
"""
instance_dict: dict = dict()
log_interval: int
warmup_interval: int
logger: Optional[MMLogger]
__count: int
__pure_inf_time: float
def __new__(cls,
log_interval: int = 1,
warmup_interval: int = 1,
with_sync: bool = True,
tag: Optional[str] = None,
logger: Optional[MMLogger] = None):
assert warmup_interval >= 1
if tag is not None and tag in cls.instance_dict:
return cls.instance_dict[tag]
instance = super().__new__(cls)
cls.instance_dict[tag] = instance
instance.log_interval = log_interval
instance.warmup_interval = warmup_interval
instance.with_sync = with_sync
instance.tag = tag
instance.logger = logger
instance.__count = 0
instance.__pure_inf_time = 0.
instance.__start_time = 0.
return instance
@master_only
def __call__(self, fn):
if self.tag is None:
self.tag = fn.__name__
def wrapper(*args, **kwargs):
self.__count += 1
if self.with_sync:
if is_cuda_available():
torch.cuda.synchronize()
elif is_musa_available():
torch.musa.synchronize()
start_time = time.perf_counter()
result = fn(*args, **kwargs)
if self.with_sync:
if is_cuda_available():
torch.cuda.synchronize()
elif is_musa_available():
torch.musa.synchronize()
elapsed = time.perf_counter() - start_time
self.print_time(elapsed)
return result
return wrapper
@master_only
def __enter__(self):
assert self.tag is not None, 'In order to clearly distinguish ' \
'printing information in different ' \
'contexts, please specify the ' \
'tag parameter'
self.__count += 1
if self.with_sync and torch.cuda.is_available():
torch.cuda.synchronize()
self.__start_time = time.perf_counter()
@master_only
def __exit__(self, exc_type, exc_val, exc_tb):
if self.with_sync and torch.cuda.is_available():
torch.cuda.synchronize()
elapsed = time.perf_counter() - self.__start_time
self.print_time(elapsed)
def print_time(self, elapsed: Union[int, float]) -> None:
"""print times per count."""
if self.__count >= self.warmup_interval:
self.__pure_inf_time += elapsed
if self.__count % self.log_interval == 0:
times_per_count = 1000 * self.__pure_inf_time / (
self.__count - self.warmup_interval + 1)
print_log(
f'[{self.tag}]-time per run averaged in the past '
f'{self.__count} runs: {times_per_count:.1f} ms',
self.logger)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.saving.serialization import (
deserialize_keras_object as deserialize_keras_object,
)
from keras.src.legacy.saving.serialization import (
serialize_keras_object as serialize_keras_object,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.saving.serialization import deserialize_keras_object
from keras.src.legacy.saving.serialization import serialize_keras_object
|
"""Optimization related classes and functions."""
import logging
from typing import Any, Dict, List, Optional, Literal
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
logger = logging.getLogger(__name__)
DEFAULT_INSTRUCTION_STR = "Given the context, please answer the final question"
def format_metadata(nodes: List[NodeWithScore]):
return {node.node.id_: node.metadata for node in nodes}
class LongLLMLinguaPostprocessor(BaseNodePostprocessor):
"""
Optimization of nodes.
Compress using LongLLMLingua paper.
"""
instruction_str: str = Field(
default=DEFAULT_INSTRUCTION_STR, description="Instruction string."
)
target_token: int = Field(
default=-1, description="Target number of compressed tokens."
)
use_llmlingua2: bool = Field(
default=False, description="Whether to use the llmlingua2 approach"
)
rank_method: str = Field(default="longllmlingua", description="Ranking method.")
additional_compress_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional compress kwargs."
)
_llm_lingua: Any = PrivateAttr()
def __init__(
self,
model_name: str = "NousResearch/Llama-2-7b-hf",
device_map: Literal["cuda", "cpu", "mps"] = "cuda",
model_config: Optional[dict] = {},
open_api_config: Optional[dict] = {},
instruction_str: str = DEFAULT_INSTRUCTION_STR,
target_token: float = -1,
rank_method: str = "longllmlingua",
additional_compress_kwargs: Optional[Dict[str, Any]] = {},
use_llmlingua2: bool = False,
):
"""LongLLMLingua Compressor for Node Context."""
from llmlingua import PromptCompressor
super().__init__(
instruction_str=instruction_str,
target_token=target_token,
rank_method=rank_method,
additional_compress_kwargs=additional_compress_kwargs,
use_llmlingua2=use_llmlingua2,
)
open_api_config = open_api_config or {}
additional_compress_kwargs = additional_compress_kwargs or {}
if self.use_llmlingua2 is True:
assert (
model_name == "microsoft/llmlingua-2-xlm-roberta-large-meetingbank"
), 'Must use "microsoft/llmlingua-2-xlm-roberta-large-meetingbank" as the model name for llmlingua2'
self._llm_lingua = PromptCompressor(
model_name=model_name,
device_map=device_map,
model_config=model_config,
open_api_config=open_api_config,
use_llmlingua2=self.use_llmlingua2,
)
@classmethod
def class_name(cls) -> str:
return "LongLLMLinguaPostprocessor"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
"""Optimize a node text given the query by shortening the node text."""
if query_bundle is None:
raise ValueError("Query bundle is required.")
        # The prompt compression for llmlingua2 works on raw text, so only the
        # node texts are extracted here; their metadata is preserved separately below.
context_texts = [n.text for n in nodes]
# Preserve metadata for prompt compressed nodes
metadata = format_metadata(nodes)
new_context_texts = "".join(context_texts)
# You can use it this way, although the question-aware fine-grained compression hasn't been enabled.
compressed_prompt = self._llm_lingua.compress_prompt(
new_context_texts, # ! Replace the previous context_list
instruction=self.instruction_str,
question=query_bundle.query_str,
# target_token=2000,
target_token=self.target_token,
rank_method=self.rank_method,
**self.additional_compress_kwargs,
)
compressed_prompt_txt = compressed_prompt["compressed_prompt"]
# separate out the question and instruction (appended to top and bottom)
compressed_prompt_txt_list = compressed_prompt_txt.split("\n\n")
if self.use_llmlingua2 is False:
compressed_prompt_txt_list = compressed_prompt_txt_list[1:-1]
# return nodes for each list
keys_to_exclude = list(metadata.keys())
return [
NodeWithScore(
node=TextNode(
text=t,
metadata=metadata,
excluded_llm_metadata_keys=keys_to_exclude,
excluded_embed_metadata_keys=keys_to_exclude,
)
)
for t in compressed_prompt_txt_list
]
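# A minimal usage sketch (not part of the original module); it assumes the
# llmlingua package and its default model are available. The node text,
# question and target_token value are illustrative placeholders.
if __name__ == "__main__":
    postprocessor = LongLLMLinguaPostprocessor(
        instruction_str=DEFAULT_INSTRUCTION_STR,
        target_token=300,
        rank_method="longllmlingua",
    )
    retrieved_nodes = [
        NodeWithScore(node=TextNode(text="The report concludes that sales grew."))
    ]
    question = "What does the report conclude?"
    compressed_nodes = postprocessor.postprocess_nodes(
        retrieved_nodes, query_bundle=QueryBundle(query_str=question)
    )
    print(f"Compressed down to {len(compressed_nodes)} node(s)")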
|
"""Optimization related classes and functions."""
import logging
from typing import Any, Dict, List, Optional, Literal
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
logger = logging.getLogger(__name__)
DEFAULT_INSTRUCTION_STR = "Given the context, please answer the final question"
def format_metadata(nodes: List[NodeWithScore]):
return {node.node.id_: node.metadata for node in nodes}
class LongLLMLinguaPostprocessor(BaseNodePostprocessor):
"""Optimization of nodes.
Compress using LongLLMLingua paper.
"""
instruction_str: str = Field(
default=DEFAULT_INSTRUCTION_STR, description="Instruction string."
)
target_token: int = Field(
default=-1, description="Target number of compressed tokens."
)
use_llmlingua2: bool = Field(
default=False, description="Whether to use the llmlingua2 approach"
)
rank_method: str = Field(default="longllmlingua", description="Ranking method.")
additional_compress_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional compress kwargs."
)
_llm_lingua: Any = PrivateAttr()
def __init__(
self,
model_name: str = "NousResearch/Llama-2-7b-hf",
device_map: Literal["cuda", "cpu", "mps"] = "cuda",
model_config: Optional[dict] = {},
open_api_config: Optional[dict] = {},
instruction_str: str = DEFAULT_INSTRUCTION_STR,
target_token: float = -1,
rank_method: str = "longllmlingua",
additional_compress_kwargs: Optional[Dict[str, Any]] = {},
use_llmlingua2: bool = False,
):
"""LongLLMLingua Compressor for Node Context."""
from llmlingua import PromptCompressor
super().__init__(
instruction_str=instruction_str,
target_token=target_token,
rank_method=rank_method,
additional_compress_kwargs=additional_compress_kwargs,
use_llmlingua2=use_llmlingua2,
)
open_api_config = open_api_config or {}
additional_compress_kwargs = additional_compress_kwargs or {}
if self.use_llmlingua2 is True:
assert (
model_name == "microsoft/llmlingua-2-xlm-roberta-large-meetingbank"
), 'Must use "microsoft/llmlingua-2-xlm-roberta-large-meetingbank" as the model name for llmlingua2'
self._llm_lingua = PromptCompressor(
model_name=model_name,
device_map=device_map,
model_config=model_config,
open_api_config=open_api_config,
use_llmlingua2=self.use_llmlingua2,
)
@classmethod
def class_name(cls) -> str:
return "LongLLMLinguaPostprocessor"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
"""Optimize a node text given the query by shortening the node text."""
if query_bundle is None:
raise ValueError("Query bundle is required.")
        # The prompt compression for llmlingua2 works on raw text, so only the
        # node texts are extracted here; their metadata is preserved separately below.
context_texts = [n.text for n in nodes]
# Preserve metadata for prompt compressed nodes
metadata = format_metadata(nodes)
new_context_texts = "".join(context_texts)
# You can use it this way, although the question-aware fine-grained compression hasn't been enabled.
compressed_prompt = self._llm_lingua.compress_prompt(
new_context_texts, # ! Replace the previous context_list
instruction=self.instruction_str,
question=query_bundle.query_str,
# target_token=2000,
target_token=self.target_token,
rank_method=self.rank_method,
**self.additional_compress_kwargs,
)
compressed_prompt_txt = compressed_prompt["compressed_prompt"]
# separate out the question and instruction (appended to top and bottom)
compressed_prompt_txt_list = compressed_prompt_txt.split("\n\n")
if self.use_llmlingua2 is False:
compressed_prompt_txt_list = compressed_prompt_txt_list[1:-1]
# return nodes for each list
keys_to_exclude = list(metadata.keys())
return [
NodeWithScore(
node=TextNode(
text=t,
metadata=metadata,
excluded_llm_metadata_keys=keys_to_exclude,
excluded_embed_metadata_keys=keys_to_exclude,
)
)
for t in compressed_prompt_txt_list
]
|
from datetime import datetime, timezone
import pytest
from prisma.enums import CreditTransactionType
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.block import get_block
from backend.data.credit import BetaUserCredit, UsageTransactionMetadata
from backend.data.execution import NodeExecutionEntry
from backend.data.user import DEFAULT_USER_ID
from backend.executor.utils import block_usage_cost
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = BetaUserCredit(REFILL_VALUE)
async def disable_test_user_transactions():
await CreditTransaction.prisma().delete_many(where={"userId": DEFAULT_USER_ID})
async def top_up(amount: int):
await user_credit._add_transaction(
DEFAULT_USER_ID,
amount,
CreditTransactionType.TOP_UP,
)
async def spend_credits(entry: NodeExecutionEntry) -> int:
block = get_block(entry.block_id)
if not block:
raise RuntimeError(f"Block {entry.block_id} not found")
cost, matching_filter = block_usage_cost(block=block, input_data=entry.inputs)
await user_credit.spend_credits(
entry.user_id,
cost,
UsageTransactionMetadata(
graph_exec_id=entry.graph_exec_id,
graph_id=entry.graph_id,
node_id=entry.node_id,
node_exec_id=entry.node_exec_id,
block_id=entry.block_id,
block=entry.block_id,
input=matching_filter,
reason=f"Ran block {entry.block_id} {block.name}",
),
)
return cost
@pytest.mark.asyncio(loop_scope="session")
async def test_block_credit_usage(server: SpinTestServer):
await disable_test_user_transactions()
await top_up(100)
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
spending_amount_1 = await spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
inputs={
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
),
)
assert spending_amount_1 > 0
spending_amount_2 = await spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
inputs={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
),
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(loop_scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
await disable_test_user_transactions()
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(loop_scope="session")
async def test_block_credit_reset(server: SpinTestServer):
await disable_test_user_transactions()
month1 = 1
month2 = 2
    # set the calendar to month 2 while keeping the rest of the current timestamp
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
month2credit = await user_credit.get_credits(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month1, day=1
)
month1credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(loop_scope="session")
async def test_credit_refill(server: SpinTestServer):
await disable_test_user_transactions()
balance = await user_credit.get_credits(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
from datetime import datetime, timezone
import pytest
from prisma.enums import CreditTransactionType
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.block import get_block
from backend.data.credit import BetaUserCredit, UsageTransactionMetadata
from backend.data.execution import NodeExecutionEntry
from backend.data.user import DEFAULT_USER_ID
from backend.executor.utils import block_usage_cost
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = BetaUserCredit(REFILL_VALUE)
async def disable_test_user_transactions():
await CreditTransaction.prisma().delete_many(where={"userId": DEFAULT_USER_ID})
async def top_up(amount: int):
await user_credit._add_transaction(
DEFAULT_USER_ID,
amount,
CreditTransactionType.TOP_UP,
)
async def spend_credits(entry: NodeExecutionEntry) -> int:
block = get_block(entry.block_id)
if not block:
raise RuntimeError(f"Block {entry.block_id} not found")
cost, matching_filter = block_usage_cost(block=block, input_data=entry.data)
await user_credit.spend_credits(
entry.user_id,
cost,
UsageTransactionMetadata(
graph_exec_id=entry.graph_exec_id,
graph_id=entry.graph_id,
node_id=entry.node_id,
node_exec_id=entry.node_exec_id,
block_id=entry.block_id,
block=entry.block_id,
input=matching_filter,
reason=f"Ran block {entry.block_id} {block.name}",
),
)
return cost
@pytest.mark.asyncio(loop_scope="session")
async def test_block_credit_usage(server: SpinTestServer):
await disable_test_user_transactions()
await top_up(100)
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
spending_amount_1 = await spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
),
)
assert spending_amount_1 > 0
spending_amount_2 = await spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
),
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(loop_scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
await disable_test_user_transactions()
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(loop_scope="session")
async def test_block_credit_reset(server: SpinTestServer):
await disable_test_user_transactions()
month1 = 1
month2 = 2
    # set the calendar to month 2 while keeping the rest of the current timestamp
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
month2credit = await user_credit.get_credits(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month1, day=1
)
month1credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(loop_scope="session")
async def test_credit_refill(server: SpinTestServer):
await disable_test_user_transactions()
balance = await user_credit.get_credits(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
"""Methods and algorithms to robustly estimate covariance.
They estimate the covariance of features at given sets of points, as well as the
precision matrix defined as the inverse of the covariance. Covariance estimation is
closely related to the theory of Gaussian graphical models.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._elliptic_envelope import EllipticEnvelope
from ._empirical_covariance import (
EmpiricalCovariance,
empirical_covariance,
log_likelihood,
)
from ._graph_lasso import GraphicalLasso, GraphicalLassoCV, graphical_lasso
from ._robust_covariance import MinCovDet, fast_mcd
from ._shrunk_covariance import (
OAS,
LedoitWolf,
ShrunkCovariance,
ledoit_wolf,
ledoit_wolf_shrinkage,
oas,
shrunk_covariance,
)
__all__ = [
"OAS",
"EllipticEnvelope",
"EmpiricalCovariance",
"GraphicalLasso",
"GraphicalLassoCV",
"LedoitWolf",
"MinCovDet",
"ShrunkCovariance",
"empirical_covariance",
"fast_mcd",
"graphical_lasso",
"ledoit_wolf",
"ledoit_wolf_shrinkage",
"log_likelihood",
"oas",
"shrunk_covariance",
]
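# A minimal usage sketch (illustrative, not part of the scikit-learn package):
# fit an empirical covariance estimator and inspect the covariance/precision.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(200, 5)
    estimator = EmpiricalCovariance().fit(X)
    print(estimator.covariance_.shape)  # (5, 5) covariance estimate
    print(estimator.precision_.shape)   # (5, 5) precision (inverse covariance)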
|
"""Methods and algorithms to robustly estimate covariance.
They estimate the covariance of features at given sets of points, as well as the
precision matrix defined as the inverse of the covariance. Covariance estimation is
closely related to the theory of Gaussian graphical models.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._elliptic_envelope import EllipticEnvelope
from ._empirical_covariance import (
EmpiricalCovariance,
empirical_covariance,
log_likelihood,
)
from ._graph_lasso import GraphicalLasso, GraphicalLassoCV, graphical_lasso
from ._robust_covariance import MinCovDet, fast_mcd
from ._shrunk_covariance import (
OAS,
LedoitWolf,
ShrunkCovariance,
ledoit_wolf,
ledoit_wolf_shrinkage,
oas,
shrunk_covariance,
)
__all__ = [
"EllipticEnvelope",
"EmpiricalCovariance",
"GraphicalLasso",
"GraphicalLassoCV",
"LedoitWolf",
"MinCovDet",
"OAS",
"ShrunkCovariance",
"empirical_covariance",
"fast_mcd",
"graphical_lasso",
"ledoit_wolf",
"ledoit_wolf_shrinkage",
"log_likelihood",
"oas",
"shrunk_covariance",
]
|
# mypy: ignore-errors
import contextlib
import functools
import inspect
import torch
# Test whether the hardware BF32 math mode is enabled. It is enabled only when:
# - MKLDNN is available
# - BF16 is supported by MKLDNN
def bf32_is_not_fp32():
if not torch.backends.mkldnn.is_available():
return False
if not torch.ops.mkldnn._is_mkldnn_bf16_supported():
return False
return True
@contextlib.contextmanager
def bf32_off():
old_matmul_precision = torch.backends.mkldnn.matmul.fp32_precision
old_conv_precision = torch.backends.mkldnn.conv.fp32_precision
try:
torch.backends.mkldnn.matmul.fp32_precision = "ieee"
torch.backends.mkldnn.conv.fp32_precision = "ieee"
yield
finally:
torch.backends.mkldnn.matmul.fp32_precision = old_matmul_precision
torch.backends.mkldnn.conv.fp32_precision = old_conv_precision
@contextlib.contextmanager
def bf32_on(self, bf32_precision=1e-2):
old_matmul_precision = torch.backends.mkldnn.matmul.fp32_precision
old_conv_precision = torch.backends.mkldnn.conv.fp32_precision
old_precision = self.precision
try:
torch.backends.mkldnn.matmul.fp32_precision = "bf16"
torch.backends.mkldnn.conv.fp32_precision = "bf16"
self.precision = bf32_precision
yield
finally:
torch.backends.mkldnn.matmul.fp32_precision = old_matmul_precision
torch.backends.mkldnn.conv.fp32_precision = old_conv_precision
self.precision = old_precision
# This decorator wraps a test so that it runs twice: once with allow_bf32=True
# and once with allow_bf32=False. When running with allow_bf32=True, it uses
# the reduced precision specified by the bf32_precision argument.
def bf32_on_and_off(bf32_precision=1e-2):
def with_bf32_disabled(self, function_call):
with bf32_off():
function_call()
def with_bf32_enabled(self, function_call):
with bf32_on(self, bf32_precision):
function_call()
def wrapper(f):
params = inspect.signature(f).parameters
arg_names = tuple(params.keys())
@functools.wraps(f)
def wrapped(*args, **kwargs):
kwargs.update(zip(arg_names, args))
cond = bf32_is_not_fp32()
if "device" in kwargs:
cond = cond and (torch.device(kwargs["device"]).type == "cpu")
if "dtype" in kwargs:
cond = cond and (kwargs["dtype"] == torch.float)
if cond:
with_bf32_disabled(kwargs["self"], lambda: f(**kwargs))
with_bf32_enabled(kwargs["self"], lambda: f(**kwargs))
else:
f(**kwargs)
return wrapped
return wrapper
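# A minimal usage sketch (illustrative; `MyMklTest` and `test_matmul` are
# hypothetical names). The decorated test runs on CPU float32 inputs, once with
# BF32 math mode disabled and, where supported, once with it enabled.
if __name__ == "__main__":
    from torch.testing._internal.common_utils import TestCase

    class MyMklTest(TestCase):
        @bf32_on_and_off(bf32_precision=1e-2)
        def test_matmul(self, device="cpu", dtype=torch.float):
            x = torch.randn(4, 8, device=device, dtype=dtype)
            w = torch.randn(8, 8, device=device, dtype=dtype)
            self.assertEqual(x @ w, x @ w)

    MyMklTest("test_matmul").test_matmul()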
|
# mypy: ignore-errors
import contextlib
import functools
import inspect
import torch
# Test whether the hardware BF32 math mode is enabled. It is enabled only when:
# - MKLDNN is available
# - BF16 is supported by MKLDNN
def bf32_is_not_fp32():
if not torch.backends.mkldnn.is_available():
return False
if not torch.ops.mkldnn._is_mkldnn_bf16_supported():
return False
return True
@contextlib.contextmanager
def bf32_off():
old_matmul_precision = torch.get_float32_matmul_precision()
try:
torch.set_float32_matmul_precision("highest")
yield
finally:
torch.set_float32_matmul_precision(old_matmul_precision)
@contextlib.contextmanager
def bf32_on(self, bf32_precision=1e-5):
old_matmul_precision = torch.get_float32_matmul_precision()
old_precision = self.precision
try:
torch.set_float32_matmul_precision("medium")
self.precision = bf32_precision
yield
finally:
torch.set_float32_matmul_precision(old_matmul_precision)
self.precision = old_precision
# This decorator wraps a test so that it runs twice: once with allow_bf32=True
# and once with allow_bf32=False. When running with allow_bf32=True, it uses
# the reduced precision specified by the bf32_precision argument.
def bf32_on_and_off(bf32_precision=1e-5):
def with_bf32_disabled(self, function_call):
with bf32_off():
function_call()
def with_bf32_enabled(self, function_call):
with bf32_on(self, bf32_precision):
function_call()
def wrapper(f):
params = inspect.signature(f).parameters
arg_names = tuple(params.keys())
@functools.wraps(f)
def wrapped(*args, **kwargs):
kwargs.update(zip(arg_names, args))
cond = bf32_is_not_fp32()
if "device" in kwargs:
cond = cond and (torch.device(kwargs["device"]).type == "cpu")
if "dtype" in kwargs:
cond = cond and (kwargs["dtype"] == torch.float)
if cond:
with_bf32_disabled(kwargs["self"], lambda: f(**kwargs))
with_bf32_enabled(kwargs["self"], lambda: f(**kwargs))
else:
f(**kwargs)
return wrapped
return wrapper
|
import warnings
from typing import Any, Dict, Union
import numpy as np
import PIL.Image
import torch
from torchvision.transforms import functional as _F
from torchvision.transforms.v2 import Transform
class ToTensor(Transform):
"""[DEPRECATED] Use ``v2.Compose([v2.ToImage(), v2.ToDtype(torch.float32, scale=True)])`` instead.
Convert a PIL Image or ndarray to tensor and scale the values accordingly.
.. warning::
:class:`v2.ToTensor` is deprecated and will be removed in a future release.
Please use instead ``v2.Compose([v2.ToImage(), v2.ToDtype(torch.float32, scale=True)])``.
This transform does not support torchscript.
Converts a PIL Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)
or if the numpy.ndarray has dtype = np.uint8
In the other cases, tensors are returned without scaling.
.. note::
Because the input image is scaled to [0.0, 1.0], this transformation should not be used when
transforming target image masks. See the `references`_ for implementing the transforms for image masks.
.. _references: https://github.com/pytorch/vision/tree/main/references/segmentation
"""
_transformed_types = (PIL.Image.Image, np.ndarray)
def __init__(self) -> None:
warnings.warn(
"The transform `ToTensor()` is deprecated and will be removed in a future release. "
"Instead, please use `v2.Compose([v2.ToImage(), v2.ToDtype(torch.float32, scale=True)])`."
)
super().__init__()
def _transform(self, inpt: Union[PIL.Image.Image, np.ndarray], params: Dict[str, Any]) -> torch.Tensor:
return _F.to_tensor(inpt)
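# A minimal migration sketch (illustrative), mirroring the deprecation message
# above; the 4x4 RGB image is just a stand-in for any PIL image.
if __name__ == "__main__":
    from torchvision.transforms import v2

    pil_image = PIL.Image.new("RGB", (4, 4))
    transform = v2.Compose([v2.ToImage(), v2.ToDtype(torch.float32, scale=True)])
    tensor = transform(pil_image)  # float32 tensor of shape (3, 4, 4), scaled to [0, 1]
    print(tensor.shape, tensor.dtype)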
|
import warnings
from typing import Any, Dict, Union
import numpy as np
import PIL.Image
import torch
from torchvision.transforms import functional as _F
from torchvision.transforms.v2 import Transform
class ToTensor(Transform):
"""[BETA] [DEPRECATED] Use ``v2.Compose([v2.ToImage(), v2.ToDtype(torch.float32, scale=True)])`` instead.
Convert a PIL Image or ndarray to tensor and scale the values accordingly.
.. v2betastatus:: ToTensor transform
.. warning::
:class:`v2.ToTensor` is deprecated and will be removed in a future release.
Please use instead ``v2.Compose([v2.ToImage(), v2.ToDtype(torch.float32, scale=True)])``.
This transform does not support torchscript.
Converts a PIL Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)
or if the numpy.ndarray has dtype = np.uint8
In the other cases, tensors are returned without scaling.
.. note::
Because the input image is scaled to [0.0, 1.0], this transformation should not be used when
transforming target image masks. See the `references`_ for implementing the transforms for image masks.
.. _references: https://github.com/pytorch/vision/tree/main/references/segmentation
"""
_transformed_types = (PIL.Image.Image, np.ndarray)
def __init__(self) -> None:
warnings.warn(
"The transform `ToTensor()` is deprecated and will be removed in a future release. "
"Instead, please use `v2.Compose([v2.ToImage(), v2.ToDtype(torch.float32, scale=True)])`."
)
super().__init__()
def _transform(self, inpt: Union[PIL.Image.Image, np.ndarray], params: Dict[str, Any]) -> torch.Tensor:
return _F.to_tensor(inpt)
|
import time
from queue import Queue
from threading import Event
from typing import Any, Generator, List, Optional
from uuid import UUID
from llama_index.core.bridge.langchain import BaseCallbackHandler, LLMResult
class StreamingGeneratorCallbackHandler(BaseCallbackHandler):
"""Streaming callback handler."""
def __init__(self) -> None:
self._token_queue: Queue = Queue()
self._done = Event()
def __deepcopy__(self, memo: Any) -> "StreamingGeneratorCallbackHandler":
# NOTE: hack to bypass deepcopy in langchain
return self
def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:
"""Run on new LLM token. Only available when streaming is enabled."""
self._token_queue.put_nowait(token)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
self._done.set()
def on_llm_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
**kwargs: Any,
) -> None:
self._done.set()
def get_response_gen(self, timeout: float = 120.0) -> Generator:
"""
Get response generator with timeout.
Args:
timeout (float): Maximum time in seconds to wait for the complete response.
Defaults to 120 seconds.
"""
start_time = time.time()
while True:
if time.time() - start_time > timeout:
raise TimeoutError(
f"Response generation timed out after {timeout} seconds"
)
if not self._token_queue.empty():
token = self._token_queue.get_nowait()
yield token
elif self._done.is_set():
break
else:
# Small sleep to prevent CPU spinning
time.sleep(0.01)
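# A minimal usage sketch (illustrative): `fake_llm_stream` below is a
# hypothetical stand-in for a LangChain LLM that fires the streaming callbacks.
if __name__ == "__main__":
    import threading

    handler = StreamingGeneratorCallbackHandler()

    def fake_llm_stream() -> None:
        for token in ["Hello", ", ", "world", "!"]:
            handler.on_llm_new_token(token)
        handler.on_llm_end(response=None)  # type: ignore[arg-type]

    threading.Thread(target=fake_llm_stream).start()
    print("".join(handler.get_response_gen(timeout=5.0)))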
|
import time
from queue import Queue
from threading import Event
from typing import Any, Generator, List, Optional
from uuid import UUID
from llama_index.core.bridge.langchain import BaseCallbackHandler, LLMResult
class StreamingGeneratorCallbackHandler(BaseCallbackHandler):
"""Streaming callback handler."""
def __init__(self) -> None:
self._token_queue: Queue = Queue()
self._done = Event()
def __deepcopy__(self, memo: Any) -> "StreamingGeneratorCallbackHandler":
# NOTE: hack to bypass deepcopy in langchain
return self
def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:
"""Run on new LLM token. Only available when streaming is enabled."""
self._token_queue.put_nowait(token)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
self._done.set()
def on_llm_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
**kwargs: Any,
) -> None:
self._done.set()
def get_response_gen(self, timeout: float = 120.0) -> Generator:
"""Get response generator with timeout.
Args:
timeout (float): Maximum time in seconds to wait for the complete response.
Defaults to 120 seconds.
"""
start_time = time.time()
while True:
if time.time() - start_time > timeout:
raise TimeoutError(
f"Response generation timed out after {timeout} seconds"
)
if not self._token_queue.empty():
token = self._token_queue.get_nowait()
yield token
elif self._done.is_set():
break
else:
# Small sleep to prevent CPU spinning
time.sleep(0.01)
|
import pytest  # type: ignore[import-not-found]
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
|
import pytest  # type: ignore[import-not-found]
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
pass
|
from typing import List, Optional
from llama_index.core.node_parser.text import TokenTextSplitter
from llama_index.core.node_parser.text.utils import truncate_text
from llama_index.core.schema import BaseNode
def get_numbered_text_from_nodes(
node_list: List[BaseNode],
text_splitter: Optional[TokenTextSplitter] = None,
) -> str:
"""
Get text from nodes in the format of a numbered list.
Used by tree-structured indices.
"""
results = []
number = 1
for node in node_list:
node_text = " ".join(node.get_content().splitlines())
if text_splitter is not None:
node_text = truncate_text(node_text, text_splitter)
text = f"({number}) {node_text}"
results.append(text)
number += 1
return "\n\n".join(results)
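# A minimal usage sketch (illustrative); TextNode is used only to build example
# nodes for the numbered-list formatting described in the docstring.
if __name__ == "__main__":
    from llama_index.core.schema import TextNode

    nodes = [TextNode(text="First point."), TextNode(text="Second point.")]
    print(get_numbered_text_from_nodes(nodes))
    # (1) First point.
    #
    # (2) Second point.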
|
from typing import List, Optional
from llama_index.core.node_parser.text import TokenTextSplitter
from llama_index.core.node_parser.text.utils import truncate_text
from llama_index.core.schema import BaseNode
def get_numbered_text_from_nodes(
node_list: List[BaseNode],
text_splitter: Optional[TokenTextSplitter] = None,
) -> str:
"""Get text from nodes in the format of a numbered list.
Used by tree-structured indices.
"""
results = []
number = 1
for node in node_list:
node_text = " ".join(node.get_content().splitlines())
if text_splitter is not None:
node_text = truncate_text(node_text, text_splitter)
text = f"({number}) {node_text}"
results.append(text)
number += 1
return "\n\n".join(results)
|
from docarray.typing.bytes import AudioBytes, ImageBytes, VideoBytes
from docarray.typing.id import ID
from docarray.typing.tensor import ImageNdArray, ImageTensor
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray
from docarray.typing.url import (
AnyUrl,
AudioUrl,
ImageUrl,
Mesh3DUrl,
PointCloud3DUrl,
TextUrl,
VideoUrl,
)
__all__ = [
'NdArray',
'NdArrayEmbedding',
'AudioNdArray',
'VideoNdArray',
'AnyEmbedding',
'ImageUrl',
'AudioUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
'AnyUrl',
'ID',
'AnyTensor',
'NdArrayEmbedding',
'ImageBytes',
'ImageTensor',
'ImageNdArray',
'ImageBytes',
'VideoBytes',
'AudioBytes',
]
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor # noqa
__all__.extend(
[
'AudioTorchTensor',
'TorchEmbedding',
'TorchTensor',
'VideoTorchTensor',
'ImageTorchTensor',
]
)
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor import TensorFlowTensor # noqa: F401
from docarray.typing.tensor.audio import AudioTensorFlowTensor # noqa: F401
from docarray.typing.tensor.embedding import TensorFlowEmbedding # noqa: F401
from docarray.typing.tensor.image import ImageTensorFlowTensor # noqa: F401
from docarray.typing.tensor.video import VideoTensorFlowTensor # noqa
__all__.extend(
[
'TensorFlowTensor',
'TensorFlowEmbedding',
'AudioTensorFlowTensor',
'ImageTensorFlowTensor',
'VideoTensorFlowTensor',
]
)
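# A minimal usage sketch (illustrative, not part of the original module);
# `MyDoc` and the example URL are hypothetical.
if __name__ == "__main__":
    import numpy as np
    from docarray import BaseDoc

    class MyDoc(BaseDoc):
        embedding: NdArrayEmbedding
        image_url: ImageUrl

    doc = MyDoc(embedding=np.zeros(128), image_url='https://example.com/cat.png')
    print(doc.embedding.shape, doc.image_url)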
|
from docarray.typing.bytes import ImageBytes
from docarray.typing.id import ID
from docarray.typing.tensor import ImageNdArray, ImageTensor
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray
from docarray.typing.url import (
AnyUrl,
AudioUrl,
ImageUrl,
Mesh3DUrl,
PointCloud3DUrl,
TextUrl,
VideoUrl,
)
__all__ = [
'NdArray',
'NdArrayEmbedding',
'AudioNdArray',
'VideoNdArray',
'AnyEmbedding',
'ImageUrl',
'AudioUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
'AnyUrl',
'ID',
'AnyTensor',
'NdArrayEmbedding',
'ImageBytes',
'ImageTensor',
'ImageNdArray',
]
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor # noqa
__all__.extend(
[
'AudioTorchTensor',
'TorchEmbedding',
'TorchTensor',
'VideoTorchTensor',
'ImageTorchTensor',
]
)
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor import TensorFlowTensor # noqa: F401
from docarray.typing.tensor.audio import AudioTensorFlowTensor # noqa: F401
from docarray.typing.tensor.embedding import TensorFlowEmbedding # noqa: F401
from docarray.typing.tensor.image import ImageTensorFlowTensor # noqa: F401
from docarray.typing.tensor.video import VideoTensorFlowTensor # noqa
__all__.extend(
[
'TensorFlowTensor',
'TensorFlowEmbedding',
'AudioTensorFlowTensor',
'ImageTensorFlowTensor',
'VideoTensorFlowTensor',
]
)
|
"""System message."""
from typing import Any, Literal, Union
from langchain_core.messages.base import BaseMessage, BaseMessageChunk
class SystemMessage(BaseMessage):
"""Message for priming AI behavior.
The system message is usually passed in as the first of a sequence
of input messages.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Define a chat model and invoke it with the messages
print(model.invoke(messages))
"""
type: Literal["system"] = "system"
"""The type of the message (used for serialization). Defaults to "system"."""
def __init__(
self, content: Union[str, list[Union[str, dict]]], **kwargs: Any
) -> None:
"""Pass in content as positional arg.
Args:
content: The string contents of the message.
kwargs: Additional fields to pass to the message.
"""
super().__init__(content=content, **kwargs)
SystemMessage.model_rebuild()
class SystemMessageChunk(SystemMessage, BaseMessageChunk):
"""System Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["SystemMessageChunk"] = "SystemMessageChunk" # type: ignore[assignment]
"""The type of the message (used for serialization).
Defaults to "SystemMessageChunk"."""
|
from typing import Any, Literal, Union
from langchain_core.messages.base import BaseMessage, BaseMessageChunk
class SystemMessage(BaseMessage):
"""Message for priming AI behavior.
The system message is usually passed in as the first of a sequence
of input messages.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Define a chat model and invoke it with the messages
print(model.invoke(messages))
"""
type: Literal["system"] = "system"
"""The type of the message (used for serialization). Defaults to "system"."""
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
Default is ["langchain", "schema", "messages"].
"""
return ["langchain", "schema", "messages"]
def __init__(
self, content: Union[str, list[Union[str, dict]]], **kwargs: Any
) -> None:
"""Pass in content as positional arg.
Args:
content: The string contents of the message.
kwargs: Additional fields to pass to the message.
"""
super().__init__(content=content, **kwargs)
SystemMessage.model_rebuild()
class SystemMessageChunk(SystemMessage, BaseMessageChunk):
"""System Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["SystemMessageChunk"] = "SystemMessageChunk" # type: ignore[assignment]
"""The type of the message (used for serialization).
Defaults to "SystemMessageChunk"."""
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
Default is ["langchain", "schema", "messages"].
"""
return ["langchain", "schema", "messages"]
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py', '../common/mstrain_3x_coco.py'
]
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py', '../common/mstrain_3x_coco.py'
]
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
import json
import os
import pytest
from hubble.executor import HubExecutor
from hubble.executor.hubio import HubIO
from jina import __version__
from jina.orchestrate.deployments.config.helper import (
get_base_executor_version,
get_image_name,
to_compatible_name,
)
@pytest.mark.parametrize('is_master', (True, False))
def test_version(is_master, requests_mock):
if is_master:
count = 0
else:
# current version is published already
count = 3
requests_mock.get(
'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags',
text=json.dumps(
{
'count': count,
'next': 'abc',
'previous': 'def',
'results': [{'a': 'b', 'c': 'd'}],
}
),
)
v = get_base_executor_version()
if is_master:
assert v == 'master'
else:
assert v == __version__
def test_to_compatible_name():
assert to_compatible_name('executor/hey-ha_HO') == 'executor-hey-ha-ho'
@pytest.mark.parametrize('uses', ['jinaai://jina-ai/DummyExecutor'])
def test_get_image_name(mocker, monkeypatch, uses):
mock = mocker.Mock()
def _mock_fetch(
name,
tag,
image_required=True,
rebuild_image=True,
*,
prefer_platform=None,
secret=None,
force=False,
):
mock(name=name, rebuild_image=rebuild_image)
return (
HubExecutor(
uuid='hello',
name=name,
tag='v0',
image_name=f'jinahub/{name}',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
image_name = get_image_name(uses)
assert image_name in {'jinahub/DummyExecutor', 'jinahub/jina-ai/DummyExecutor'}
_, mock_kwargs = mock.call_args_list[0]
assert mock_kwargs['rebuild_image'] is True # default value must be True
os.environ['JINA_HUB_NO_IMAGE_REBUILD'] = '1'
get_image_name(uses)
del os.environ['JINA_HUB_NO_IMAGE_REBUILD']
_, mock_kwargs = mock.call_args_list[1]
assert mock_kwargs['rebuild_image'] is False # env var is set, so it must be False
|
import json
import os
import pytest
from hubble.executor import HubExecutor
from hubble.executor.hubio import HubIO
from jina import __version__
from jina.orchestrate.deployments.config.helper import (
get_base_executor_version,
get_image_name,
to_compatible_name,
)
@pytest.mark.parametrize('is_master', (True, False))
def test_version(is_master, requests_mock):
if is_master:
count = 0
else:
# current version is published already
count = 3
requests_mock.get(
'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags',
text=json.dumps(
{
'count': count,
'next': 'abc',
'previous': 'def',
'results': [{'a': 'b', 'c': 'd'}],
}
),
)
v = get_base_executor_version()
if is_master:
assert v == 'master'
else:
assert v == __version__
def test_to_compatible_name():
assert to_compatible_name('executor/hey-ha_HO') == 'executor-hey-ha-ho'
@pytest.mark.parametrize(
'uses', ['jinaai://jina-ai/DummyExecutor']
)
def test_get_image_name(mocker, monkeypatch, uses):
mock = mocker.Mock()
def _mock_fetch(
name,
tag,
image_required=True,
rebuild_image=True,
*,
secret=None,
force=False,
):
mock(name=name, rebuild_image=rebuild_image)
return (
HubExecutor(
uuid='hello',
name=name,
tag='v0',
image_name=f'jinahub/{name}',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
image_name = get_image_name(uses)
assert image_name in {'jinahub/DummyExecutor', 'jinahub/jina-ai/DummyExecutor'}
_, mock_kwargs = mock.call_args_list[0]
assert mock_kwargs['rebuild_image'] is True # default value must be True
os.environ['JINA_HUB_NO_IMAGE_REBUILD'] = '1'
get_image_name(uses)
del os.environ['JINA_HUB_NO_IMAGE_REBUILD']
_, mock_kwargs = mock.call_args_list[1]
assert mock_kwargs['rebuild_image'] is False # env var is set, so it must be False
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_dpt import *
from .feature_extraction_dpt import *
from .image_processing_dpt import *
from .image_processing_dpt_fast import *
from .modeling_dpt import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_dpt import *
from .feature_extraction_dpt import *
from .image_processing_dpt import *
from .modeling_dpt import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
import numpy as np
import pytest
from docarray.documents import Mesh3D
from tests import TOYDATA_DIR
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_mesh(file_url):
mesh = Mesh3D(url=file_url)
mesh.vertices, mesh.faces = mesh.url.load()
assert isinstance(mesh.vertices, np.ndarray)
assert isinstance(mesh.faces, np.ndarray)
|
import numpy as np
import pytest
from docarray import Mesh3D
from tests import TOYDATA_DIR
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_mesh(file_url):
mesh = Mesh3D(url=file_url)
mesh.vertices, mesh.faces = mesh.url.load()
assert isinstance(mesh.vertices, np.ndarray)
assert isinstance(mesh.faces, np.ndarray)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
|
from keras.src import activations
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
def _large_negative_number(dtype):
"""Return a Large negative number based on dtype."""
if backend.standardize_dtype(dtype) == "float16":
return -3e4
return -1e9
@keras_export("keras.layers.Softmax")
class Softmax(Layer):
"""Softmax activation layer.
Formula:
``` python
exp_x = exp(x - max(x))
f(x) = exp_x / sum(exp_x)
```
Example:
>>> softmax_layer = keras.layers.Softmax()
>>> input = np.array([1.0, 2.0, 1.0])
>>> result = softmax_layer(input)
>>> result
[0.21194157, 0.5761169, 0.21194157]
Args:
axis: Integer, or list of Integers, axis along which the softmax
normalization is applied.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
Call arguments:
inputs: The inputs (logits) to the softmax layer.
mask: A boolean mask of the same shape as `inputs`. The mask
specifies 1 to keep and 0 to mask. Defaults to `None`.
Returns:
Softmaxed output with the same shape as `inputs`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
self.axis = axis
self.supports_masking = True
self._build_at_init()
def call(self, inputs, mask=None):
if mask is not None:
adder = (
1.0 - backend.cast(mask, inputs.dtype)
) * _large_negative_number(inputs.dtype)
inputs += adder
if isinstance(self.axis, (tuple, list)):
if len(self.axis) > 1:
return backend.numpy.exp(
inputs
- backend.math.logsumexp(
inputs, axis=self.axis, keepdims=True
)
)
else:
return activations.softmax(inputs, axis=self.axis[0])
return activations.softmax(inputs, axis=self.axis)
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
def compute_output_shape(self, input_shape):
return input_shape
|
from keras.src import activations
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
def _large_negative_number(dtype):
"""Return a Large negative number based on dtype."""
if backend.standardize_dtype(dtype) == "float16":
return -3e4
return -1e9
@keras_export("keras.layers.Softmax")
class Softmax(Layer):
"""Softmax activation layer.
Formula:
``` python
exp_x = exp(x - max(x))
f(x) = exp_x / sum(exp_x)
```
Example:
>>> softmax_layer = keras.layers.Softmax()
>>> input = np.array([1.0, 2.0, 1.0])
>>> result = softmax_layer(input)
>>> result
[0.21194157, 0.5761169, 0.21194157]
Args:
axis: Integer, or list of Integers, axis along which the softmax
normalization is applied.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
Call arguments:
inputs: The inputs (logits) to the softmax layer.
mask: A boolean mask of the same shape as `inputs`. The mask
specifies 1 to keep and 0 to mask. Defaults to `None`.
Returns:
Softmaxed output with the same shape as `inputs`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
self.axis = axis
self.supports_masking = True
self.built = True
def call(self, inputs, mask=None):
if mask is not None:
adder = (
1.0 - backend.cast(mask, inputs.dtype)
) * _large_negative_number(inputs.dtype)
inputs += adder
if isinstance(self.axis, (tuple, list)):
if len(self.axis) > 1:
return backend.numpy.exp(
inputs
- backend.math.logsumexp(
inputs, axis=self.axis, keepdims=True
)
)
else:
return activations.softmax(inputs, axis=self.axis[0])
return activations.softmax(inputs, axis=self.axis)
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
def compute_output_shape(self, input_shape):
return input_shape
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
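# Note (editor's addition): `_delete_=True` in the bbox_head below replaces the
# bbox_head inherited from the base config instead of merging with it, so the
# DoubleConvFCBBoxHead is specified from scratch here.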
model = dict(
roi_head=dict(
type='DoubleHeadRoIHead',
reg_roi_scale_factor=1.3,
bbox_head=dict(
_delete_=True,
type='DoubleConvFCBBoxHead',
num_convs=4,
num_fcs=2,
in_channels=256,
conv_out_channels=1024,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=2.0))))
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
type='DoubleHeadRoIHead',
reg_roi_scale_factor=1.3,
bbox_head=dict(
_delete_=True,
type='DoubleConvFCBBoxHead',
num_convs=4,
num_fcs=2,
in_channels=256,
conv_out_channels=1024,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=2.0))))
|
import warnings
from typing import Any, List, Union
import PIL.Image
import torch
from torchvision.prototype import datapoints
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:
call = ", num_output_channels=3" if num_output_channels == 3 else ""
replacement = "convert_color_space(..., color_space=datapoints.ColorSpace.GRAY)"
if num_output_channels == 3:
replacement = f"convert_color_space({replacement}, color_space=datapoints.ColorSpace.RGB)"
    warnings.warn(
        f"The function `to_grayscale(...{call})` is deprecated and will be removed in a future release. "
f"Instead, please use `{replacement}`.",
)
return _F.to_grayscale(inpt, num_output_channels=num_output_channels)
def rgb_to_grayscale(
inpt: Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT], num_output_channels: int = 1
) -> Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT]:
if not torch.jit.is_scripting() and isinstance(inpt, (datapoints.Image, datapoints.Video)):
inpt = inpt.as_subclass(torch.Tensor)
old_color_space = None
elif isinstance(inpt, torch.Tensor):
old_color_space = datapoints._image._from_tensor_shape(inpt.shape) # type: ignore[arg-type]
else:
old_color_space = None
call = ", num_output_channels=3" if num_output_channels == 3 else ""
replacement = (
f"convert_color_space(..., color_space=datapoints.ColorSpace.GRAY"
f"{f', old_color_space=datapoints.ColorSpace.{old_color_space}' if old_color_space is not None else ''})"
)
if num_output_channels == 3:
replacement = (
f"convert_color_space({replacement}, color_space=datapoints.ColorSpace.RGB"
f"{f', old_color_space=datapoints.ColorSpace.GRAY' if old_color_space is not None else ''})"
)
    warnings.warn(
        f"The function `rgb_to_grayscale(...{call})` is deprecated and will be removed in a future release. "
f"Instead, please use `{replacement}`.",
)
return _F.rgb_to_grayscale(inpt, num_output_channels=num_output_channels)
@torch.jit.unused
def to_tensor(inpt: Any) -> torch.Tensor:
warnings.warn(
"The function `to_tensor(...)` is deprecated and will be removed in a future release. "
"Instead, please use `to_image_tensor(...)` followed by `convert_image_dtype(...)`."
)
return _F.to_tensor(inpt)
def get_image_size(inpt: Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT]) -> List[int]:
warnings.warn(
"The function `get_image_size(...)` is deprecated and will be removed in a future release. "
"Instead, please use `get_spatial_size(...)` which returns `[h, w]` instead of `[w, h]`."
)
return _F.get_image_size(inpt)
|
import warnings
from typing import Any, List, Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:
call = ", num_output_channels=3" if num_output_channels == 3 else ""
replacement = "convert_color_space(..., color_space=features.ColorSpace.GRAY)"
if num_output_channels == 3:
replacement = f"convert_color_space({replacement}, color_space=features.ColorSpace.RGB)"
    warnings.warn(
        f"The function `to_grayscale(...{call})` is deprecated and will be removed in a future release. "
f"Instead, please use `{replacement}`.",
)
return _F.to_grayscale(inpt, num_output_channels=num_output_channels)
def rgb_to_grayscale(
inpt: Union[features.ImageTypeJIT, features.VideoTypeJIT], num_output_channels: int = 1
) -> Union[features.ImageTypeJIT, features.VideoTypeJIT]:
if not torch.jit.is_scripting() and isinstance(inpt, (features.Image, features.Video)):
inpt = inpt.as_subclass(torch.Tensor)
old_color_space = None
elif isinstance(inpt, torch.Tensor):
old_color_space = features._image._from_tensor_shape(inpt.shape) # type: ignore[arg-type]
else:
old_color_space = None
call = ", num_output_channels=3" if num_output_channels == 3 else ""
replacement = (
f"convert_color_space(..., color_space=features.ColorSpace.GRAY"
f"{f', old_color_space=features.ColorSpace.{old_color_space}' if old_color_space is not None else ''})"
)
if num_output_channels == 3:
replacement = (
f"convert_color_space({replacement}, color_space=features.ColorSpace.RGB"
f"{f', old_color_space=features.ColorSpace.GRAY' if old_color_space is not None else ''})"
)
    warnings.warn(
        f"The function `rgb_to_grayscale(...{call})` is deprecated and will be removed in a future release. "
f"Instead, please use `{replacement}`.",
)
return _F.rgb_to_grayscale(inpt, num_output_channels=num_output_channels)
@torch.jit.unused
def to_tensor(inpt: Any) -> torch.Tensor:
warnings.warn(
"The function `to_tensor(...)` is deprecated and will be removed in a future release. "
"Instead, please use `to_image_tensor(...)` followed by `convert_image_dtype(...)`."
)
return _F.to_tensor(inpt)
def get_image_size(inpt: Union[features.ImageTypeJIT, features.VideoTypeJIT]) -> List[int]:
warnings.warn(
"The function `get_image_size(...)` is deprecated and will be removed in a future release. "
"Instead, please use `get_spatial_size(...)` which returns `[h, w]` instead of `[w, h]`."
)
return _F.get_image_size(inpt)
|
_base_ = './cascade-rcnn_hrnetv2p-w32-20e_coco.py'
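# Note (editor's addition): relative to the HRNetV2p-W32 base config, this file
# only swaps in the wider W40 backbone (channel widths, matching HRFPN
# in_channels, and the hrnetv2_w40 pretrained checkpoint).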
# model settings
model = dict(
backbone=dict(
type='HRNet',
extra=dict(
stage2=dict(num_channels=(40, 80)),
stage3=dict(num_channels=(40, 80, 160)),
stage4=dict(num_channels=(40, 80, 160, 320))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')),
neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))
|
_base_ = './cascade_rcnn_hrnetv2p_w32_20e_coco.py'
# model settings
model = dict(
backbone=dict(
type='HRNet',
extra=dict(
stage2=dict(num_channels=(40, 80)),
stage3=dict(num_channels=(40, 80, 160)),
stage4=dict(num_channels=(40, 80, 160, 320))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')),
neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))
|
import base64
import os
import pytest
from unittest import mock
from llama_index.core.base.llms.types import ChatMessage, ChatResponse, MessageRole
from llama_index.core.multi_modal_llms.base import MultiModalLLM
from llama_index.multi_modal_llms.zhipuai import ZhipuAIMultiModal
from zhipuai.types.chat.chat_completion import (
Completion,
CompletionChoice,
CompletionMessage,
CompletionUsage,
)
_FAKE_API_KEY = "ZHIPUAI_API_KEY"
_FAKE_CHAT_COMPLETIONS_RESPONSE = Completion(
id="some_id",
choices=[
CompletionChoice(
index=0,
finish_reason="stop",
message=CompletionMessage(
role=MessageRole.ASSISTANT,
content="nothing in the video",
),
)
],
usage=CompletionUsage(
prompt_tokens=10,
completion_tokens=10,
total_tokens=20,
),
)
def test_multi_modal_llm_class():
names_of_base_classes = [b.__name__ for b in ZhipuAIMultiModal.__mro__]
assert MultiModalLLM.__name__ in names_of_base_classes
def test_multi_modal_llm_series():
llm = ZhipuAIMultiModal(model="glm-4v-plus", api_key="")
assert llm.has_completions_api() is True
llm = ZhipuAIMultiModal(model="cogview-3-plus", api_key="")
assert llm.has_completions_api() is False
llm = ZhipuAIMultiModal(model="cogvideox", api_key="")
assert llm.has_videos_generations_api() is True
def test_get_glm_model_context_size():
llm = ZhipuAIMultiModal(model="glm-4v", api_key="")
assert llm.metadata.context_window > 0
assert llm.model_kwargs
with pytest.raises(ValueError):
llm = ZhipuAIMultiModal(model="glm-x", api_key="")
assert llm.metadata.context_window
def test_fake_llm_chat_and_complete():
messages = [ChatMessage(role=MessageRole.USER, content="describe the video")]
expected_response = ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content="nothing in the video",
additional_kwargs={"tool_calls": None},
),
raw=_FAKE_CHAT_COMPLETIONS_RESPONSE,
)
llm = ZhipuAIMultiModal(model="glm-4v-plus", api_key=_FAKE_API_KEY)
with mock.patch.object(
llm._client.chat.completions,
"create",
return_value=_FAKE_CHAT_COMPLETIONS_RESPONSE,
):
actual_response = llm.chat(messages=messages)
assert actual_response == expected_response
@pytest.mark.skipif(
os.getenv("ZHIPUAI_API_KEY") is None, reason="ZHIPUAI_API_KEY not set"
)
def test_llm_chat_and_complete():
# test glm-4v completion
llm = ZhipuAIMultiModal(model="glm-4v", api_key=os.getenv("ZHIPUAI_API_KEY"))
with open(os.getenv("ZHIPUAI_TEST_VIDEO"), "rb") as video_file:
video_base = base64.b64encode(video_file.read()).decode("utf-8")
messages = [
ChatMessage(
role=MessageRole.USER,
content=[
{"type": "video_url", "video_url": {"url": video_base}},
                {"type": "text", "text": "describe the video"},
],
),
]
assert llm.chat(messages)
    assert llm.complete("describe the video", video_url=video_base)
assert llm.stream_chat(messages)
    assert llm.stream_complete("describe the video", video_url=video_base)
# test cogview or cogvideox
llm = ZhipuAIMultiModal(
model="cogvideox", api_key=os.getenv("ZHIPUAI_API_KEY"), size="768x1344"
)
messages = [
ChatMessage(
role=MessageRole.USER,
content=[{"type": "text", "text": "a bird flying in the sky"}],
),
]
assert llm.chat(messages)
assert llm.complete("a bird flying in the sky")
@pytest.mark.asyncio
@pytest.mark.skipif(
os.getenv("ZHIPUAI_API_KEY") is None, reason="ZHIPUAI_API_KEY not set"
)
async def test_async_llm_chat_and_complete():
# test glm-4v completion
llm = ZhipuAIMultiModal(model="glm-4v", api_key=os.getenv("ZHIPUAI_API_KEY"))
with open(os.getenv("ZHIPUAI_TEST_VIDEO"), "rb") as video_file:
video_base = base64.b64encode(video_file.read()).decode("utf-8")
messages = [
ChatMessage(
role=MessageRole.USER,
content=[
{"type": "video_url", "video_url": {"url": video_base}},
                {"type": "text", "text": "describe the video"},
],
),
]
assert await llm.astream_chat(messages)
    assert await llm.astream_complete("describe the video", video_url=video_base)
llm = ZhipuAIMultiModal(
model="cogview-3-plus", api_key=os.getenv("ZHIPUAI_API_KEY")
)
assert await llm.acomplete("draw a bird flying in the sky")
|
import base64
import os
import pytest
from unittest import mock
from llama_index.core.base.llms.types import ChatMessage, ChatResponse, MessageRole
from llama_index.core.multi_modal_llms.base import MultiModalLLM
from llama_index.multi_modal_llms.zhipuai import ZhipuAIMultiModal
from zhipuai.types.chat.chat_completion import (
Completion,
CompletionChoice,
CompletionMessage,
CompletionUsage,
)
_FAKE_API_KEY = "ZHIPUAI_API_KEY"
_FAKE_CHAT_COMPLETIONS_RESPONSE = Completion(
id="some_id",
choices=[
CompletionChoice(
index=0,
finish_reason="stop",
message=CompletionMessage(
role=MessageRole.ASSISTANT,
content="nothing in the video",
),
)
],
usage=CompletionUsage(
prompt_tokens=10,
completion_tokens=10,
total_tokens=20,
),
)
def test_multi_modal_llm_class():
names_of_base_classes = [b.__name__ for b in ZhipuAIMultiModal.__mro__]
assert MultiModalLLM.__name__ in names_of_base_classes
def test_multi_modal_llm_series():
llm = ZhipuAIMultiModal(model="glm-4v-plus", api_key="")
assert llm.has_completions_api() is True
llm = ZhipuAIMultiModal(model="cogview-3-plus", api_key="")
assert llm.has_completions_api() is False
llm = ZhipuAIMultiModal(model="cogvideox", api_key="")
assert llm.has_videos_generations_api() is True
def test_get_glm_model_context_size():
llm = ZhipuAIMultiModal(model="glm-4v", api_key="")
assert llm.metadata.context_window > 0
assert llm.model_kwargs
with pytest.raises(ValueError):
llm = ZhipuAIMultiModal(model="glm-x", api_key="")
assert llm.metadata.context_window
def test_fake_llm_chat_and_complete():
messages = [ChatMessage(role=MessageRole.USER, content="describe the video")]
expected_response = ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content="nothing in the video",
additional_kwargs={"tool_calls": None},
),
raw=_FAKE_CHAT_COMPLETIONS_RESPONSE,
)
llm = ZhipuAIMultiModal(model="glm-4v-plus", api_key=_FAKE_API_KEY)
with mock.patch.object(
llm._client.chat.completions,
"create",
return_value=_FAKE_CHAT_COMPLETIONS_RESPONSE,
):
actual_response = llm.chat(messages=messages)
assert actual_response == expected_response
@pytest.mark.skipif(
os.getenv("ZHIPUAI_API_KEY") is None, reason="ZHIPUAI_API_KEY not set"
)
def test_llm_chat_and_complete():
# test glm-4v completion
llm = ZhipuAIMultiModal(model="glm-4v", api_key=os.getenv("ZHIPUAI_API_KEY"))
with open(os.getenv("ZHIPUAI_TEST_VIDEO"), "rb") as video_file:
video_base = base64.b64encode(video_file.read()).decode("utf-8")
messages = [
ChatMessage(
role=MessageRole.USER,
content=[
{"type": "video_url", "video_url": {"url": video_base}},
                {"type": "text", "text": "describe the video"},
],
),
]
assert llm.chat(messages)
    assert llm.complete("describe the video", video_url=video_base)
assert llm.stream_chat(messages)
    assert llm.stream_complete("describe the video", video_url=video_base)
# test cogview or cogvideox
llm = ZhipuAIMultiModal(
model="cogvideox", api_key=os.getenv("ZHIPUAI_API_KEY"), size="768x1344"
)
messages = [
ChatMessage(
role=MessageRole.USER,
content=[{"type": "text", "text": "a bird flying in the sky"}],
),
]
assert llm.chat(messages)
assert llm.complete("a bird flying in the sky")
@pytest.mark.asyncio()
@pytest.mark.skipif(
os.getenv("ZHIPUAI_API_KEY") is None, reason="ZHIPUAI_API_KEY not set"
)
async def test_async_llm_chat_and_complete():
# test glm-4v completion
llm = ZhipuAIMultiModal(model="glm-4v", api_key=os.getenv("ZHIPUAI_API_KEY"))
with open(os.getenv("ZHIPUAI_TEST_VIDEO"), "rb") as video_file:
video_base = base64.b64encode(video_file.read()).decode("utf-8")
messages = [
ChatMessage(
role=MessageRole.USER,
content=[
{"type": "video_url", "video_url": {"url": video_base}},
                {"type": "text", "text": "describe the video"},
],
),
]
assert await llm.astream_chat(messages)
    assert await llm.astream_complete("describe the video", video_url=video_base)
llm = ZhipuAIMultiModal(
model="cogview-3-plus", api_key=os.getenv("ZHIPUAI_API_KEY")
)
assert await llm.acomplete("draw a bird flying in the sky")
|
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from transform_encoder import TransformerTorchEncoder
_EMBEDDING_DIM = 768
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=TransformerTorchEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
|
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...transform_encoder import TransformerTorchEncoder
_EMBEDDING_DIM = 768
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=TransformerTorchEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
|
from typing import Any, Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import fullname
class CosineSimilarityLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
        CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SentenceTransformer model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
                default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../examples/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CosineSimilarityLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(CosineSimilarityLoss, self).__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.float().view(-1))
def get_config_dict(self) -> Dict[str, Any]:
return {"loss_fct": fullname(self.loss_fct)}
|
from typing import Any, Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import fullname
class CosineSimilarityLoss(nn.Module):
def __init__(self, model: SentenceTransformer, loss_fct=nn.MSELoss(), cos_score_transformation=nn.Identity()):
"""
        CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SentenceTransformer model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
                default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../examples/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CosineSimilarityLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(CosineSimilarityLoss, self).__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.float().view(-1))
def get_config_dict(self) -> Dict[str, Any]:
return {"loss_fct": fullname(self.loss_fct)}
|
from textwrap import dedent
from types import SimpleNamespace
from unittest.mock import patch
from urllib.parse import quote
import pytest
from huggingface_hub import CommitOperationAdd, CommitOperationDelete
import datasets
from datasets.config import METADATA_CONFIGS_FIELD
from datasets.hub import delete_from_hub
from datasets.utils.hub import hf_dataset_url
DUMMY_DATASET_SCRIPT = dedent("""\
import datasets
class NewDataset(datasets.GeneratorBasedBuilder):
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="first"),
datasets.BuilderConfig(name="second"),
]
DEFAULT_CONFIG_NAME = "first"
def _info(self):
return datasets.DatasetInfo(
features=datasets.Features({"text": datasets.Value("string")}),
)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
def _generate_examples(self):
for key in range(5):
yield key, {"text": f"{self.config.name}-{key}"}
""")
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("filename", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_dataset_url(repo_id, filename, revision):
url = hf_dataset_url(repo_id=repo_id, filename=filename, revision=revision)
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(filename)}"
def test_delete_from_hub(temporary_repo, hf_api, hf_token, csv_path, ci_hub_config) -> None:
with temporary_repo() as repo_id:
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
hf_api.upload_file(
path_or_fileobj=str(csv_path),
path_in_repo="cats/train/0000.csv",
repo_id=repo_id,
repo_type="dataset",
token=hf_token,
)
hf_api.upload_file(
path_or_fileobj=str(csv_path),
path_in_repo="dogs/train/0000.csv",
repo_id=repo_id,
repo_type="dataset",
token=hf_token,
)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=dedent(f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: cats
data_files:
- split: train
path: cats/train/*
- config_name: dogs
data_files:
- split: train
path: dogs/train/*
---
""").encode(),
path_in_repo="README.md",
repo_id=repo_id,
repo_type="dataset",
)
commit_info = SimpleNamespace(
            pr_url="https://hub-ci.huggingface.co/datasets/__DUMMY_USER__/__DUMMY_DATASET__/refs%2Fpr%2F1"
)
with patch.object(datasets.hub.HfApi, "create_commit", return_value=commit_info) as mock_method:
_ = delete_from_hub(repo_id, "dogs")
assert mock_method.called
assert mock_method.call_args.kwargs.get("commit_message") == "Delete 'dogs' config"
assert mock_method.call_args.kwargs.get("create_pr")
expected_operations = [
CommitOperationDelete(path_in_repo="dogs/train/0000.csv", is_folder=False),
CommitOperationAdd(
path_in_repo="README.md",
path_or_fileobj=dedent(f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: cats
data_files:
- split: train
path: cats/train/*
---
""").encode(),
),
]
assert mock_method.call_args.kwargs.get("operations") == expected_operations
|
from textwrap import dedent
from types import SimpleNamespace
from unittest.mock import patch
from urllib.parse import quote
import pytest
from huggingface_hub import CommitOperationAdd, CommitOperationDelete
import datasets
from datasets.config import METADATA_CONFIGS_FIELD
from datasets.hub import delete_from_hub
from datasets.utils.hub import hf_dataset_url
DUMMY_DATASET_SCRIPT = dedent("""\
import datasets
class NewDataset(datasets.GeneratorBasedBuilder):
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="first"),
datasets.BuilderConfig(name="second"),
]
DEFAULT_CONFIG_NAME = "first"
def _info(self):
return datasets.DatasetInfo(
features=datasets.Features({"text": datasets.Value("string")}),
)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
def _generate_examples(self):
for key in range(5):
yield key, {"text": f"{self.config.name}-{key}"}
""")
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("filename", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_dataset_url(repo_id, filename, revision):
url = hf_dataset_url(repo_id=repo_id, filename=filename, revision=revision)
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(filename)}"
def test_delete_from_hub(temporary_repo, hf_api, hf_token, csv_path, ci_hub_config, ci_hfh_hf_hub_url) -> None:
with temporary_repo() as repo_id:
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
hf_api.upload_file(
path_or_fileobj=str(csv_path),
path_in_repo="cats/train/0000.csv",
repo_id=repo_id,
repo_type="dataset",
token=hf_token,
)
hf_api.upload_file(
path_or_fileobj=str(csv_path),
path_in_repo="dogs/train/0000.csv",
repo_id=repo_id,
repo_type="dataset",
token=hf_token,
)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=dedent(f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: cats
data_files:
- split: train
path: cats/train/*
- config_name: dogs
data_files:
- split: train
path: dogs/train/*
---
""").encode(),
path_in_repo="README.md",
repo_id=repo_id,
repo_type="dataset",
)
commit_info = SimpleNamespace(
            pr_url="https://hub-ci.huggingface.co/datasets/__DUMMY_USER__/__DUMMY_DATASET__/refs%2Fpr%2F1"
)
with patch.object(datasets.hub.HfApi, "create_commit", return_value=commit_info) as mock_method:
_ = delete_from_hub(repo_id, "dogs")
assert mock_method.called
assert mock_method.call_args.kwargs.get("commit_message") == "Delete 'dogs' config"
assert mock_method.call_args.kwargs.get("create_pr")
expected_operations = [
CommitOperationDelete(path_in_repo="dogs/train/0000.csv", is_folder=False),
CommitOperationAdd(
path_in_repo="README.md",
path_or_fileobj=dedent(f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: cats
data_files:
- split: train
path: cats/train/*
---
""").encode(),
),
]
assert mock_method.call_args.kwargs.get("operations") == expected_operations
|
import base64
import json
from typing import List, Dict, Union, NewType, Any, Optional
import numpy as np
import strawberry
from docarray.math.ndarray import to_list
_ProtoValueType = Union[bool, float, str]
_StructValueType = Union[
_ProtoValueType, List[_ProtoValueType], Dict[str, _ProtoValueType]
]
JSONScalar = strawberry.scalar(
NewType('JSONScalar', Any),
serialize=lambda v: v,
parse_value=lambda v: json.loads(v),
description="The GenericScalar scalar type represents a generic GraphQL scalar value that could be: List or Object.",
)
Base64 = strawberry.scalar(
NewType('Base64', bytes),
serialize=lambda v: base64.b64encode(v).decode('utf-8'),
parse_value=lambda v: base64.b64decode(v.encode('utf-8')),
)
NdArray = strawberry.scalar(
NewType('NdArray', bytes),
serialize=lambda v: to_list(v),
parse_value=lambda v: np.array(v),
)
### interface
@strawberry.interface
class _NamedScoreInterface:
value: Optional[float] = None
op_name: Optional[str] = None
description: Optional[str] = None
ref_id: Optional[str] = None
@strawberry.interface
class _BaseStrawberryDocumentInterface:
id: Optional[str] = None
parent_id: Optional[str] = None
granularity: Optional[int] = None
adjacency: Optional[int] = None
blob: Optional[Base64] = None
tensor: Optional[NdArray] = None
mime_type: Optional[str] = None
text: Optional[str] = None
weight: Optional[float] = None
uri: Optional[str] = None
tags: Optional[JSONScalar] = None
offset: Optional[float] = None
location: Optional[List[float]] = None
embedding: Optional[NdArray] = None
modality: Optional[str] = None
### type
@strawberry.type
class _NamedScore(_NamedScoreInterface):
...
@strawberry.type
class _NameScoreItem:
name: str
score: _NamedScore
@strawberry.type
class StrawberryDocument(strawberry.type(_BaseStrawberryDocumentInterface)):
evaluations: Optional[List[_NameScoreItem]] = None
scores: Optional[List[_NameScoreItem]] = None
chunks: Optional[List['StrawberryDocument']] = None
matches: Optional[List['StrawberryDocument']] = None
### input
@strawberry.input
class _NamedScoreInput(_NamedScoreInterface):
...
@strawberry.input
class _NameScoreItemInput:
name: str
score: _NamedScoreInput
@strawberry.input
class StrawberryDocumentInput(strawberry.input(_BaseStrawberryDocumentInterface)):
evaluations: Optional[List[_NameScoreItemInput]] = None
scores: Optional[List[_NameScoreItemInput]] = None
chunks: Optional[List['StrawberryDocumentInput']] = None
matches: Optional[List['StrawberryDocumentInput']] = None
|
import base64
import json
from typing import List, Dict, Union, NewType, Any, Optional
import numpy as np
import strawberry
from ..math.ndarray import to_list
_ProtoValueType = Union[bool, float, str]
_StructValueType = Union[
_ProtoValueType, List[_ProtoValueType], Dict[str, _ProtoValueType]
]
JSONScalar = strawberry.scalar(
NewType('JSONScalar', Any),
serialize=lambda v: v,
parse_value=lambda v: json.loads(v),
description="The GenericScalar scalar type represents a generic GraphQL scalar value that could be: List or Object.",
)
Base64 = strawberry.scalar(
NewType('Base64', bytes),
serialize=lambda v: base64.b64encode(v).decode('utf-8'),
parse_value=lambda v: base64.b64decode(v.encode('utf-8')),
)
NdArray = strawberry.scalar(
NewType('NdArray', bytes),
serialize=lambda v: to_list(v),
parse_value=lambda v: np.array(v),
)
### interface
@strawberry.interface
class _NamedScoreInterface:
value: Optional[float] = None
op_name: Optional[str] = None
description: Optional[str] = None
ref_id: Optional[str] = None
@strawberry.interface
class _BaseStrawberryDocumentInterface:
id: Optional[str] = None
parent_id: Optional[str] = None
granularity: Optional[int] = None
adjacency: Optional[int] = None
blob: Optional[Base64] = None
tensor: Optional[NdArray] = None
mime_type: Optional[str] = None
text: Optional[str] = None
weight: Optional[float] = None
uri: Optional[str] = None
tags: Optional[JSONScalar] = None
offset: Optional[float] = None
location: Optional[List[float]] = None
embedding: Optional[NdArray] = None
modality: Optional[str] = None
### type
@strawberry.type
class _NamedScore(_NamedScoreInterface):
...
@strawberry.type
class _NameScoreItem:
name: str
score: _NamedScore
@strawberry.type
class StrawberryDocument(strawberry.type(_BaseStrawberryDocumentInterface)):
evaluations: Optional[List[_NameScoreItem]] = None
scores: Optional[List[_NameScoreItem]] = None
chunks: Optional[List['StrawberryDocument']] = None
matches: Optional[List['StrawberryDocument']] = None
### input
@strawberry.input
class _NamedScoreInput(_NamedScoreInterface):
...
@strawberry.input
class _NameScoreItemInput:
name: str
score: _NamedScoreInput
@strawberry.input
class StrawberryDocumentInput(strawberry.input(_BaseStrawberryDocumentInterface)):
evaluations: Optional[List[_NameScoreItemInput]] = None
scores: Optional[List[_NameScoreItemInput]] = None
chunks: Optional[List['StrawberryDocumentInput']] = None
matches: Optional[List['StrawberryDocumentInput']] = None
|
from rich.progress import (
Progress,
BarColumn,
SpinnerColumn,
MofNCompleteColumn,
TextColumn,
TimeRemainingColumn,
Text,
)
class QPSColumn(TextColumn):
def render(self, task) -> Text:
if task.speed:
_text = f'{task.speed:.0f} QPS'
else:
_text = 'unknown'
if self.markup:
text = Text.from_markup(_text, style=self.style, justify=self.justify)
else:
text = Text(_text, style=self.style, justify=self.justify)
if self.highlighter:
self.highlighter.highlight(text)
return text
def get_pbar(disable):
return Progress(
SpinnerColumn(),
TextColumn('[bold]{task.description}'),
BarColumn(),
MofNCompleteColumn(),
'•',
QPSColumn('{task.speed} QPS', justify='right', style='progress.data.speed'),
'•',
TimeRemainingColumn(),
'•',
TextColumn(
'[bold blue]{task.fields[total_size]}',
justify='right',
style='progress.filesize',
),
transient=False,
disable=disable,
)
def get_progressbar(description, disable, total):
progress = get_pbar(disable)
task = progress.add_task(description, total=total, start=False, total_size=0)
return progress, task
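# --- Usage sketch (editor's addition): driving the progress bar above ---
if __name__ == '__main__':
    import time
    progress, task = get_progressbar('demo', disable=False, total=10)
    with progress:
        progress.start_task(task)
        for _ in range(10):
            # `total_size` feeds the custom TextColumn defined in get_pbar()
            progress.update(task, advance=1, total_size='0 B')
            time.sleep(0.05)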
|
from rich.progress import (
Progress,
BarColumn,
SpinnerColumn,
MofNCompleteColumn,
TextColumn,
TimeRemainingColumn,
Text,
)
class QPSColumn(TextColumn):
def render(self, task) -> Text:
if task.speed:
_text = f'{task.speed:.0f} QPS'
else:
_text = 'unknown'
if self.markup:
text = Text.from_markup(_text, style=self.style, justify=self.justify)
else:
text = Text(_text, style=self.style, justify=self.justify)
if self.highlighter:
self.highlighter.highlight(text)
return text
def get_pbar(disable):
return Progress(
SpinnerColumn(),
TextColumn('[bold]{task.description}'),
BarColumn(),
MofNCompleteColumn(),
'•',
QPSColumn('{task.speed} QPS', justify='right', style='progress.data.speed'),
'•',
TimeRemainingColumn(),
'•',
TextColumn(
'[bold blue]{task.fields[total_size]}',
justify='right',
style='progress.filesize',
),
transient=True,
disable=disable,
)
def get_progressbar(description, disable, total):
progress = get_pbar(disable)
task = progress.add_task(description, total=total, start=False, total_size=0)
return progress, task
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
**kwargs,
)
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
self.builder = Text(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
ignore_verifications = False
use_auth_token = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
ignore_verifications=ignore_verifications,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
use_auth_token=use_auth_token,
)
dataset = self.builder.as_dataset(
split=self.split, ignore_verifications=ignore_verifications, in_memory=self.keep_in_memory
)
return dataset
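# --- Usage sketch (editor's addition) ---
# The temporary file below is a hypothetical placeholder, not from the original.
if __name__ == "__main__":
    import os
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write("hello\nworld\n")
    reader = TextDatasetReader(f.name, keep_in_memory=True)
    print(reader.read())  # one "text" column with two rows
    os.remove(f.name)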
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
):
super().__init__(
path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
)
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
self.builder = Text(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
**kwargs,
)
def read(self):
download_config = None
download_mode = None
ignore_verifications = False
use_auth_token = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
ignore_verifications=ignore_verifications,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
use_auth_token=use_auth_token,
)
# Build dataset for splits
dataset = self.builder.as_dataset(
split=self.split, ignore_verifications=ignore_verifications, in_memory=self.keep_in_memory
)
return dataset
|
from typing import Any
from langchain_core.memory import BaseMemory
class SimpleMemory(BaseMemory):
"""Simple memory for storing context or other information that shouldn't
ever change between prompts.
"""
memories: dict[str, Any] = {}
@property
def memory_variables(self) -> list[str]:
return list(self.memories.keys())
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
return self.memories
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Nothing should be saved or changed, my memory is set in stone."""
def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
|
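# --- Usage sketch (editor's addition) for the SimpleMemory class above; the
# key/value pair is hypothetical ---
if __name__ == "__main__":
    memory = SimpleMemory(memories={"project_phase": "alpha"})
    print(memory.memory_variables)           # ['project_phase']
    print(memory.load_memory_variables({}))  # {'project_phase': 'alpha'}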
from typing import Any
from langchain_core.memory import BaseMemory
class SimpleMemory(BaseMemory):
"""Simple memory for storing context or other information that shouldn't
ever change between prompts.
"""
memories: dict[str, Any] = dict()
@property
def memory_variables(self) -> list[str]:
return list(self.memories.keys())
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
return self.memories
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Nothing should be saved or changed, my memory is set in stone."""
def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
|
_base_ = 'mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')))
|
_base_ = 'mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')))
|
from __future__ import annotations
import pytest
from torch.utils.data import BatchSampler, ConcatDataset, SequentialSampler
from sentence_transformers.sampler import RoundRobinBatchSampler
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
from datasets import Dataset
else:
pytest.skip(
reason='Sentence Transformers was not installed with the `["train"]` extra.',
allow_module_level=True,
)
DATASET_LENGTH = 25
@pytest.fixture
def dummy_concat_dataset() -> ConcatDataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 1, 2, ... , 23, 24, 100, 101, ..., 123, 124],
"label": [0, 1, 0, 1, ..., 0, 1],
}
"""
values_1 = list(range(DATASET_LENGTH))
labels = [x % 2 for x in values_1]
dataset_1 = Dataset.from_dict({"data": values_1, "label": labels})
values_2 = [x + 100 for x in values_1] + [x + 200 for x in values_1]
dataset_2 = Dataset.from_dict({"data": values_2, "label": labels + labels})
return ConcatDataset([dataset_1, dataset_2])
def test_round_robin_batch_sampler(dummy_concat_dataset: ConcatDataset) -> None:
batch_size = 4
batch_sampler_1 = BatchSampler(
SequentialSampler(range(len(dummy_concat_dataset.datasets[0]))), batch_size=batch_size, drop_last=True
)
batch_sampler_2 = BatchSampler(
SequentialSampler(range(len(dummy_concat_dataset.datasets[1]))), batch_size=batch_size, drop_last=True
)
sampler = RoundRobinBatchSampler(dataset=dummy_concat_dataset, batch_samplers=[batch_sampler_1, batch_sampler_2])
batches = list(iter(sampler))
# Despite the second dataset being larger (2 * DATASET_LENGTH), we still only sample DATASET_LENGTH // batch_size batches from each dataset
# because the RoundRobinBatchSampler should stop sampling once it has sampled all elements from one dataset
assert len(batches) == 2 * DATASET_LENGTH // batch_size
assert len(sampler) == len(batches)
# Assert that batches are produced in a round-robin fashion
for i in range(0, len(batches), 2):
# Batch from the first part of the dataset
batch_1 = batches[i]
assert all(
dummy_concat_dataset[idx]["data"] < 100 for idx in batch_1
), f"Batch {i} contains data from the second part of the dataset: {[dummy_concat_dataset[idx]['data'] for idx in batch_1]}"
# Batch from the second part of the dataset
batch_2 = batches[i + 1]
assert all(
dummy_concat_dataset[idx]["data"] >= 100 for idx in batch_2
), f"Batch {i+1} contains data from the first part of the dataset: {[dummy_concat_dataset[idx]['data'] for idx in batch_2]}"
def test_round_robin_batch_sampler_value_error(dummy_concat_dataset: ConcatDataset) -> None:
batch_size = 4
batch_sampler_1 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
batch_sampler_2 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
batch_sampler_3 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
with pytest.raises(
ValueError, match="The number of batch samplers must match the number of datasets in the ConcatDataset"
):
RoundRobinBatchSampler(
dataset=dummy_concat_dataset, batch_samplers=[batch_sampler_1, batch_sampler_2, batch_sampler_3]
)
|
from __future__ import annotations
import pytest
from datasets import Dataset
from torch.utils.data import BatchSampler, ConcatDataset, SequentialSampler
from sentence_transformers.sampler import RoundRobinBatchSampler
DATASET_LENGTH = 25
@pytest.fixture
def dummy_concat_dataset() -> ConcatDataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 1, 2, ... , 23, 24, 100, 101, ..., 123, 124],
"label": [0, 1, 0, 1, ..., 0, 1],
}
"""
values_1 = list(range(DATASET_LENGTH))
labels = [x % 2 for x in values_1]
dataset_1 = Dataset.from_dict({"data": values_1, "label": labels})
values_2 = [x + 100 for x in values_1] + [x + 200 for x in values_1]
dataset_2 = Dataset.from_dict({"data": values_2, "label": labels + labels})
return ConcatDataset([dataset_1, dataset_2])
def test_round_robin_batch_sampler(dummy_concat_dataset: ConcatDataset) -> None:
batch_size = 4
batch_sampler_1 = BatchSampler(
SequentialSampler(range(len(dummy_concat_dataset.datasets[0]))), batch_size=batch_size, drop_last=True
)
batch_sampler_2 = BatchSampler(
SequentialSampler(range(len(dummy_concat_dataset.datasets[1]))), batch_size=batch_size, drop_last=True
)
sampler = RoundRobinBatchSampler(dataset=dummy_concat_dataset, batch_samplers=[batch_sampler_1, batch_sampler_2])
batches = list(iter(sampler))
# Despite the second dataset being larger (2 * DATASET_LENGTH), we still only sample DATASET_LENGTH // batch_size batches from each dataset
# because the RoundRobinBatchSampler should stop sampling once it has sampled all elements from one dataset
assert len(batches) == 2 * DATASET_LENGTH // batch_size
assert len(sampler) == len(batches)
# Assert that batches are produced in a round-robin fashion
for i in range(0, len(batches), 2):
# Batch from the first part of the dataset
batch_1 = batches[i]
assert all(
dummy_concat_dataset[idx]["data"] < 100 for idx in batch_1
), f"Batch {i} contains data from the second part of the dataset: {[dummy_concat_dataset[idx]['data'] for idx in batch_1]}"
# Batch from the second part of the dataset
batch_2 = batches[i + 1]
assert all(
dummy_concat_dataset[idx]["data"] >= 100 for idx in batch_2
), f"Batch {i+1} contains data from the first part of the dataset: {[dummy_concat_dataset[idx]['data'] for idx in batch_2]}"
def test_round_robin_batch_sampler_value_error(dummy_concat_dataset: ConcatDataset) -> None:
batch_size = 4
batch_sampler_1 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
batch_sampler_2 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
batch_sampler_3 = BatchSampler(SequentialSampler(range(DATASET_LENGTH)), batch_size=batch_size, drop_last=True)
with pytest.raises(
ValueError, match="The number of batch samplers must match the number of datasets in the ConcatDataset"
):
RoundRobinBatchSampler(
dataset=dummy_concat_dataset, batch_samplers=[batch_sampler_1, batch_sampler_2, batch_sampler_3]
)
|
from abc import abstractmethod
import logging
from typing import Any, Dict, List, Optional
from llama_index.core.graph_stores.types import GraphStore
from .neptune import refresh_schema
logger = logging.getLogger(__name__)
class NeptuneBaseGraphStore(GraphStore):
"""This is an abstract base class that represents the shared features across the NeptuneDatabaseGraphStore
and NeptuneAnalyticsGraphStore classes.
"""
    def __init__(self) -> None:
pass
@property
def client(self) -> Any:
return self._client
def get(self, subj: str) -> List[List[str]]:
"""Get triplets."""
query = """
MATCH (n1:%s)-[r]->(n2:%s)
WHERE n1.id = $subj
RETURN type(r), n2.id;
"""
prepared_statement = query % (self.node_label, self.node_label)
with self._driver.session(database=self._database) as session:
data = session.run(prepared_statement, {"subj": subj})
return [record.values() for record in data]
def get_rel_map(
self, subjs: Optional[List[str]] = None, depth: int = 2, limit: int = 30
) -> Dict[str, List[List[str]]]:
"""Get flat rel map."""
rel_map: Dict[Any, List[Any]] = {}
if subjs is None or len(subjs) == 0:
return rel_map
query = f"""MATCH p=(n1:{self.node_label})-[*1..{depth}]->() WHERE n1.id IN $subjs
UNWIND relationships(p) AS rel WITH n1.id AS subj, p,
collect([type(rel), endNode(rel).id])AS flattened_rels
UNWIND flattened_rels as fr
WITH DISTINCT fr, subj
RETURN subj, collect(fr) AS flattened_rels LIMIT {limit}"""
data = list(self.query(query, {"subjs": subjs}))
if not data:
return rel_map
for record in data:
rel_map[record["subj"]] = record["flattened_rels"]
return rel_map
def upsert_triplet(self, subj: str, rel: str, obj: str) -> None:
"""Add triplet to the graph."""
query = """
MERGE (n1:`%s` {id:$subj})
MERGE (n2:`%s` {id:$obj})
MERGE (n1)-[:`%s`]->(n2)
"""
prepared_statement = query % (
self.node_label.replace("`", ""),
self.node_label.replace("`", ""),
rel.replace(" ", "_").replace("`", "").upper(),
)
self.query(
prepared_statement,
{"subj": subj.replace("`", ""), "obj": obj.replace("`", "")},
)
def delete(self, subj: str, rel: str, obj: str) -> None:
"""Delete triplet from the graph."""
def delete_rel(subj: str, obj: str, rel: str) -> None:
with self._driver.session(database=self._database) as session:
session.run(
(
"MATCH (n1:{})-[r:{}]->(n2:{}) WHERE n1.id = $subj AND n2.id"
" = $obj DELETE r"
).format(self.node_label, rel, self.node_label),
{"subj": subj, "obj": obj},
)
def delete_entity(entity: str) -> None:
with self._driver.session(database=self._database) as session:
session.run(
"MATCH (n:%s) WHERE n.id = $entity DETACH DELETE n"
% self.node_label,
{"entity": entity},
)
delete_rel(subj, obj, rel)
delete_entity(subj)
def get_schema(self, refresh: bool = False) -> str:
"""Get the schema of the Neptune KG store."""
if refresh or not self.schema:
self.schema = refresh_schema(self.query, self._get_summary())["schema_str"]
return self.schema
@abstractmethod
def query(self, query: str, params: dict = {}) -> Dict[str, Any]:
raise NotImplementedError
@abstractmethod
def _get_summary(self) -> Dict:
raise NotImplementedError
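# --- Sketch (editor's addition, an assumption rather than original code) ---
# A concrete store only has to supply `query` and `_get_summary`; the base
# class above builds openCypher strings and delegates execution to `query`.
class _LoggingNeptuneStore(NeptuneBaseGraphStore):
    node_label = "Entity"
    schema = ""
    def query(self, query: str, params: dict = {}) -> Dict[str, Any]:
        # Only logs the statement instead of sending it to Neptune.
        logger.debug("would execute: %s with params %s", query, params)
        return {}
    def _get_summary(self) -> Dict:
        return {}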
|
from abc import abstractmethod
import logging
from typing import Any, Dict, List, Optional
from llama_index.core.graph_stores.types import GraphStore
from .neptune import refresh_schema
logger = logging.getLogger(__name__)
class NeptuneBaseGraphStore(GraphStore):
"""This is an abstract base class that represents the shared features across the NeptuneDatabaseGraphStore
and NeptuneAnalyticsGraphStore classes.
"""
    def __init__(self) -> None:
pass
@property
def client(self) -> Any:
return self._client
def get(self, subj: str) -> List[List[str]]:
"""Get triplets."""
query = """
MATCH (n1:%s)-[r]->(n2:%s)
WHERE n1.id = $subj
RETURN type(r), n2.id;
"""
prepared_statement = query % (self.node_label, self.node_label)
with self._driver.session(database=self._database) as session:
data = session.run(prepared_statement, {"subj": subj})
return [record.values() for record in data]
def get_rel_map(
self, subjs: Optional[List[str]] = None, depth: int = 2, limit: int = 30
) -> Dict[str, List[List[str]]]:
"""Get flat rel map."""
rel_map: Dict[Any, List[Any]] = {}
if subjs is None or len(subjs) == 0:
return rel_map
query = f"""MATCH p=(n1:{self.node_label})-[*1..{depth}]->() WHERE n1.id IN $subjs
UNWIND relationships(p) AS rel WITH n1.id AS subj, p,
collect([type(rel), endNode(rel).id])AS flattened_rels
UNWIND flattened_rels as fr
WITH DISTINCT fr, subj
RETURN subj, collect(fr) AS flattened_rels LIMIT {limit}"""
data = list(self.query(query, {"subjs": subjs}))
if not data:
return rel_map
for record in data:
rel_map[record["subj"]] = record["flattened_rels"]
return rel_map
def upsert_triplet(self, subj: str, rel: str, obj: str) -> None:
"""Add triplet to the graph."""
query = """
MERGE (n1:`%s` {id:$subj})
MERGE (n2:`%s` {id:$obj})
MERGE (n1)-[:`%s`]->(n2)
"""
prepared_statement = query % (
self.node_label.replace("`", ""),
rel.replace(" ", "_").replace("`", "").upper(),
)
self.query(
prepared_statement,
{"subj": subj.replace("`", ""), "obj": obj.replace("`", "")},
)
def delete(self, subj: str, rel: str, obj: str) -> None:
"""Delete triplet from the graph."""
def delete_rel(subj: str, obj: str, rel: str) -> None:
with self._driver.session(database=self._database) as session:
session.run(
(
"MATCH (n1:{})-[r:{}]->(n2:{}) WHERE n1.id = $subj AND n2.id"
" = $obj DELETE r"
).format(self.node_label, rel, self.node_label),
{"subj": subj, "obj": obj},
)
def delete_entity(entity: str) -> None:
with self._driver.session(database=self._database) as session:
session.run(
"MATCH (n:%s) WHERE n.id = $entity DETACH DELETE n"
% self.node_label,
{"entity": entity},
)
delete_rel(subj, obj, rel)
delete_entity(subj)
def get_schema(self, refresh: bool = False) -> str:
"""Get the schema of the Neptune KG store."""
if refresh or not self.schema:
self.schema = refresh_schema(self.query, self._get_summary())["schema_str"]
return self.schema
@abstractmethod
def query(self, query: str, params: dict = {}) -> Dict[str, Any]:
raise NotImplementedError
@abstractmethod
def _get_summary(self) -> Dict:
raise NotImplementedError
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.registry import TASK_UTILS
from .base_sampler import BaseSampler
@TASK_UTILS.register_module()
class RandomSampler(BaseSampler):
"""Random sampler.
Args:
num (int): Number of samples
pos_fraction (float): Fraction of positive samples
        neg_pos_ub (int, optional): Upper bound number of negative and
positive samples. Defaults to -1.
add_gt_as_proposals (bool, optional): Whether to add ground truth
boxes as proposals. Defaults to True.
"""
def __init__(self,
num,
pos_fraction,
neg_pos_ub=-1,
add_gt_as_proposals=True,
**kwargs):
from mmdet.core.bbox import demodata
super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
add_gt_as_proposals)
self.rng = demodata.ensure_rng(kwargs.get('rng', None))
    def random_choice(self, gallery, num):
        """Randomly select some elements from the gallery.
If `gallery` is a Tensor, the returned indices will be a Tensor;
If `gallery` is a ndarray or list, the returned indices will be a
ndarray.
Args:
gallery (Tensor | ndarray | list): indices pool.
num (int): expected sample num.
Returns:
Tensor or ndarray: sampled indices.
"""
assert len(gallery) >= num
is_tensor = isinstance(gallery, torch.Tensor)
if not is_tensor:
if torch.cuda.is_available():
device = torch.cuda.current_device()
else:
device = 'cpu'
gallery = torch.tensor(gallery, dtype=torch.long, device=device)
# This is a temporary fix. We can revert the following code
# when PyTorch fixes the abnormal return of torch.randperm.
# See: https://github.com/open-mmlab/mmdetection/pull/5014
perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device)
rand_inds = gallery[perm]
if not is_tensor:
rand_inds = rand_inds.cpu().numpy()
return rand_inds
def _sample_pos(self, assign_result, num_expected, **kwargs):
"""Randomly sample some positive samples."""
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.random_choice(pos_inds, num_expected)
def _sample_neg(self, assign_result, num_expected, **kwargs):
"""Randomly sample some negative samples."""
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
return self.random_choice(neg_inds, num_expected)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import BBOX_SAMPLERS
from .base_sampler import BaseSampler
@BBOX_SAMPLERS.register_module()
class RandomSampler(BaseSampler):
"""Random sampler.
Args:
num (int): Number of samples
pos_fraction (float): Fraction of positive samples
        neg_pos_ub (int, optional): Upper bound number of negative and
positive samples. Defaults to -1.
add_gt_as_proposals (bool, optional): Whether to add ground truth
boxes as proposals. Defaults to True.
"""
def __init__(self,
num,
pos_fraction,
neg_pos_ub=-1,
add_gt_as_proposals=True,
**kwargs):
from mmdet.core.bbox import demodata
super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
add_gt_as_proposals)
self.rng = demodata.ensure_rng(kwargs.get('rng', None))
def random_choice(self, gallery, num):
"""Random select some elements from the gallery.
If `gallery` is a Tensor, the returned indices will be a Tensor;
If `gallery` is a ndarray or list, the returned indices will be a
ndarray.
Args:
gallery (Tensor | ndarray | list): indices pool.
num (int): expected sample num.
Returns:
Tensor or ndarray: sampled indices.
"""
assert len(gallery) >= num
is_tensor = isinstance(gallery, torch.Tensor)
if not is_tensor:
if torch.cuda.is_available():
device = torch.cuda.current_device()
else:
device = 'cpu'
gallery = torch.tensor(gallery, dtype=torch.long, device=device)
# This is a temporary fix. We can revert the following code
# when PyTorch fixes the abnormal return of torch.randperm.
# See: https://github.com/open-mmlab/mmdetection/pull/5014
perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device)
rand_inds = gallery[perm]
if not is_tensor:
rand_inds = rand_inds.cpu().numpy()
return rand_inds
def _sample_pos(self, assign_result, num_expected, **kwargs):
"""Randomly sample some positive samples."""
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.random_choice(pos_inds, num_expected)
def _sample_neg(self, assign_result, num_expected, **kwargs):
"""Randomly sample some negative samples."""
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
return self.random_choice(neg_inds, num_expected)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import ZapierNLAListActions, ZapierNLARunAction
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ZapierNLARunAction": "langchain_community.tools",
"ZapierNLAListActions": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ZapierNLAListActions",
"ZapierNLARunAction",
]
|
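A small standalone sketch (not the langchain implementation) of the module-level `__getattr__` pattern (PEP 562) that the shim above uses to resolve deprecated names lazily; the mapping here is hypothetical.

```python
import importlib
from typing import Any

# Hypothetical mapping from an old public name to the module that now provides it.
_LOOKUP = {"OldTool": "new_package.tools"}

def __getattr__(name: str) -> Any:
    # Called only when `name` is not found through normal module attribute lookup.
    if name in _LOOKUP:
        module = importlib.import_module(_LOOKUP[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```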
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import ZapierNLAListActions, ZapierNLARunAction
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ZapierNLARunAction": "langchain_community.tools",
"ZapierNLAListActions": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ZapierNLARunAction",
"ZapierNLAListActions",
]
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from laser_encoder import LaserEncoder
_EMBEDDING_DIM = 1024
@pytest.fixture(scope='session')
def basic_encoder() -> LaserEncoder:
return LaserEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.language == 'en'
def test_no_document(basic_encoder: LaserEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: LaserEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
@pytest.mark.gpu
def test_encoding_gpu():
encoder = LaserEncoder(device='cuda')
docs = DocumentArray((Document(text='random text')))
encoder.encode(docs, {})
assert len(docs.get_attributes('embedding')) == 1
assert docs[0].embedding.shape == (1024,)
def test_no_text_documents(basic_encoder: LaserEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_encoding_cpu(basic_encoder: LaserEncoder):
docs = DocumentArray([Document(text='hello there')])
basic_encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'language, sentence',
[
('en', 'Today is a nice day'),
('es', 'hoy es un buen día'),
('ru', 'сегодня хороший день'),
],
)
def test_languages(language: str, sentence: str, basic_encoder: LaserEncoder):
docs = DocumentArray([Document(text=sentence)])
basic_encoder.encode(docs, {'language': language})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: LaserEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: LaserEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: LaserEncoder):
docs = DocumentArray(
[
            # Different from the usual example, because the embeddings are weak
            # (manually verified using the laser embeddings module)
Document(id='A', text='car'),
Document(id='B', text='truck'),
Document(id='C', text='radio'),
Document(id='D', text='TV'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
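A hedged sketch of direct LaserEncoder usage mirrored by the tests above; it assumes the `laser_encoder` package and its model weights are available locally.

```python
from jina import Document, DocumentArray
from laser_encoder import LaserEncoder

encoder = LaserEncoder()
docs = DocumentArray([Document(text='hello there')])
encoder.encode(docs, parameters={'language': 'en'})
print(docs[0].embedding.shape)  # expected (1024,), matching _EMBEDDING_DIM in the tests
```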
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from laser_encoder import LaserEncoder
_EMBEDDING_DIM = 1024
@pytest.fixture(scope='session')
def basic_encoder() -> LaserEncoder:
return LaserEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.language == 'en'
def test_no_document(basic_encoder: LaserEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: LaserEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
@pytest.mark.gpu
def test_encoding_gpu():
encoder = LaserEncoder(device='cuda')
docs = DocumentArray((Document(text='random text')))
encoder.encode(docs, {})
assert len(docs.get_attributes('embedding')) == 1
assert docs[0].embedding.shape == (1024,)
def test_no_text_documents(basic_encoder: LaserEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_encoding_cpu(basic_encoder: LaserEncoder):
docs = DocumentArray([Document(text='hello there')])
basic_encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'language, sentence',
[
('en', 'Today is a nice day'),
('es', 'hoy es un buen día'),
('ru', 'сегодня хороший день'),
],
)
def test_languages(language: str, sentence: str, basic_encoder: LaserEncoder):
docs = DocumentArray([Document(text=sentence)])
basic_encoder.encode(docs, {'language': language})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: LaserEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
def test_no_documents():
encoder = LaserEncoder()
docs = []
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['r']})
assert not docs
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: LaserEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: LaserEncoder):
docs = DocumentArray(
[
            # Different from the usual example, because the embeddings are weak
            # (manually verified using the laser embeddings module)
Document(id='A', text='car'),
Document(id='B', text='truck'),
Document(id='C', text='radio'),
Document(id='D', text='TV'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .config import Config, ConfigDict, DictAction, read_base
__all__ = ['Config', 'ConfigDict', 'DictAction', 'read_base']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .config import Config, ConfigDict, DictAction
__all__ = ['Config', 'ConfigDict', 'DictAction']
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import (ConvModule, caffe2_xavier_init, constant_init, is_norm,
normal_init)
from torch.nn import BatchNorm2d
from ..builder import NECKS
class Bottleneck(nn.Module):
"""Bottleneck block for DilatedEncoder used in `YOLOF.
<https://arxiv.org/abs/2103.09460>`.
The Bottleneck contains three ConvLayers and one residual connection.
Args:
in_channels (int): The number of input channels.
mid_channels (int): The number of middle output channels.
dilation (int): Dilation rate.
norm_cfg (dict): Dictionary to construct and config norm layer.
"""
def __init__(self,
in_channels,
mid_channels,
dilation,
norm_cfg=dict(type='BN', requires_grad=True)):
super(Bottleneck, self).__init__()
self.conv1 = ConvModule(
in_channels, mid_channels, 1, norm_cfg=norm_cfg)
self.conv2 = ConvModule(
mid_channels,
mid_channels,
3,
padding=dilation,
dilation=dilation,
norm_cfg=norm_cfg)
self.conv3 = ConvModule(
mid_channels, in_channels, 1, norm_cfg=norm_cfg)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
out = out + identity
return out
@NECKS.register_module()
class DilatedEncoder(nn.Module):
"""Dilated Encoder for YOLOF <https://arxiv.org/abs/2103.09460>`.
This module contains two types of components:
- the original FPN lateral convolution layer and fpn convolution layer,
which are 1x1 conv + 3x3 conv
- the dilated residual block
Args:
in_channels (int): The number of input channels.
out_channels (int): The number of output channels.
block_mid_channels (int): The number of middle block output channels
num_residual_blocks (int): The number of residual blocks.
block_dilations (list): The list of residual blocks dilation.
"""
def __init__(self, in_channels, out_channels, block_mid_channels,
num_residual_blocks, block_dilations):
super(DilatedEncoder, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.block_mid_channels = block_mid_channels
self.num_residual_blocks = num_residual_blocks
self.block_dilations = block_dilations
self._init_layers()
def _init_layers(self):
self.lateral_conv = nn.Conv2d(
self.in_channels, self.out_channels, kernel_size=1)
self.lateral_norm = BatchNorm2d(self.out_channels)
self.fpn_conv = nn.Conv2d(
self.out_channels, self.out_channels, kernel_size=3, padding=1)
self.fpn_norm = BatchNorm2d(self.out_channels)
encoder_blocks = []
for i in range(self.num_residual_blocks):
dilation = self.block_dilations[i]
encoder_blocks.append(
Bottleneck(
self.out_channels,
self.block_mid_channels,
dilation=dilation))
self.dilated_encoder_blocks = nn.Sequential(*encoder_blocks)
def init_weights(self):
caffe2_xavier_init(self.lateral_conv)
caffe2_xavier_init(self.fpn_conv)
for m in [self.lateral_norm, self.fpn_norm]:
constant_init(m, 1)
for m in self.dilated_encoder_blocks.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, mean=0, std=0.01)
if is_norm(m):
constant_init(m, 1)
def forward(self, feature):
out = self.lateral_norm(self.lateral_conv(feature[-1]))
out = self.fpn_norm(self.fpn_conv(out))
return self.dilated_encoder_blocks(out),
|
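A minimal plain-PyTorch sketch of why the Bottleneck above sets `padding=dilation`: a 3x3 convolution with matching padding and dilation preserves the spatial size while enlarging the receptive field. The channel count and dilation rates below are illustrative.

```python
import torch
import torch.nn as nn

x = torch.randn(1, 16, 32, 32)
for dilation in (2, 4, 6, 8):
    conv = nn.Conv2d(16, 16, kernel_size=3, padding=dilation, dilation=dilation)
    assert conv(x).shape == x.shape  # H and W unchanged for every dilation rate
```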
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import (ConvModule, caffe2_xavier_init, constant_init, is_norm,
normal_init)
from torch.nn import BatchNorm2d
from ..builder import NECKS
class Bottleneck(nn.Module):
"""Bottleneck block for DilatedEncoder used in `YOLOF.
<https://arxiv.org/abs/2103.09460>`.
The Bottleneck contains three ConvLayers and one residual connection.
Args:
in_channels (int): The number of input channels.
mid_channels (int): The number of middle output channels.
dilation (int): Dilation rate.
norm_cfg (dict): Dictionary to construct and config norm layer.
"""
def __init__(self,
in_channels,
mid_channels,
dilation,
norm_cfg=dict(type='BN', requires_grad=True)):
super(Bottleneck, self).__init__()
self.conv1 = ConvModule(
in_channels, mid_channels, 1, norm_cfg=norm_cfg)
self.conv2 = ConvModule(
mid_channels,
mid_channels,
3,
padding=dilation,
dilation=dilation,
norm_cfg=norm_cfg)
self.conv3 = ConvModule(
mid_channels, in_channels, 1, norm_cfg=norm_cfg)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
out = out + identity
return out
@NECKS.register_module()
class DilatedEncoder(nn.Module):
"""Dilated Encoder for YOLOF <https://arxiv.org/abs/2103.09460>`.
This module contains two types of components:
- the original FPN lateral convolution layer and fpn convolution layer,
which are 1x1 conv + 3x3 conv
- the dilated residual block
Args:
in_channels (int): The number of input channels.
out_channels (int): The number of output channels.
block_mid_channels (int): The number of middle block output channels
num_residual_blocks (int): The number of residual blocks.
"""
def __init__(self, in_channels, out_channels, block_mid_channels,
num_residual_blocks):
super(DilatedEncoder, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.block_mid_channels = block_mid_channels
self.num_residual_blocks = num_residual_blocks
self.block_dilations = [2, 4, 6, 8]
self._init_layers()
def _init_layers(self):
self.lateral_conv = nn.Conv2d(
self.in_channels, self.out_channels, kernel_size=1)
self.lateral_norm = BatchNorm2d(self.out_channels)
self.fpn_conv = nn.Conv2d(
self.out_channels, self.out_channels, kernel_size=3, padding=1)
self.fpn_norm = BatchNorm2d(self.out_channels)
encoder_blocks = []
for i in range(self.num_residual_blocks):
dilation = self.block_dilations[i]
encoder_blocks.append(
Bottleneck(
self.out_channels,
self.block_mid_channels,
dilation=dilation))
self.dilated_encoder_blocks = nn.Sequential(*encoder_blocks)
def init_weights(self):
caffe2_xavier_init(self.lateral_conv)
caffe2_xavier_init(self.fpn_conv)
for m in [self.lateral_norm, self.fpn_norm]:
constant_init(m, 1)
for m in self.dilated_encoder_blocks.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, mean=0, std=0.01)
if is_norm(m):
constant_init(m, 1)
def forward(self, feature):
out = self.lateral_norm(self.lateral_conv(feature[-1]))
out = self.fpn_norm(self.fpn_conv(out))
return self.dilated_encoder_blocks(out),
|
"""Async utils."""
import asyncio
import concurrent.futures
from itertools import zip_longest
from typing import Any, Coroutine, Iterable, List, Optional, TypeVar
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
def asyncio_module(show_progress: bool = False) -> Any:
if show_progress:
from tqdm.asyncio import tqdm_asyncio
module = tqdm_asyncio
else:
module = asyncio
return module
def asyncio_run(coro: Coroutine) -> Any:
"""
Gets an existing event loop to run the coroutine.
If there is no existing event loop, creates a new one.
If an event loop is already running, uses threading to run in a separate thread.
"""
try:
# Check if there's an existing event loop
loop = asyncio.get_event_loop()
# Check if the loop is already running
if loop.is_running():
# If loop is already running, run in a separate thread
def run_coro_in_thread() -> Any:
new_loop = asyncio.new_event_loop()
asyncio.set_event_loop(new_loop)
try:
return new_loop.run_until_complete(coro)
finally:
new_loop.close()
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
future = executor.submit(run_coro_in_thread)
return future.result()
else:
# If we're here, there's an existing loop but it's not running
return loop.run_until_complete(coro)
except RuntimeError as e:
# If we can't get the event loop, we're likely in a different thread
try:
return asyncio.run(coro)
except RuntimeError as e:
raise RuntimeError(
"Detected nested async. Please use nest_asyncio.apply() to allow nested event loops."
"Or, use async entry methods like `aquery()`, `aretriever`, `achat`, etc."
)
def run_async_tasks(
tasks: List[Coroutine],
show_progress: bool = False,
progress_bar_desc: str = "Running async tasks",
) -> List[Any]:
"""Run a list of async tasks."""
tasks_to_execute: List[Any] = tasks
if show_progress:
try:
import nest_asyncio
from tqdm.asyncio import tqdm
# jupyter notebooks already have an event loop running
# we need to reuse it instead of creating a new one
nest_asyncio.apply()
loop = asyncio.get_event_loop()
async def _tqdm_gather() -> List[Any]:
return await tqdm.gather(*tasks_to_execute, desc=progress_bar_desc)
tqdm_outputs: List[Any] = loop.run_until_complete(_tqdm_gather())
return tqdm_outputs
# run the operation w/o tqdm on hitting a fatal
# may occur in some environments where tqdm.asyncio
# is not supported
except Exception:
pass
async def _gather() -> List[Any]:
return await asyncio.gather(*tasks_to_execute)
outputs: List[Any] = asyncio_run(_gather())
return outputs
def chunks(iterable: Iterable, size: int) -> Iterable:
args = [iter(iterable)] * size
return zip_longest(*args, fillvalue=None)
async def batch_gather(
tasks: List[Coroutine], batch_size: int = 10, verbose: bool = False
) -> List[Any]:
output: List[Any] = []
for task_chunk in chunks(tasks, batch_size):
task_chunk = (task for task in task_chunk if task is not None)
output_chunk = await asyncio.gather(*task_chunk)
output.extend(output_chunk)
if verbose:
print(f"Completed {len(output)} out of {len(tasks)} tasks")
return output
def get_asyncio_module(show_progress: bool = False) -> Any:
if show_progress:
from tqdm.asyncio import tqdm_asyncio
module = tqdm_asyncio
else:
module = asyncio
return module
DEFAULT_NUM_WORKERS = 4
T = TypeVar("T")
@dispatcher.span
async def run_jobs(
jobs: List[Coroutine[Any, Any, T]],
show_progress: bool = False,
workers: int = DEFAULT_NUM_WORKERS,
desc: Optional[str] = None,
) -> List[T]:
"""
Run jobs.
Args:
jobs (List[Coroutine]):
List of jobs to run.
show_progress (bool):
Whether to show progress bar.
Returns:
List[Any]:
List of results.
"""
semaphore = asyncio.Semaphore(workers)
@dispatcher.span
async def worker(job: Coroutine) -> Any:
async with semaphore:
return await job
pool_jobs = [worker(job) for job in jobs]
if show_progress:
from tqdm.asyncio import tqdm_asyncio
results = await tqdm_asyncio.gather(*pool_jobs, desc=desc)
else:
results = await asyncio.gather(*pool_jobs)
return results
|
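A self-contained sketch, in plain asyncio, of the semaphore-bounded gather pattern that `run_jobs` above implements; the job coroutine and worker count are made up for illustration.

```python
import asyncio

async def job(i: int) -> int:
    await asyncio.sleep(0.01)
    return i * i

async def main() -> None:
    semaphore = asyncio.Semaphore(4)  # at most 4 jobs in flight, like `workers`

    async def worker(coro):
        async with semaphore:
            return await coro

    results = await asyncio.gather(*(worker(job(i)) for i in range(10)))
    print(results)

asyncio.run(main())
```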
"""Async utils."""
import asyncio
from itertools import zip_longest
from typing import Any, Coroutine, Iterable, List, Optional, TypeVar
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
def asyncio_module(show_progress: bool = False) -> Any:
if show_progress:
from tqdm.asyncio import tqdm_asyncio
module = tqdm_asyncio
else:
module = asyncio
return module
def asyncio_run(coro: Coroutine) -> Any:
"""
Gets an existing event loop to run the coroutine.
If there is no existing event loop, creates a new one.
"""
try:
# Check if there's an existing event loop
loop = asyncio.get_event_loop()
# If we're here, there's an existing loop but it's not running
return loop.run_until_complete(coro)
except RuntimeError as e:
        # If we can't get the event loop, we're likely in a different thread, or it's already running
try:
return asyncio.run(coro)
except RuntimeError as e:
raise RuntimeError(
"Detected nested async. Please use nest_asyncio.apply() to allow nested event loops."
"Or, use async entry methods like `aquery()`, `aretriever`, `achat`, etc."
)
def run_async_tasks(
tasks: List[Coroutine],
show_progress: bool = False,
progress_bar_desc: str = "Running async tasks",
) -> List[Any]:
"""Run a list of async tasks."""
tasks_to_execute: List[Any] = tasks
if show_progress:
try:
import nest_asyncio
from tqdm.asyncio import tqdm
# jupyter notebooks already have an event loop running
# we need to reuse it instead of creating a new one
nest_asyncio.apply()
loop = asyncio.get_event_loop()
async def _tqdm_gather() -> List[Any]:
return await tqdm.gather(*tasks_to_execute, desc=progress_bar_desc)
tqdm_outputs: List[Any] = loop.run_until_complete(_tqdm_gather())
return tqdm_outputs
# run the operation w/o tqdm on hitting a fatal
# may occur in some environments where tqdm.asyncio
# is not supported
except Exception:
pass
async def _gather() -> List[Any]:
return await asyncio.gather(*tasks_to_execute)
outputs: List[Any] = asyncio_run(_gather())
return outputs
def chunks(iterable: Iterable, size: int) -> Iterable:
args = [iter(iterable)] * size
return zip_longest(*args, fillvalue=None)
async def batch_gather(
tasks: List[Coroutine], batch_size: int = 10, verbose: bool = False
) -> List[Any]:
output: List[Any] = []
for task_chunk in chunks(tasks, batch_size):
task_chunk = (task for task in task_chunk if task is not None)
output_chunk = await asyncio.gather(*task_chunk)
output.extend(output_chunk)
if verbose:
print(f"Completed {len(output)} out of {len(tasks)} tasks")
return output
def get_asyncio_module(show_progress: bool = False) -> Any:
if show_progress:
from tqdm.asyncio import tqdm_asyncio
module = tqdm_asyncio
else:
module = asyncio
return module
DEFAULT_NUM_WORKERS = 4
T = TypeVar("T")
@dispatcher.span
async def run_jobs(
jobs: List[Coroutine[Any, Any, T]],
show_progress: bool = False,
workers: int = DEFAULT_NUM_WORKERS,
desc: Optional[str] = None,
) -> List[T]:
"""
Run jobs.
Args:
jobs (List[Coroutine]):
List of jobs to run.
show_progress (bool):
Whether to show progress bar.
Returns:
List[Any]:
List of results.
"""
semaphore = asyncio.Semaphore(workers)
@dispatcher.span
async def worker(job: Coroutine) -> Any:
async with semaphore:
return await job
pool_jobs = [worker(job) for job in jobs]
if show_progress:
from tqdm.asyncio import tqdm_asyncio
results = await tqdm_asyncio.gather(*pool_jobs, desc=desc)
else:
results = await asyncio.gather(*pool_jobs)
return results
|
"""Load agent."""
from collections.abc import Sequence
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import BaseTool
from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_types import AgentType
from langchain.agents.loading import load_agent
from langchain.agents.types import AGENT_TO_CLASS
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
)
def initialize_agent(
tools: Sequence[BaseTool],
llm: BaseLanguageModel,
agent: Optional[AgentType] = None,
callback_manager: Optional[BaseCallbackManager] = None,
agent_path: Optional[str] = None,
agent_kwargs: Optional[dict] = None,
*,
tags: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Load an agent executor given tools and LLM.
Args:
tools: List of tools this agent has access to.
llm: Language model to use as the agent.
agent: Agent type to use. If None and agent_path is also None, will default
to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None.
callback_manager: CallbackManager to use. Global callback manager is used if
not provided. Defaults to None.
agent_path: Path to serialized agent to use. If None and agent is also None,
will default to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None.
agent_kwargs: Additional keyword arguments to pass to the underlying agent.
Defaults to None.
tags: Tags to apply to the traced runs. Defaults to None.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
ValueError: If both `agent` and `agent_path` are specified.
ValueError: If `agent` is not a valid agent type.
ValueError: If both `agent` and `agent_path` are None.
"""
tags_ = list(tags) if tags else []
if agent is None and agent_path is None:
agent = AgentType.ZERO_SHOT_REACT_DESCRIPTION
if agent is not None and agent_path is not None:
msg = (
"Both `agent` and `agent_path` are specified, "
"but at most only one should be."
)
raise ValueError(msg)
if agent is not None:
if agent not in AGENT_TO_CLASS:
msg = (
f"Got unknown agent type: {agent}. "
f"Valid types are: {AGENT_TO_CLASS.keys()}."
)
raise ValueError(msg)
tags_.append(agent.value if isinstance(agent, AgentType) else agent)
agent_cls = AGENT_TO_CLASS[agent]
agent_kwargs = agent_kwargs or {}
agent_obj = agent_cls.from_llm_and_tools(
llm, tools, callback_manager=callback_manager, **agent_kwargs
)
elif agent_path is not None:
agent_obj = load_agent(
agent_path, llm=llm, tools=tools, callback_manager=callback_manager
)
try:
# TODO: Add tags from the serialized object directly.
tags_.append(agent_obj._agent_type)
except NotImplementedError:
pass
else:
msg = (
"Somehow both `agent` and `agent_path` are None, this should never happen."
)
raise ValueError(msg)
return AgentExecutor.from_agent_and_tools(
agent=agent_obj,
tools=tools,
callback_manager=callback_manager,
tags=tags_,
**kwargs,
)
|
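A hedged usage sketch of `initialize_agent` as defined above (deprecated since 0.1.0 per the decorator); the tool is hypothetical, and the LLM lines are left commented out because any configured `BaseLanguageModel` instance would do.

```python
from langchain.agents import AgentType, initialize_agent
from langchain_core.tools import Tool

def word_count(text: str) -> str:
    return str(len(text.split()))

# A made-up tool for illustration only.
tools = [Tool(name="word_count", func=word_count, description="Counts words in text.")]

# llm = ...  # any BaseLanguageModel / chat model you have configured
# executor = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)
# executor.invoke({"input": "How many words are in this sentence?"})
```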
"""Load agent."""
from collections.abc import Sequence
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import BaseTool
from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_types import AgentType
from langchain.agents.loading import load_agent
from langchain.agents.types import AGENT_TO_CLASS
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
)
def initialize_agent(
tools: Sequence[BaseTool],
llm: BaseLanguageModel,
agent: Optional[AgentType] = None,
callback_manager: Optional[BaseCallbackManager] = None,
agent_path: Optional[str] = None,
agent_kwargs: Optional[dict] = None,
*,
tags: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Load an agent executor given tools and LLM.
Args:
tools: List of tools this agent has access to.
llm: Language model to use as the agent.
agent: Agent type to use. If None and agent_path is also None, will default
to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None.
callback_manager: CallbackManager to use. Global callback manager is used if
not provided. Defaults to None.
agent_path: Path to serialized agent to use. If None and agent is also None,
will default to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None.
agent_kwargs: Additional keyword arguments to pass to the underlying agent.
Defaults to None.
tags: Tags to apply to the traced runs. Defaults to None.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
ValueError: If both `agent` and `agent_path` are specified.
ValueError: If `agent` is not a valid agent type.
ValueError: If both `agent` and `agent_path` are None.
"""
tags_ = list(tags) if tags else []
if agent is None and agent_path is None:
agent = AgentType.ZERO_SHOT_REACT_DESCRIPTION
if agent is not None and agent_path is not None:
raise ValueError(
"Both `agent` and `agent_path` are specified, "
"but at most only one should be."
)
if agent is not None:
if agent not in AGENT_TO_CLASS:
raise ValueError(
f"Got unknown agent type: {agent}. "
f"Valid types are: {AGENT_TO_CLASS.keys()}."
)
tags_.append(agent.value if isinstance(agent, AgentType) else agent)
agent_cls = AGENT_TO_CLASS[agent]
agent_kwargs = agent_kwargs or {}
agent_obj = agent_cls.from_llm_and_tools(
llm, tools, callback_manager=callback_manager, **agent_kwargs
)
elif agent_path is not None:
agent_obj = load_agent(
agent_path, llm=llm, tools=tools, callback_manager=callback_manager
)
try:
# TODO: Add tags from the serialized object directly.
tags_.append(agent_obj._agent_type)
except NotImplementedError:
pass
else:
raise ValueError(
"Somehow both `agent` and `agent_path` are None, this should never happen."
)
return AgentExecutor.from_agent_and_tools(
agent=agent_obj,
tools=tools,
callback_manager=callback_manager,
tags=tags_,
**kwargs,
)
|
"""Functionality for loading agents."""
import json
import logging
from pathlib import Path
from typing import Any, Optional, Union
import yaml
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent
from langchain.agents.types import AGENT_TO_CLASS
from langchain.chains.loading import load_chain, load_chain_from_config
logger = logging.getLogger(__file__)
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/"
def _load_agent_from_tools(
config: dict,
llm: BaseLanguageModel,
tools: list[Tool],
**kwargs: Any,
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
msg = f"Loading {config_type} agent not supported"
raise ValueError(msg)
agent_cls = AGENT_TO_CLASS[config_type]
combined_config = {**config, **kwargs}
return agent_cls.from_llm_and_tools(llm, tools, **combined_config)
@deprecated("0.1.0", removal="1.0")
def load_agent_from_config(
config: dict,
llm: Optional[BaseLanguageModel] = None,
tools: Optional[list[Tool]] = None,
**kwargs: Any,
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from Config Dict.
Args:
config: Config dict to load agent from.
llm: Language model to use as the agent.
tools: List of tools this agent has access to.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
ValueError: If agent type is not specified in the config.
"""
if "_type" not in config:
msg = "Must specify an agent Type in config"
raise ValueError(msg)
load_from_tools = config.pop("load_from_llm_and_tools", False)
if load_from_tools:
if llm is None:
msg = (
"If `load_from_llm_and_tools` is set to True, then LLM must be provided"
)
raise ValueError(msg)
if tools is None:
msg = (
"If `load_from_llm_and_tools` is set to True, "
"then tools must be provided"
)
raise ValueError(msg)
return _load_agent_from_tools(config, llm, tools, **kwargs)
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
msg = f"Loading {config_type} agent not supported"
raise ValueError(msg)
agent_cls = AGENT_TO_CLASS[config_type]
if "llm_chain" in config:
config["llm_chain"] = load_chain_from_config(config.pop("llm_chain"))
elif "llm_chain_path" in config:
config["llm_chain"] = load_chain(config.pop("llm_chain_path"))
else:
msg = "One of `llm_chain` and `llm_chain_path` should be specified."
raise ValueError(msg)
if "output_parser" in config:
logger.warning(
"Currently loading output parsers on agent is not supported, "
"will just use the default one.",
)
del config["output_parser"]
combined_config = {**config, **kwargs}
return agent_cls(**combined_config)
@deprecated("0.1.0", removal="1.0")
def load_agent(
path: Union[str, Path],
**kwargs: Any,
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Unified method for loading an agent from LangChainHub or local fs.
Args:
path: Path to the agent file.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
RuntimeError: If loading from the deprecated github-based
Hub is attempted.
"""
if isinstance(path, str) and path.startswith("lc://"):
msg = (
"Loading from the deprecated github-based Hub is no longer supported. "
"Please use the new LangChain Hub at https://smith.langchain.com/hub "
"instead."
)
raise RuntimeError(msg)
return _load_agent_from_file(path, **kwargs)
def _load_agent_from_file(
file: Union[str, Path],
**kwargs: Any,
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from file."""
valid_suffixes = {"json", "yaml"}
# Convert file to Path object.
file_path = Path(file) if isinstance(file, str) else file
# Load from either json or yaml.
if file_path.suffix[1:] == "json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix[1:] == "yaml":
with open(file_path) as f:
config = yaml.safe_load(f)
else:
msg = f"Unsupported file type, must be one of {valid_suffixes}."
raise ValueError(msg)
# Load the agent from the config now.
return load_agent_from_config(config, **kwargs)
|
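A standalone sketch (no langchain imports) of the suffix-based config loading that `_load_agent_from_file` above performs; the helper name is made up, and unlike the original it also accepts the `.yml` spelling.

```python
from __future__ import annotations

import json
from pathlib import Path

import yaml

def load_config(file: str | Path) -> dict:
    path = Path(file)
    if path.suffix == ".json":
        return json.loads(path.read_text())
    if path.suffix in (".yaml", ".yml"):
        return yaml.safe_load(path.read_text())
    raise ValueError("Unsupported file type, must be one of {'json', 'yaml'}.")
```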
"""Functionality for loading agents."""
import json
import logging
from pathlib import Path
from typing import Any, Optional, Union
import yaml
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent
from langchain.agents.types import AGENT_TO_CLASS
from langchain.chains.loading import load_chain, load_chain_from_config
logger = logging.getLogger(__file__)
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/"
def _load_agent_from_tools(
config: dict, llm: BaseLanguageModel, tools: list[Tool], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
msg = f"Loading {config_type} agent not supported"
raise ValueError(msg)
agent_cls = AGENT_TO_CLASS[config_type]
combined_config = {**config, **kwargs}
return agent_cls.from_llm_and_tools(llm, tools, **combined_config)
@deprecated("0.1.0", removal="1.0")
def load_agent_from_config(
config: dict,
llm: Optional[BaseLanguageModel] = None,
tools: Optional[list[Tool]] = None,
**kwargs: Any,
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from Config Dict.
Args:
config: Config dict to load agent from.
llm: Language model to use as the agent.
tools: List of tools this agent has access to.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
ValueError: If agent type is not specified in the config.
"""
if "_type" not in config:
msg = "Must specify an agent Type in config"
raise ValueError(msg)
load_from_tools = config.pop("load_from_llm_and_tools", False)
if load_from_tools:
if llm is None:
msg = (
"If `load_from_llm_and_tools` is set to True, then LLM must be provided"
)
raise ValueError(msg)
if tools is None:
msg = (
"If `load_from_llm_and_tools` is set to True, "
"then tools must be provided"
)
raise ValueError(msg)
return _load_agent_from_tools(config, llm, tools, **kwargs)
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
msg = f"Loading {config_type} agent not supported"
raise ValueError(msg)
agent_cls = AGENT_TO_CLASS[config_type]
if "llm_chain" in config:
config["llm_chain"] = load_chain_from_config(config.pop("llm_chain"))
elif "llm_chain_path" in config:
config["llm_chain"] = load_chain(config.pop("llm_chain_path"))
else:
msg = "One of `llm_chain` and `llm_chain_path` should be specified."
raise ValueError(msg)
if "output_parser" in config:
logger.warning(
"Currently loading output parsers on agent is not supported, "
"will just use the default one."
)
del config["output_parser"]
combined_config = {**config, **kwargs}
return agent_cls(**combined_config)
@deprecated("0.1.0", removal="1.0")
def load_agent(
path: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Unified method for loading an agent from LangChainHub or local fs.
Args:
path: Path to the agent file.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
RuntimeError: If loading from the deprecated github-based
Hub is attempted.
"""
if isinstance(path, str) and path.startswith("lc://"):
msg = (
"Loading from the deprecated github-based Hub is no longer supported. "
"Please use the new LangChain Hub at https://smith.langchain.com/hub "
"instead."
)
raise RuntimeError(msg)
return _load_agent_from_file(path, **kwargs)
def _load_agent_from_file(
file: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from file."""
valid_suffixes = {"json", "yaml"}
# Convert file to Path object.
file_path = Path(file) if isinstance(file, str) else file
# Load from either json or yaml.
if file_path.suffix[1:] == "json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix[1:] == "yaml":
with open(file_path) as f:
config = yaml.safe_load(f)
else:
msg = f"Unsupported file type, must be one of {valid_suffixes}."
raise ValueError(msg)
# Load the agent from the config now.
return load_agent_from_config(config, **kwargs)
|
import datetime
from typing import List
import prisma.enums
import pydantic
class Pagination(pydantic.BaseModel):
total_items: int = pydantic.Field(
description="Total number of items.", examples=[42]
)
total_pages: int = pydantic.Field(
description="Total number of pages.", examples=[97]
)
current_page: int = pydantic.Field(
description="Current_page page number.", examples=[1]
)
page_size: int = pydantic.Field(
description="Number of items per page.", examples=[25]
)
class MyAgent(pydantic.BaseModel):
agent_id: str
agent_version: int
agent_name: str
description: str
last_edited: datetime.datetime
class MyAgentsResponse(pydantic.BaseModel):
agents: list[MyAgent]
pagination: Pagination
class StoreAgent(pydantic.BaseModel):
slug: str
agent_name: str
agent_image: str
creator: str
creator_avatar: str
sub_heading: str
description: str
runs: int
rating: float
class StoreAgentsResponse(pydantic.BaseModel):
agents: list[StoreAgent]
pagination: Pagination
class StoreAgentDetails(pydantic.BaseModel):
store_listing_version_id: str
slug: str
agent_name: str
agent_video: str
agent_image: list[str]
creator: str
creator_avatar: str
sub_heading: str
description: str
categories: list[str]
runs: int
rating: float
versions: list[str]
last_updated: datetime.datetime
class Creator(pydantic.BaseModel):
name: str
username: str
description: str
avatar_url: str
num_agents: int
agent_rating: float
agent_runs: int
is_featured: bool
class CreatorsResponse(pydantic.BaseModel):
creators: List[Creator]
pagination: Pagination
class CreatorDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
agent_rating: float
agent_runs: int
top_categories: list[str]
class Profile(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
is_featured: bool = False
class StoreSubmission(pydantic.BaseModel):
agent_id: str
agent_version: int
name: str
sub_heading: str
slug: str
description: str
image_urls: list[str]
date_submitted: datetime.datetime
status: prisma.enums.SubmissionStatus
runs: int
rating: float
store_listing_version_id: str | None = None
class StoreSubmissionsResponse(pydantic.BaseModel):
submissions: list[StoreSubmission]
pagination: Pagination
class StoreSubmissionRequest(pydantic.BaseModel):
agent_id: str
agent_version: int
slug: str
name: str
sub_heading: str
video_url: str | None = None
image_urls: list[str] = []
description: str = ""
categories: list[str] = []
class ProfileDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str | None = None
class StoreReview(pydantic.BaseModel):
score: int
comments: str | None = None
class StoreReviewCreate(pydantic.BaseModel):
store_listing_version_id: str
score: int
comments: str | None = None
class ReviewSubmissionRequest(pydantic.BaseModel):
store_listing_version_id: str
isApproved: bool
comments: str
|
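A short sketch of how the pydantic models above validate and serialize data; `Pagination` is redefined locally so the snippet runs on its own, mirroring the fields declared earlier.

```python
import pydantic

class Pagination(pydantic.BaseModel):  # mirrors the Pagination model above
    total_items: int
    total_pages: int
    current_page: int
    page_size: int

page = Pagination(total_items=42, total_pages=2, current_page=1, page_size=25)
print(page.model_dump())  # {'total_items': 42, 'total_pages': 2, 'current_page': 1, 'page_size': 25}
```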
import datetime
from typing import List
import prisma.enums
import pydantic
class Pagination(pydantic.BaseModel):
total_items: int = pydantic.Field(
description="Total number of items.", examples=[42]
)
total_pages: int = pydantic.Field(
description="Total number of pages.", examples=[97]
)
current_page: int = pydantic.Field(
description="Current_page page number.", examples=[1]
)
page_size: int = pydantic.Field(
description="Number of items per page.", examples=[25]
)
class MyAgent(pydantic.BaseModel):
agent_id: str
agent_version: int
agent_name: str
description: str
last_edited: datetime.datetime
class MyAgentsResponse(pydantic.BaseModel):
agents: list[MyAgent]
pagination: Pagination
class StoreAgent(pydantic.BaseModel):
slug: str
agent_name: str
agent_image: str
creator: str
creator_avatar: str
sub_heading: str
description: str
runs: int
rating: float
class StoreAgentsResponse(pydantic.BaseModel):
agents: list[StoreAgent]
pagination: Pagination
class StoreAgentDetails(pydantic.BaseModel):
store_listing_version_id: str
slug: str
agent_name: str
agent_video: str
agent_image: list[str]
creator: str
creator_avatar: str
sub_heading: str
description: str
categories: list[str]
runs: int
rating: float
versions: list[str]
last_updated: datetime.datetime
class Creator(pydantic.BaseModel):
name: str
username: str
description: str
avatar_url: str
num_agents: int
agent_rating: float
agent_runs: int
is_featured: bool
class CreatorsResponse(pydantic.BaseModel):
creators: List[Creator]
pagination: Pagination
class CreatorDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
agent_rating: float
agent_runs: int
top_categories: list[str]
class Profile(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
is_featured: bool = False
class StoreSubmission(pydantic.BaseModel):
agent_id: str
agent_version: int
name: str
sub_heading: str
slug: str
description: str
image_urls: list[str]
date_submitted: datetime.datetime
status: prisma.enums.SubmissionStatus
runs: int
rating: float
class StoreSubmissionsResponse(pydantic.BaseModel):
submissions: list[StoreSubmission]
pagination: Pagination
class StoreSubmissionRequest(pydantic.BaseModel):
agent_id: str
agent_version: int
slug: str
name: str
sub_heading: str
video_url: str | None = None
image_urls: list[str] = []
description: str = ""
categories: list[str] = []
class ProfileDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str | None = None
class StoreReview(pydantic.BaseModel):
score: int
comments: str | None = None
class StoreReviewCreate(pydantic.BaseModel):
store_listing_version_id: str
score: int
comments: str | None = None
|
from __future__ import annotations
from typing import Any, Dict, Optional
from docarray import BaseDoc, DocList
from docarray.typing import AnyEmbedding, AnyTensor
class LegacyDocument(BaseDoc):
"""
This Document is the LegacyDocument. It follows the same schema as in DocArray v1.
It can be useful to start migrating a codebase from v1 to v2.
Nevertheless, the API is not totally compatible with DocArray v1 `Document`.
    Indeed, none of the methods associated with `Document` are present. Only the schema
of the data is similar.
```python
from docarray import DocList
from docarray.documents.legacy import LegacyDocument
import numpy as np
doc = LegacyDocument(text='hello')
doc.url = 'http://myimg.png'
doc.tensor = np.zeros((3, 224, 224))
doc.embedding = np.zeros((100, 1))
doc.tags['price'] = 10
doc.chunks = DocList[Document]([Document() for _ in range(10)])
    doc.matches = DocList[Document]([Document() for _ in range(10)])
```
"""
tensor: Optional[AnyTensor]
chunks: Optional[DocList[LegacyDocument]]
matches: Optional[DocList[LegacyDocument]]
blob: Optional[bytes]
text: Optional[str]
url: Optional[str]
embedding: Optional[AnyEmbedding]
tags: Dict[str, Any] = dict()
scores: Optional[Dict[str, Any]]
|
from __future__ import annotations
from typing import Any, Dict, Optional
from docarray import BaseDoc, DocList
from docarray.typing import AnyEmbedding, AnyTensor
class LegacyDocument(BaseDoc):
"""
    This Document is the LegacyDocument. It follows the same schema as in DocArray v1.
    It can be useful to start migrating a codebase from v1 to v2.
    Nevertheless, the API is not totally compatible with DocArray v1 `Document`.
    Indeed, none of the methods associated with `Document` are present. Only the schema
of the data is similar.
```python
from docarray import DocList
from docarray.documents.legacy import LegacyDocument
import numpy as np
doc = LegacyDocument(text='hello')
doc.url = 'http://myimg.png'
doc.tensor = np.zeros((3, 224, 224))
doc.embedding = np.zeros((100, 1))
doc.tags['price'] = 10
doc.chunks = DocList[Document]([Document() for _ in range(10)])
    doc.matches = DocList[Document]([Document() for _ in range(10)])
```
"""
tensor: Optional[AnyTensor]
chunks: Optional[DocList[LegacyDocument]]
matches: Optional[DocList[LegacyDocument]]
blob: Optional[bytes]
text: Optional[str]
url: Optional[str]
embedding: Optional[AnyEmbedding]
tags: Dict[str, Any] = dict()
scores: Optional[Dict[str, Any]]
|
import json
import logging
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Any, AsyncGenerator, Generator, Generic, TypeVar
from pydantic import BaseModel
from redis.asyncio.client import PubSub as AsyncPubSub
from redis.client import PubSub
from backend.data import redis
logger = logging.getLogger(__name__)
class DateTimeEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.isoformat()
return super().default(o)
M = TypeVar("M", bound=BaseModel)
class BaseRedisEventBus(Generic[M], ABC):
Model: type[M]
@property
@abstractmethod
def event_bus_name(self) -> str:
pass
def _serialize_message(self, item: M, channel_key: str) -> tuple[str, str]:
message = json.dumps(item.model_dump(), cls=DateTimeEncoder)
channel_name = f"{self.event_bus_name}/{channel_key}"
logger.info(f"[{channel_name}] Publishing an event to Redis {message}")
return message, channel_name
def _deserialize_message(self, msg: Any, channel_key: str) -> M | None:
message_type = "pmessage" if "*" in channel_key else "message"
if msg["type"] != message_type:
return None
try:
data = json.loads(msg["data"])
logger.info(f"Consuming an event from Redis {data}")
return self.Model(**data)
except Exception as e:
logger.error(f"Failed to parse event result from Redis {msg} {e}")
def _subscribe(
self, connection: redis.Redis | redis.AsyncRedis, channel_key: str
) -> tuple[PubSub | AsyncPubSub, str]:
channel_name = f"{self.event_bus_name}/{channel_key}"
pubsub = connection.pubsub()
return pubsub, channel_name
class RedisEventBus(BaseRedisEventBus[M], ABC):
Model: type[M]
@property
def connection(self) -> redis.Redis:
return redis.get_redis()
def publish_event(self, event: M, channel_key: str):
message, channel_name = self._serialize_message(event, channel_key)
self.connection.publish(channel_name, message)
def listen_events(self, channel_key: str) -> Generator[M, None, None]:
pubsub, channel_name = self._subscribe(self.connection, channel_key)
assert isinstance(pubsub, PubSub)
if "*" in channel_key:
pubsub.psubscribe(channel_name)
else:
pubsub.subscribe(channel_name)
for message in pubsub.listen():
if event := self._deserialize_message(message, channel_key):
yield event
class AsyncRedisEventBus(BaseRedisEventBus[M], ABC):
Model: type[M]
@property
async def connection(self) -> redis.AsyncRedis:
return await redis.get_redis_async()
async def publish_event(self, event: M, channel_key: str):
message, channel_name = self._serialize_message(event, channel_key)
connection = await self.connection
await connection.publish(channel_name, message)
async def listen_events(self, channel_key: str) -> AsyncGenerator[M, None]:
pubsub, channel_name = self._subscribe(await self.connection, channel_key)
assert isinstance(pubsub, AsyncPubSub)
if "*" in channel_key:
await pubsub.psubscribe(channel_name)
else:
await pubsub.subscribe(channel_name)
async for message in pubsub.listen():
if event := self._deserialize_message(message, channel_key):
yield event
|
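A hedged sketch of a concrete bus built on the classes above; the event model and channel key are illustrative, and it assumes `backend.data.redis` is importable at runtime.

```python
from pydantic import BaseModel

class JobEvent(BaseModel):
    job_id: str
    status: str

class JobEventBus(RedisEventBus[JobEvent]):
    Model = JobEvent

    @property
    def event_bus_name(self) -> str:
        return "job-events"

# bus = JobEventBus()
# bus.publish_event(JobEvent(job_id="123", status="done"), channel_key="123")
# for event in bus.listen_events("*"):  # '*' pattern-subscribes via psubscribe
#     print(event)
```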
import json
import logging
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Any, AsyncGenerator, Generator, Generic, TypeVar
from pydantic import BaseModel
from redis.asyncio.client import PubSub as AsyncPubSub
from redis.client import PubSub
from backend.data import redis
from backend.data.execution import ExecutionResult
from backend.util.settings import Config
logger = logging.getLogger(__name__)
config = Config()
class DateTimeEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.isoformat()
return super().default(o)
M = TypeVar("M", bound=BaseModel)
class BaseRedisEventBus(Generic[M], ABC):
Model: type[M]
@property
@abstractmethod
def event_bus_name(self) -> str:
pass
def _serialize_message(self, item: M, channel_key: str) -> tuple[str, str]:
message = json.dumps(item.model_dump(), cls=DateTimeEncoder)
channel_name = f"{self.event_bus_name}-{channel_key}"
logger.info(f"[{channel_name}] Publishing an event to Redis {message}")
return message, channel_name
def _deserialize_message(self, msg: Any, channel_key: str) -> M | None:
message_type = "pmessage" if "*" in channel_key else "message"
if msg["type"] != message_type:
return None
try:
data = json.loads(msg["data"])
logger.info(f"Consuming an event from Redis {data}")
return self.Model(**data)
except Exception as e:
logger.error(f"Failed to parse event result from Redis {msg} {e}")
def _subscribe(
self, connection: redis.Redis | redis.AsyncRedis, channel_key: str
) -> tuple[PubSub | AsyncPubSub, str]:
channel_name = f"{self.event_bus_name}-{channel_key}"
pubsub = connection.pubsub()
return pubsub, channel_name
class RedisEventBus(BaseRedisEventBus[M], ABC):
Model: type[M]
@property
def connection(self) -> redis.Redis:
return redis.get_redis()
def publish_event(self, event: M, channel_key: str):
message, channel_name = self._serialize_message(event, channel_key)
self.connection.publish(channel_name, message)
def listen_events(self, channel_key: str) -> Generator[M, None, None]:
pubsub, channel_name = self._subscribe(self.connection, channel_key)
assert isinstance(pubsub, PubSub)
if "*" in channel_key:
pubsub.psubscribe(channel_name)
else:
pubsub.subscribe(channel_name)
for message in pubsub.listen():
if event := self._deserialize_message(message, channel_key):
yield event
class AsyncRedisEventBus(BaseRedisEventBus[M], ABC):
Model: type[M]
@property
async def connection(self) -> redis.AsyncRedis:
return await redis.get_redis_async()
async def publish_event(self, event: M, channel_key: str):
message, channel_name = self._serialize_message(event, channel_key)
connection = await self.connection
await connection.publish(channel_name, message)
async def listen_events(self, channel_key: str) -> AsyncGenerator[M, None]:
pubsub, channel_name = self._subscribe(await self.connection, channel_key)
assert isinstance(pubsub, AsyncPubSub)
if "*" in channel_key:
await pubsub.psubscribe(channel_name)
else:
await pubsub.subscribe(channel_name)
async for message in pubsub.listen():
if event := self._deserialize_message(message, channel_key):
yield event
class RedisExecutionEventBus(RedisEventBus[ExecutionResult]):
Model = ExecutionResult
@property
def event_bus_name(self) -> str:
return config.execution_event_bus_name
def publish(self, res: ExecutionResult):
self.publish_event(res, f"{res.graph_id}-{res.graph_exec_id}")
def listen(
self, graph_id: str = "*", graph_exec_id: str = "*"
) -> Generator[ExecutionResult, None, None]:
for execution_result in self.listen_events(f"{graph_id}-{graph_exec_id}"):
yield execution_result
class AsyncRedisExecutionEventBus(AsyncRedisEventBus[ExecutionResult]):
Model = ExecutionResult
@property
def event_bus_name(self) -> str:
return config.execution_event_bus_name
async def publish(self, res: ExecutionResult):
await self.publish_event(res, f"{res.graph_id}-{res.graph_exec_id}")
async def listen(
self, graph_id: str = "*", graph_exec_id: str = "*"
) -> AsyncGenerator[ExecutionResult, None]:
async for execution_result in self.listen_events(f"{graph_id}-{graph_exec_id}"):
yield execution_result
|