"""
Prompts for implementing Chain of Abstraction.
While official prompts are not given (and the paper finetunes models for the task),
we can take inspiration and use few-shot prompting to generate a prompt for implementing
chain of abstraction in an LLM agent.
"""
REASONING_PROMPT_TEMPLATE = """Generate an abstract plan of reasoning using placeholders for the specific values and function calls needed.
The placeholders should be labeled y1, y2, etc.
Function calls should be represented as inline strings like [FUNC {{function_name}}({{input1}}, {{input2}}, ...) = {{output_placeholder}}].
Assume someone will read the plan after the functions have been executed in order to make a final response.
Not every question will require function calls to answer.
If you do invoke a function, only use the available functions, do not make up functions.
Example:
-----------
Available functions:
```python
def add(a: int, b: int) -> int:
\"\"\"Add two numbers together.\"\"\"
...
def multiply(a: int, b: int) -> int:
\"\"\"Multiply two numbers together.\"\"\"
...
```
Question:
Sally has 3 apples and buys 2 more. Then magically, a wizard casts a spell that multiplies the number of apples by 3. How many apples does Sally have now?
Abstract plan of reasoning:
After buying the apples, Sally has [FUNC add(3, 2) = y1] apples. Then, the wizard casts a spell to multiply the number of apples by 3, resulting in [FUNC multiply(y1, 3) = y2] apples.
Your Turn:
-----------
Available functions:
```python
{functions}
```
Question:
{question}
Abstract plan of reasoning:
"""
REFINE_REASONING_PROMPT_TEMPLATE = """Generate a response to a question by using a previous abstract plan of reasoning. Use the previous reasoning as context to write a response to the question.
Example:
-----------
Question:
Sally has 3 apples and buys 2 more. Then magically, a wizard casts a spell that multiplies the number of apples by 3. How many apples does Sally have now?
Previous reasoning:
After buying the apples, Sally has [FUNC add(3, 2) = 5] apples. Then, the wizard casts a spell to multiply the number of apples by 3, resulting in [FUNC multiply(5, 3) = 15] apples.
Response:
After the wizard casts the spell, Sally has 15 apples.
Your Turn:
-----------
Question:
{question}
Previous reasoning:
{prev_reasoning}
Response:
"""
|
"""Prompts for implementing Chain of Abstraction.
While official prompts are not given (and the paper finetunes models for the task),
we can take inspiration and use few-shot prompting to generate a prompt for implementing
chain of abstraction in an LLM agent.
"""
REASONING_PROMPT_TEMPLATE = """Generate an abstract plan of reasoning using placeholders for the specific values and function calls needed.
The placeholders should be labeled y1, y2, etc.
Function calls should be represented as inline strings like [FUNC {{function_name}}({{input1}}, {{input2}}, ...) = {{output_placeholder}}].
Assume someone will read the plan after the functions have been executed in order to make a final response.
Not every question will require function calls to answer.
If you do invoke a function, only use the available functions, do not make up functions.
Example:
-----------
Available functions:
```python
def add(a: int, b: int) -> int:
\"\"\"Add two numbers together.\"\"\"
...
def multiply(a: int, b: int) -> int:
\"\"\"Multiply two numbers together.\"\"\"
...
```
Question:
Sally has 3 apples and buys 2 more. Then magically, a wizard casts a spell that multiplies the number of apples by 3. How many apples does Sally have now?
Abstract plan of reasoning:
After buying the apples, Sally has [FUNC add(3, 2) = y1] apples. Then, the wizard casts a spell to multiply the number of apples by 3, resulting in [FUNC multiply(y1, 3) = y2] apples.
Your Turn:
-----------
Available functions:
```python
{functions}
```
Question:
{question}
Abstract plan of reasoning:
"""
REFINE_REASONING_PROMPT_TEMPLATE = """Generate a response to a question by using a previous abstract plan of reasoning. Use the previous reasoning as context to write a response to the question.
Example:
-----------
Question:
Sally has 3 apples and buys 2 more. Then magically, a wizard casts a spell that multiplies the number of apples by 3. How many apples does Sally have now?
Previous reasoning:
After buying the apples, Sally has [FUNC add(3, 2) = 5] apples. Then, the wizard casts a spell to multiply the number of apples by 3, resulting in [FUNC multiply(5, 3) = 15] apples.
Response:
After the wizard casts the spell, Sally has 15 apples.
Your Turn:
-----------
Question:
{question}
Previous reasoning:
{prev_reasoning}
Response:
"""
|
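A minimal sketch of how the two templates above might chain at inference time. Everything here is illustrative: the toy tools mirror the few-shot example, the plan string stands in for an LLM completion, and the marker-parsing regex is one possible way to execute a plan before feeding it to the refine template.

```python
import re

def add(a: int, b: int) -> int:
    """Add two numbers together."""
    return a + b

def multiply(a: int, b: int) -> int:
    """Multiply two numbers together."""
    return a * b

TOOLS = {"add": add, "multiply": multiply}

# A plan as an LLM might emit it from the reasoning template above.
plan = (
    "After buying the apples, Sally has [FUNC add(3, 2) = y1] apples. "
    "Then, the wizard casts a spell to multiply the number of apples by 3, "
    "resulting in [FUNC multiply(y1, 3) = y2] apples."
)

values = {}
# Resolve each [FUNC name(args) = placeholder] marker in order, feeding
# earlier placeholder values into later calls.
for name, args, out in re.findall(r"\[FUNC (\w+)\(([^)]*)\) = (\w+)\]", plan):
    resolved = [str(values.get(a.strip(), a.strip())) for a in args.split(",")]
    values[out] = TOOLS[name](*(int(v) for v in resolved))
    plan = plan.replace(f"[FUNC {name}({args}) = {out}]", str(values[out]))

print(plan)  # the executed plan becomes {prev_reasoning} for the refine template
```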
from typing import TYPE_CHECKING
import numpy as np
from docarray.dataclasses.enums import DocumentMetadata, ImageType
if TYPE_CHECKING: # pragma: no cover
from docarray import Document
def image_setter(value) -> 'Document':
from docarray import Document
doc = Document(modality='image')
if isinstance(value, str):
doc.uri = value
doc._metadata[DocumentMetadata.IMAGE_TYPE] = ImageType.URI
doc.load_uri_to_image_tensor()
elif isinstance(value, np.ndarray):
doc.tensor = value
doc._metadata[DocumentMetadata.IMAGE_TYPE] = ImageType.NDARRAY
else:
from PIL.Image import Image
if isinstance(value, Image):
doc.tensor = np.array(value)
doc._metadata[DocumentMetadata.IMAGE_TYPE] = ImageType.PIL
return doc
def text_setter(value) -> 'Document':
from docarray import Document
return Document(text=value, modality='text')
def uri_setter(value) -> 'Document':
from docarray import Document
return Document(uri=value)
def audio_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, np.ndarray):
return Document(
tensor=value, _metadata={DocumentMetadata.AUDIO_TYPE: 'ndarray'}
)
else:
return Document(
uri=value, modality='audio', _metadata={DocumentMetadata.AUDIO_TYPE: 'uri'}
).load_uri_to_audio_tensor()
def video_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, np.ndarray):
return Document(
tensor=value, _metadata={DocumentMetadata.VIDEO_TYPE: 'ndarray'}
)
else:
return Document(
uri=value, modality='video', _metadata={DocumentMetadata.VIDEO_TYPE: 'uri'}
).load_uri_to_video_tensor()
def mesh_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, np.ndarray):
return Document(tensor=value, _metadata={DocumentMetadata.MESH_TYPE: 'ndarray'})
else:
return Document(
uri=value, modality='mesh', _metadata={DocumentMetadata.MESH_TYPE: 'uri'}
).load_uri_to_point_cloud_tensor(1000)
def blob_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, bytes):
return Document(blob=value, _metadata={DocumentMetadata.BLOB_TYPE: 'bytes'})
else:
return Document(
uri=value, _metadata={DocumentMetadata.BLOB_TYPE: 'uri'}
).load_uri_to_blob()
def json_setter(value) -> 'Document':
from docarray import Document
return Document(modality='json', tags=value)
def tabular_setter(value) -> 'Document':
from docarray import Document, DocumentArray
return Document(uri=value, chunks=DocumentArray.from_csv(value), modality='tabular')
|
from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING: # pragma: no cover
from docarray import Document
def image_setter(value) -> 'Document':
from docarray import Document
doc = Document(modality='image')
if isinstance(value, str):
doc.uri = value
doc._metadata['image_type'] = 'uri'
doc.load_uri_to_image_tensor()
elif isinstance(value, np.ndarray):
doc.tensor = value
doc._metadata['image_type'] = 'ndarray'
else:
from PIL.Image import Image
if isinstance(value, Image):
doc.tensor = np.array(value)
doc._metadata['image_type'] = 'PIL'
return doc
def text_setter(value) -> 'Document':
from docarray import Document
return Document(text=value, modality='text')
def uri_setter(value) -> 'Document':
from docarray import Document
return Document(uri=value)
def audio_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, np.ndarray):
return Document(tensor=value, _metadata={'audio_type': 'ndarray'})
else:
return Document(
uri=value, modality='audio', _metadata={'audio_type': 'uri'}
).load_uri_to_audio_tensor()
def video_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, np.ndarray):
return Document(tensor=value, _metadata={'video_type': 'ndarray'})
else:
return Document(
uri=value, modality='video', _metadata={'video_type': 'uri'}
).load_uri_to_video_tensor()
def mesh_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, np.ndarray):
return Document(tensor=value, _metadata={'mesh_type': 'ndarray'})
else:
return Document(
uri=value, modality='mesh', _metadata={'mesh_type': 'uri'}
).load_uri_to_point_cloud_tensor(1000)
def blob_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, bytes):
return Document(blob=value, _metadata={'blob_type': 'bytes'})
else:
return Document(uri=value, _metadata={'blob_type': 'uri'}).load_uri_to_blob()
def json_setter(value) -> 'Document':
from docarray import Document
return Document(modality='json', tags=value)
def tabular_setter(value) -> 'Document':
from docarray import Document, DocumentArray
return Document(uri=value, chunks=DocumentArray.from_csv(value), modality='tabular')
|
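A short usage sketch for the setters above, assuming docarray (v1) and numpy are installed; the tensor shape is arbitrary.

```python
import numpy as np

# ndarray input: stored directly as the document tensor, with the source
# kind recorded in the document metadata.
doc = image_setter(np.zeros((8, 8, 3), dtype=np.uint8))
assert doc.modality == 'image'
assert doc.tensor.shape == (8, 8, 3)

# A plain string becomes a text-modality Document.
assert text_setter('hello').text == 'hello'
```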
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.storage import UpstashRedisByteStore, UpstashRedisStore
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"UpstashRedisStore": "langchain_community.storage",
"UpstashRedisByteStore": "langchain_community.storage",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"UpstashRedisByteStore",
"UpstashRedisStore",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.storage import UpstashRedisByteStore, UpstashRedisStore
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"UpstashRedisStore": "langchain_community.storage",
"UpstashRedisByteStore": "langchain_community.storage",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"UpstashRedisStore",
"UpstashRedisByteStore",
]
|
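For context, both variants behave the same at import time: accessing either name falls through the module-level `__getattr__` to `create_importer`, which re-exports the class from `langchain_community` and emits a deprecation warning. A sketch, with an assumed module path:

```python
# Assumed path of the shim module above; triggers the deprecation warning.
from langchain.storage.upstash_redis import UpstashRedisByteStore

# Equivalent direct import that skips the shim entirely.
from langchain_community.storage import UpstashRedisByteStore
```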
import torch
from ._bounding_boxes import BoundingBoxes, BoundingBoxFormat, is_rotated_bounding_format
from ._image import Image
from ._keypoints import KeyPoints
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._tv_tensor import TVTensor
from ._video import Video
# TODO: Fix this. We skip this method as it leads to
# RecursionError: maximum recursion depth exceeded while calling a Python object
# Until `disable` is removed, there will be graph breaks after all calls to functional transforms
@torch.compiler.disable
def wrap(wrappee, *, like, **kwargs):
"""Convert a :class:`torch.Tensor` (``wrappee``) into the same :class:`~torchvision.tv_tensors.TVTensor` subclass as ``like``.
If ``like`` is a :class:`~torchvision.tv_tensors.BoundingBoxes`, the ``format`` and ``canvas_size`` of
``like`` are assigned to ``wrappee``, unless they are passed as ``kwargs``.
Args:
wrappee (Tensor): The tensor to convert.
like (:class:`~torchvision.tv_tensors.TVTensor`): The reference.
``wrappee`` will be converted into the same subclass as ``like``.
        kwargs: Can contain "format" and "canvas_size" if ``like`` is a :class:`~torchvision.tv_tensors.BoundingBoxes`.
Ignored otherwise.
"""
if isinstance(like, BoundingBoxes):
return BoundingBoxes._wrap(
wrappee,
format=kwargs.get("format", like.format),
canvas_size=kwargs.get("canvas_size", like.canvas_size),
)
elif isinstance(like, KeyPoints):
return KeyPoints._wrap(wrappee, canvas_size=kwargs.get("canvas_size", like.canvas_size))
else:
return wrappee.as_subclass(type(like))
|
import torch
from ._bounding_boxes import BoundingBoxes, BoundingBoxFormat, is_rotated_bounding_format
from ._image import Image
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._tv_tensor import TVTensor
from ._video import Video
# TODO: Fix this. We skip this method as it leads to
# RecursionError: maximum recursion depth exceeded while calling a Python object
# Until `disable` is removed, there will be graph breaks after all calls to functional transforms
@torch.compiler.disable
def wrap(wrappee, *, like, **kwargs):
"""Convert a :class:`torch.Tensor` (``wrappee``) into the same :class:`~torchvision.tv_tensors.TVTensor` subclass as ``like``.
If ``like`` is a :class:`~torchvision.tv_tensors.BoundingBoxes`, the ``format`` and ``canvas_size`` of
``like`` are assigned to ``wrappee``, unless they are passed as ``kwargs``.
Args:
wrappee (Tensor): The tensor to convert.
like (:class:`~torchvision.tv_tensors.TVTensor`): The reference.
``wrappee`` will be converted into the same subclass as ``like``.
        kwargs: Can contain "format" and "canvas_size" if ``like`` is a :class:`~torchvision.tv_tensors.BoundingBoxes`.
Ignored otherwise.
"""
if isinstance(like, BoundingBoxes):
return BoundingBoxes._wrap(
wrappee,
format=kwargs.get("format", like.format),
canvas_size=kwargs.get("canvas_size", like.canvas_size),
)
else:
return wrappee.as_subclass(type(like))
|
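A small usage sketch of `wrap`, assuming a torchvision release that ships `tv_tensors`: most tensor ops on a `BoundingBoxes` return a plain `Tensor` by default, and `wrap` re-attaches the subclass along with its metadata.

```python
import torch
from torchvision import tv_tensors

boxes = tv_tensors.BoundingBoxes(
    torch.tensor([[0.0, 0.0, 10.0, 10.0]]),
    format=tv_tensors.BoundingBoxFormat.XYXY,
    canvas_size=(32, 32),
)
shifted = boxes + 2  # arithmetic returns a plain torch.Tensor by default
rewrapped = tv_tensors.wrap(shifted, like=boxes)
assert isinstance(rewrapped, tv_tensors.BoundingBoxes)
assert rewrapped.canvas_size == boxes.canvas_size  # metadata carried over
```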
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models import build_detector
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class TestDeformableDETR(TestCase):
def setUp(self):
register_all_modules()
def test_deformable_detr_head_loss(self):
"""Tests transformer head loss when truth is empty and non-empty."""
s = 256
metainfo = {
'img_shape': (s, s),
'scale_factor': (1, 1),
'pad_shape': (s, s),
'batch_input_shape': (s, s)
}
img_metas = DetDataSample()
img_metas.set_metainfo(metainfo)
batch_data_samples = []
batch_data_samples.append(img_metas)
configs = [
get_detector_cfg(
'deformable_detr/deformable-detr_r50_16xb2-50e_coco.py'),
get_detector_cfg(
'deformable_detr/deformable-detr_refine_r50_16xb2-50e_coco.py' # noqa
),
get_detector_cfg(
'deformable_detr/deformable-detr_refine_twostage_r50_16xb2-50e_coco.py' # noqa
)
]
for config in configs:
model = build_detector(config)
model.init_weights()
random_image = torch.rand(1, 3, s, s)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
img_metas.gt_instances = gt_instances
batch_data_samples1 = []
batch_data_samples1.append(img_metas)
empty_gt_losses = model.loss(
random_image, batch_data_samples=batch_data_samples1)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
for key, loss in empty_gt_losses.items():
if 'cls' in key:
self.assertGreater(loss.item(), 0,
'cls loss should be non-zero')
elif 'bbox' in key:
self.assertEqual(
loss.item(), 0,
                        'there should be no box loss when there are no ground truth boxes'
)
elif 'iou' in key:
self.assertEqual(
loss.item(), 0,
                        'there should be no iou loss when there are no ground truth boxes'
)
# When truth is non-empty then both cls and box loss should
# be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
img_metas.gt_instances = gt_instances
batch_data_samples2 = []
batch_data_samples2.append(img_metas)
one_gt_losses = model.loss(
random_image, batch_data_samples=batch_data_samples2)
for loss in one_gt_losses.values():
self.assertGreater(
loss.item(), 0,
'cls loss, or box loss, or iou loss should be non-zero')
model.eval()
# test _forward
model._forward(
random_image, batch_data_samples=batch_data_samples2)
# test only predict
model.predict(
random_image,
batch_data_samples=batch_data_samples2,
rescale=True)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models import build_detector
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class TestDeformableDETR(TestCase):
def setUp(self):
register_all_modules()
def test_detr_head_loss(self):
"""Tests transformer head loss when truth is empty and non-empty."""
s = 256
metainfo = {
'img_shape': (s, s),
'scale_factor': (1, 1),
'pad_shape': (s, s),
'batch_input_shape': (s, s)
}
img_metas = DetDataSample()
img_metas.set_metainfo(metainfo)
batch_data_samples = []
batch_data_samples.append(img_metas)
configs = [
get_detector_cfg(
'deformable_detr/deformable-detr_r50_16xb2-50e_coco.py'),
get_detector_cfg(
'deformable_detr/deformable-detr_refine_r50_16xb2-50e_coco.py' # noqa
),
get_detector_cfg(
'deformable_detr/deformable-detr_refine_twostage_r50_16xb2-50e_coco.py' # noqa
)
]
for config in configs:
model = build_detector(config)
model.init_weights()
random_image = torch.rand(1, 3, s, s)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
img_metas.gt_instances = gt_instances
batch_data_samples1 = []
batch_data_samples1.append(img_metas)
empty_gt_losses = model.loss(
random_image, batch_data_samples=batch_data_samples1)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
for key, loss in empty_gt_losses.items():
if 'cls' in key:
self.assertGreater(loss.item(), 0,
'cls loss should be non-zero')
elif 'bbox' in key:
self.assertEqual(
loss.item(), 0,
                        'there should be no box loss when there are no ground truth boxes'
)
elif 'iou' in key:
self.assertEqual(
loss.item(), 0,
                        'there should be no iou loss when there are no ground truth boxes'
)
# When truth is non-empty then both cls and box loss should
# be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
img_metas.gt_instances = gt_instances
batch_data_samples2 = []
batch_data_samples2.append(img_metas)
one_gt_losses = model.loss(
random_image, batch_data_samples=batch_data_samples2)
for loss in one_gt_losses.values():
self.assertGreater(
loss.item(), 0,
'cls loss, or box loss, or iou loss should be non-zero')
# test _forward
model._forward(
random_image, batch_data_samples=batch_data_samples2)
# test only predict
model.predict(
random_image,
batch_data_samples=batch_data_samples2,
rescale=True)
|
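For readers unfamiliar with mmengine, a standalone sketch of the "empty ground truth" sample the tests build above; `InstanceData` simply stores per-instance tensors as attributes that must agree on the instance dimension.

```python
import torch
from mmengine.structures import InstanceData

empty = InstanceData()
empty.bboxes = torch.empty((0, 4))   # zero boxes in (x1, y1, x2, y2) layout
empty.labels = torch.LongTensor([])  # zero class labels
assert len(empty) == 0               # no instances
```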
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, LDMSuperResolutionPipeline, UNet2DModel, VQModel
from diffusers.utils import PIL_INTERPOLATION
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
nightly,
require_accelerator,
require_torch,
torch_device,
)
enable_full_determinism()
class LDMSuperResolutionPipelineFastTests(unittest.TestCase):
@property
def dummy_image(self):
batch_size = 1
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
return image
@property
def dummy_uncond_unet(self):
torch.manual_seed(0)
model = UNet2DModel(
block_out_channels=(32, 64),
layers_per_block=2,
sample_size=32,
in_channels=6,
out_channels=3,
down_block_types=("DownBlock2D", "AttnDownBlock2D"),
up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
return model
@property
def dummy_vq_model(self):
torch.manual_seed(0)
model = VQModel(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=3,
)
return model
def test_inference_superresolution(self):
device = "cpu"
unet = self.dummy_uncond_unet
scheduler = DDIMScheduler()
vqvae = self.dummy_vq_model
ldm = LDMSuperResolutionPipeline(unet=unet, vqvae=vqvae, scheduler=scheduler)
ldm.to(device)
ldm.set_progress_bar_config(disable=None)
init_image = self.dummy_image.to(device)
generator = torch.Generator(device=device).manual_seed(0)
image = ldm(image=init_image, generator=generator, num_inference_steps=2, output_type="np").images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.8678, 0.8245, 0.6381, 0.6830, 0.4385, 0.5599, 0.4641, 0.6201, 0.5150])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@require_accelerator
def test_inference_superresolution_fp16(self):
unet = self.dummy_uncond_unet
scheduler = DDIMScheduler()
vqvae = self.dummy_vq_model
# put models in fp16
unet = unet.half()
vqvae = vqvae.half()
ldm = LDMSuperResolutionPipeline(unet=unet, vqvae=vqvae, scheduler=scheduler)
ldm.to(torch_device)
ldm.set_progress_bar_config(disable=None)
init_image = self.dummy_image.to(torch_device)
image = ldm(init_image, num_inference_steps=2, output_type="np").images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch
class LDMSuperResolutionPipelineIntegrationTests(unittest.TestCase):
def test_inference_superresolution(self):
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/vq_diffusion/teddy_bear_pool.png"
)
init_image = init_image.resize((64, 64), resample=PIL_INTERPOLATION["lanczos"])
ldm = LDMSuperResolutionPipeline.from_pretrained("duongna/ldm-super-resolution")
ldm.set_progress_bar_config(disable=None)
generator = torch.manual_seed(0)
image = ldm(image=init_image, generator=generator, num_inference_steps=20, output_type="np").images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
expected_slice = np.array([0.7644, 0.7679, 0.7642, 0.7633, 0.7666, 0.7560, 0.7425, 0.7257, 0.6907])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, LDMSuperResolutionPipeline, UNet2DModel, VQModel
from diffusers.utils import PIL_INTERPOLATION
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
nightly,
require_torch,
torch_device,
)
enable_full_determinism()
class LDMSuperResolutionPipelineFastTests(unittest.TestCase):
@property
def dummy_image(self):
batch_size = 1
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
return image
@property
def dummy_uncond_unet(self):
torch.manual_seed(0)
model = UNet2DModel(
block_out_channels=(32, 64),
layers_per_block=2,
sample_size=32,
in_channels=6,
out_channels=3,
down_block_types=("DownBlock2D", "AttnDownBlock2D"),
up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
return model
@property
def dummy_vq_model(self):
torch.manual_seed(0)
model = VQModel(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=3,
)
return model
def test_inference_superresolution(self):
device = "cpu"
unet = self.dummy_uncond_unet
scheduler = DDIMScheduler()
vqvae = self.dummy_vq_model
ldm = LDMSuperResolutionPipeline(unet=unet, vqvae=vqvae, scheduler=scheduler)
ldm.to(device)
ldm.set_progress_bar_config(disable=None)
init_image = self.dummy_image.to(device)
generator = torch.Generator(device=device).manual_seed(0)
image = ldm(image=init_image, generator=generator, num_inference_steps=2, output_type="np").images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.8678, 0.8245, 0.6381, 0.6830, 0.4385, 0.5599, 0.4641, 0.6201, 0.5150])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
def test_inference_superresolution_fp16(self):
unet = self.dummy_uncond_unet
scheduler = DDIMScheduler()
vqvae = self.dummy_vq_model
# put models in fp16
unet = unet.half()
vqvae = vqvae.half()
ldm = LDMSuperResolutionPipeline(unet=unet, vqvae=vqvae, scheduler=scheduler)
ldm.to(torch_device)
ldm.set_progress_bar_config(disable=None)
init_image = self.dummy_image.to(torch_device)
image = ldm(init_image, num_inference_steps=2, output_type="np").images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch
class LDMSuperResolutionPipelineIntegrationTests(unittest.TestCase):
def test_inference_superresolution(self):
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/vq_diffusion/teddy_bear_pool.png"
)
init_image = init_image.resize((64, 64), resample=PIL_INTERPOLATION["lanczos"])
ldm = LDMSuperResolutionPipeline.from_pretrained("duongna/ldm-super-resolution")
ldm.set_progress_bar_config(disable=None)
generator = torch.manual_seed(0)
image = ldm(image=init_image, generator=generator, num_inference_steps=20, output_type="np").images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
expected_slice = np.array([0.7644, 0.7679, 0.7642, 0.7633, 0.7666, 0.7560, 0.7425, 0.7257, 0.6907])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
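Distilled from the tests above, a hedged end-user sketch of the pipeline (same checkpoint as the integration test; the file names are hypothetical; the pipeline upscales 64x64 inputs to 256x256):

```python
import torch
from PIL import Image
from diffusers import LDMSuperResolutionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = LDMSuperResolutionPipeline.from_pretrained("duongna/ldm-super-resolution")
pipe = pipe.to(device)

low_res = Image.open("my_image.png").resize((64, 64))  # hypothetical input file
upscaled = pipe(image=low_res, num_inference_steps=20).images[0]
upscaled.save("my_image_4x.png")
```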
from typing import Literal
from pydantic import SecretStr
from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
HubSpotCredentials = APIKeyCredentials
HubSpotCredentialsInput = CredentialsMetaInput[
Literal["hubspot"],
Literal["api_key"],
]
def HubSpotCredentialsField() -> HubSpotCredentialsInput:
"""Creates a HubSpot credentials input on a block."""
return CredentialsField(
provider="hubspot",
supported_credential_types={"api_key"},
description="The HubSpot integration requires an API Key.",
)
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="hubspot",
api_key=SecretStr("mock-hubspot-api-key"),
title="Mock HubSpot API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.title,
}
|
from typing import Literal
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr
from backend.data.model import CredentialsField, CredentialsMetaInput
HubSpotCredentials = APIKeyCredentials
HubSpotCredentialsInput = CredentialsMetaInput[
Literal["hubspot"],
Literal["api_key"],
]
def HubSpotCredentialsField() -> HubSpotCredentialsInput:
"""Creates a HubSpot credentials input on a block."""
return CredentialsField(
provider="hubspot",
supported_credential_types={"api_key"},
description="The HubSpot integration requires an API Key.",
)
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="hubspot",
api_key=SecretStr("mock-hubspot-api-key"),
title="Mock HubSpot API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.title,
}
|
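A quick note on the `SecretStr` wrapper used for the mock key above: pydantic keeps the value redacted in reprs and logs until it is read explicitly.

```python
from pydantic import SecretStr

key = SecretStr("mock-hubspot-api-key")
print(key)                     # **********
print(key.get_secret_value())  # mock-hubspot-api-key
```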
import pathlib
from typing import Any, Callable, Optional, Tuple
from PIL import Image
from .utils import download_and_extract_archive, download_url, verify_str_arg
from .vision import VisionDataset
class StanfordCars(VisionDataset):
"""`Stanford Cars <https://ai.stanford.edu/~jkrause/cars/car_dataset.html>`_ Dataset
The Cars dataset contains 16,185 images of 196 classes of cars. The data is
split into 8,144 training images and 8,041 testing images, where each class
    has been split roughly 50-50.
.. note::
This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.
Args:
root (string): Root directory of dataset
split (string, optional): The dataset split, supports ``"train"`` (default) or ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again."""
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
try:
import scipy.io as sio
except ImportError:
raise RuntimeError("Scipy is not found. This dataset needs to have scipy installed: pip install scipy")
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "test"))
self._base_folder = pathlib.Path(root) / "stanford_cars"
devkit = self._base_folder / "devkit"
if self._split == "train":
self._annotations_mat_path = devkit / "cars_train_annos.mat"
self._images_base_path = self._base_folder / "cars_train"
else:
self._annotations_mat_path = self._base_folder / "cars_test_annos_withlabels.mat"
self._images_base_path = self._base_folder / "cars_test"
if download:
self.download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._samples = [
(
str(self._images_base_path / annotation["fname"]),
annotation["class"] - 1, # Original target mapping starts from 1, hence -1
)
for annotation in sio.loadmat(self._annotations_mat_path, squeeze_me=True)["annotations"]
]
self.classes = sio.loadmat(str(devkit / "cars_meta.mat"), squeeze_me=True)["class_names"].tolist()
self.class_to_idx = {cls: i for i, cls in enumerate(self.classes)}
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
"""Returns pil_image and class_id for given index"""
image_path, target = self._samples[idx]
pil_image = Image.open(image_path).convert("RGB")
if self.transform is not None:
pil_image = self.transform(pil_image)
if self.target_transform is not None:
target = self.target_transform(target)
return pil_image, target
def download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(
url="https://ai.stanford.edu/~jkrause/cars/car_devkit.tgz",
download_root=str(self._base_folder),
md5="c3b158d763b6e2245038c8ad08e45376",
)
if self._split == "train":
download_and_extract_archive(
url="https://ai.stanford.edu/~jkrause/car196/cars_train.tgz",
download_root=str(self._base_folder),
md5="065e5b463ae28d29e77c1b4b166cfe61",
)
else:
download_and_extract_archive(
url="https://ai.stanford.edu/~jkrause/car196/cars_test.tgz",
download_root=str(self._base_folder),
md5="4ce7ebf6a94d07f1952d94dd34c4d501",
)
download_url(
url="https://ai.stanford.edu/~jkrause/car196/cars_test_annos_withlabels.mat",
root=str(self._base_folder),
md5="b0a2b23655a3edd16d84508592a98d10",
)
def _check_exists(self) -> bool:
if not (self._base_folder / "devkit").is_dir():
return False
return self._annotations_mat_path.exists() and self._images_base_path.is_dir()
|
import pathlib
from typing import Any, Callable, Optional, Tuple
from PIL import Image
from .utils import download_and_extract_archive, download_url, verify_str_arg
from .vision import VisionDataset
class StanfordCars(VisionDataset):
"""`Stanford Cars <https://ai.stanford.edu/~jkrause/cars/car_dataset.html>`_ Dataset
The Cars dataset contains 16,185 images of 196 classes of cars. The data is
split into 8,144 training images and 8,041 testing images, where each class
    has been split roughly 50-50.
.. note::
This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.
Args:
root (string): Root directory of dataset
split (string, optional): The dataset split, supports ``"train"`` (default) or ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again."""
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
try:
import scipy.io as sio
except ImportError:
raise RuntimeError("Scipy is not found. This dataset needs to have scipy installed: pip install scipy")
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "test"))
self._base_folder = pathlib.Path(root) / "stanford_cars"
devkit = self._base_folder / "devkit"
if self._split == "train":
self._annotations_mat_path = devkit / "cars_train_annos.mat"
self._images_base_path = self._base_folder / "cars_train"
else:
self._annotations_mat_path = self._base_folder / "cars_test_annos_withlabels.mat"
self._images_base_path = self._base_folder / "cars_test"
if download:
self.download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._samples = [
(
str(self._images_base_path / annotation["fname"]),
annotation["class"] - 1, # Original target mapping starts from 1, hence -1
)
for annotation in sio.loadmat(self._annotations_mat_path, squeeze_me=True)["annotations"]
]
self.classes = sio.loadmat(str(devkit / "cars_meta.mat"), squeeze_me=True)["class_names"].tolist()
self.class_to_idx = {cls: i for i, cls in enumerate(self.classes)}
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
"""Returns pil_image and class_id for given index"""
image_path, target = self._samples[idx]
pil_image = Image.open(image_path).convert("RGB")
if self.transform is not None:
pil_image = self.transform(pil_image)
if self.target_transform is not None:
target = self.target_transform(target)
return pil_image, target
def download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(
url="https://ai.stanford.edu/~jkrause/cars/car_devkit.tgz",
download_root=str(self._base_folder),
md5="c3b158d763b6e2245038c8ad08e45376",
)
if self._split == "train":
download_and_extract_archive(
url="https://ai.stanford.edu/~jkrause/car196/cars_train.tgz",
download_root=str(self._base_folder),
md5="065e5b463ae28d29e77c1b4b166cfe61",
)
else:
download_and_extract_archive(
url="https://ai.stanford.edu/~jkrause/car196/cars_test.tgz",
download_root=str(self._base_folder),
md5="4ce7ebf6a94d07f1952d94dd34c4d501",
)
download_url(
url="https://ai.stanford.edu/~jkrause/car196/cars_test_annos_withlabels.mat",
root=str(self._base_folder),
md5="b0a2b23655a3edd16d84508592a98d10",
)
def _check_exists(self) -> bool:
if not (self._base_folder / "devkit").is_dir():
return False
return self._annotations_mat_path.exists() and self._images_base_path.is_dir()
|
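A minimal training-side sketch for the dataset above, using standard torch/torchvision APIs; the root path is illustrative, and `download=True` assumes the upstream Stanford archives are still reachable.

```python
import torch
from torchvision import transforms
from torchvision.datasets import StanfordCars

dataset = StanfordCars(
    root="data",  # illustrative path
    split="train",
    transform=transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
    ]),
    download=True,  # assumes the archives are still hosted upstream
)
loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
images, targets = next(iter(loader))  # (32, 3, 224, 224), (32,)
```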
import os
import pytest
import respx
from llama_index.postprocessor.nvidia_rerank import NVIDIARerank as Interface
from llama_index.core.schema import NodeWithScore, Document
from typing import Any
@pytest.fixture()
def mock_local_models(respx_mock: respx.MockRouter) -> None:
respx_mock.get("https://test_url/v1/models").respond(
json={"data": [{"id": "model1"}]}
)
def get_api_key(instance: Any) -> str:
return instance._api_key
def test_create_default_url_without_api_key(masked_env_var: str) -> None:
with pytest.raises(ValueError) as e:
Interface()
assert "API key is required" in str(e.value)
@pytest.mark.usefixtures("mock_local_models")
def test_create_unknown_url_without_api_key(masked_env_var: str) -> None:
Interface(base_url="https://test_url/v1")
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_create_with_api_key(param: str, masked_env_var: str) -> None:
instance = Interface(**{param: "just testing no failure"})
assert get_api_key(instance) == "just testing no failure"
def test_api_key_priority(masked_env_var: str) -> None:
try:
os.environ["NVIDIA_API_KEY"] = "ENV"
assert get_api_key(Interface()) == "ENV"
assert get_api_key(Interface(nvidia_api_key="PARAM")) == "PARAM"
assert get_api_key(Interface(api_key="PARAM")) == "PARAM"
assert get_api_key(Interface(api_key="LOW", nvidia_api_key="HIGH")) == "HIGH"
finally:
# we must clean up environ or it may impact other tests
del os.environ["NVIDIA_API_KEY"]
@pytest.mark.integration
def test_bogus_api_key_error(masked_env_var: str) -> None:
client = Interface(nvidia_api_key="BOGUS")
with pytest.raises(Exception) as exc_info:
client.postprocess_nodes(
[NodeWithScore(node=Document(text="Hello, world!"))],
query_str="Hello, world!",
)
message = str(exc_info.value)
assert "401" in message
@pytest.mark.integration
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_api_key(model: str, mode: dict, param: str, masked_env_var: str) -> None:
client = Interface(model=model, **{**mode, **{param: masked_env_var}})
assert client.postprocess_nodes(
[NodeWithScore(node=Document(text="Hello, world!"))],
query_str="Hello, world!",
)
|
import os
import pytest
import respx
from llama_index.postprocessor.nvidia_rerank import NVIDIARerank as Interface
from llama_index.core.schema import NodeWithScore, Document
from typing import Any
@pytest.fixture()
def mock_local_models(respx_mock: respx.MockRouter) -> None:
respx_mock.get("https://test_url/v1/models").respond(
json={"data": [{"id": "model1"}]}
)
def get_api_key(instance: Any) -> str:
return instance._api_key
def test_create_default_url_without_api_key(masked_env_var: str) -> None:
with pytest.raises(ValueError) as e:
Interface()
assert "API key is required" in str(e.value)
@pytest.mark.usefixtures("mock_local_models")
def test_create_unknown_url_without_api_key(masked_env_var: str) -> None:
Interface(base_url="https://test_url/v1")
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_create_with_api_key(param: str, masked_env_var: str) -> None:
instance = Interface(**{param: "just testing no failure"})
assert get_api_key(instance) == "just testing no failure"
def test_api_key_priority(masked_env_var: str) -> None:
try:
os.environ["NVIDIA_API_KEY"] = "ENV"
assert get_api_key(Interface()) == "ENV"
assert get_api_key(Interface(nvidia_api_key="PARAM")) == "PARAM"
assert get_api_key(Interface(api_key="PARAM")) == "PARAM"
assert get_api_key(Interface(api_key="LOW", nvidia_api_key="HIGH")) == "HIGH"
finally:
# we must clean up environ or it may impact other tests
del os.environ["NVIDIA_API_KEY"]
@pytest.mark.integration()
def test_bogus_api_key_error(masked_env_var: str) -> None:
client = Interface(nvidia_api_key="BOGUS")
with pytest.raises(Exception) as exc_info:
client.postprocess_nodes(
[NodeWithScore(node=Document(text="Hello, world!"))],
query_str="Hello, world!",
)
message = str(exc_info.value)
assert "401" in message
@pytest.mark.integration()
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_api_key(model: str, mode: dict, param: str, masked_env_var: str) -> None:
client = Interface(model=model, **{**mode, **{param: masked_env_var}})
assert client.postprocess_nodes(
[NodeWithScore(node=Document(text="Hello, world!"))],
query_str="Hello, world!",
)
|
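As a side note, the try/finally dance in `test_api_key_priority` can be avoided with pytest's `monkeypatch` fixture, which restores `NVIDIA_API_KEY` automatically; a sketch:

```python
def test_api_key_priority_monkeypatch(
    monkeypatch: pytest.MonkeyPatch, masked_env_var: str
) -> None:
    # setenv is undone automatically when the test finishes.
    monkeypatch.setenv("NVIDIA_API_KEY", "ENV")
    assert get_api_key(Interface()) == "ENV"
    assert get_api_key(Interface(nvidia_api_key="PARAM")) == "PARAM"
    assert get_api_key(Interface(api_key="LOW", nvidia_api_key="HIGH")) == "HIGH"
```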
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import VGG
from mmengine.hooks import Hook
from mmengine.runner import Runner
from mmdet.registry import HOOKS
@HOOKS.register_module()
class NumClassCheckHook(Hook):
"""Check whether the `num_classes` in head matches the length of `CLASSES`
in `dataset.metainfo`."""
def _check_head(self, runner: Runner, mode: str) -> None:
"""Check whether the `num_classes` in head matches the length of
`CLASSES` in `dataset.metainfo`.
Args:
runner (:obj:`Runner`): The runner of the training or evaluation
process.
"""
assert mode in ['train', 'val']
model = runner.model
dataset = runner.train_dataloader.dataset if mode == 'train' else \
runner.val_dataloader.dataset
if dataset.metainfo.get('CLASSES', None) is None:
            runner.logger.warning(
                f'Please set `CLASSES` '
                f'in the {dataset.__class__.__name__} `metainfo` and '
                f'check if it is consistent with the `num_classes` '
                f'of the head')
else:
CLASSES = dataset.metainfo['CLASSES']
assert type(CLASSES) is not str, \
                (f'`CLASSES` in {dataset.__class__.__name__} '
                 f'should be a tuple of str. '
                 f'Add a comma if the number of classes is 1, as in '
                 f'CLASSES = ({CLASSES},)')
from mmdet.models.roi_heads.mask_heads import FusedSemanticHead
for name, module in model.named_modules():
if hasattr(module, 'num_classes') and not name.endswith(
'rpn_head') and not isinstance(
module, (VGG, FusedSemanticHead)):
assert module.num_classes == len(CLASSES), \
(f'The `num_classes` ({module.num_classes}) in '
f'{module.__class__.__name__} of '
                     f'{model.__class__.__name__} does not match '
                     f'the length of `CLASSES` '
                     f'({len(CLASSES)}) in '
f'{dataset.__class__.__name__}')
def before_train_epoch(self, runner: Runner) -> None:
"""Check whether the training dataset is compatible with head.
Args:
runner (:obj:`Runner`): The runner of the training or evaluation
process.
"""
self._check_head(runner, 'train')
def before_val_epoch(self, runner: Runner) -> None:
"""Check whether the dataset in val epoch is compatible with head.
Args:
runner (:obj:`Runner`): The runner of the training or evaluation
process.
"""
self._check_head(runner, 'val')
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import VGG
from mmengine.hooks import Hook
from mmengine.runner import Runner
from mmdet.registry import HOOKS
@HOOKS.register_module()
class NumClassCheckHook(Hook):
"""Check whether the `num_classes` in head matches the length of `CLASSES`
in `dataset.metainfo`."""
def _check_head(self, runner: Runner, mode: str) -> None:
"""Check whether the `num_classes` in head matches the length of
`CLASSES` in `dataset.metainfo`.
Args:
runner (:obj:`Runner`): The runner of the training or evaluation
process.
"""
assert mode in ['train', 'val']
model = runner.model
dataset = runner.train_dataloader.dataset if mode == 'train' else \
runner.val_dataloader.dataset
if dataset.metainfo.get('CLASSES', None) is None:
            runner.logger.warning(
                f'Please set `CLASSES` '
                f'in the {dataset.__class__.__name__} `metainfo` and '
                f'check if it is consistent with the `num_classes` '
                f'of the head')
else:
CLASSES = dataset.metainfo['CLASSES']
assert type(CLASSES) is not str, \
                (f'`CLASSES` in {dataset.__class__.__name__} '
                 f'should be a tuple of str. '
                 f'Add a comma if the number of classes is 1, as in '
                 f'CLASSES = ({CLASSES},)')
from mmdet.models.roi_heads.mask_heads import FusedSemanticHead
for name, module in model.named_modules():
if hasattr(module, 'num_classes'
) and name != 'rpn_head' and not isinstance(
module, (VGG, FusedSemanticHead)):
assert module.num_classes == len(CLASSES), \
(f'The `num_classes` ({module.num_classes}) in '
f'{module.__class__.__name__} of '
                     f'{model.__class__.__name__} does not match '
                     f'the length of `CLASSES` '
                     f'({len(CLASSES)}) in '
f'{dataset.__class__.__name__}')
def before_train_epoch(self, runner: Runner) -> None:
"""Check whether the training dataset is compatible with head.
Args:
runner (:obj:`Runner`): The runner of the training or evaluation
process.
"""
self._check_head(runner, 'train')
def before_val_epoch(self, runner: Runner) -> None:
"""Check whether the dataset in val epoch is compatible with head.
Args:
runner (:obj:`Runner`): The runner of the training or evaluation
process.
"""
self._check_head(runner, 'val')
|
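Since the hook is registered in `HOOKS`, enabling it from an MMDetection config is a one-liner; a minimal sketch:

```python
# Somewhere in an mmdet config file: the registry resolves the hook by name.
custom_hooks = [dict(type='NumClassCheckHook')]
```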
from __future__ import annotations
from collections.abc import Iterable
from typing import Any
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
        ``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
Args:
model: SentenceTransformerModel
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CoSENTLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
def get_config_dict(self) -> dict[str, Any]:
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
@property
def citation(self) -> str:
return """
@online{kexuefm-8847,
title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
author={Su Jianlin},
year={2022},
month={Jan},
url={https://kexue.fm/archives/8847},
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
from typing import Any
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
Args:
model: SentenceTransformerModel
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CoSENTLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
def get_config_dict(self) -> dict[str, Any]:
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
@property
def citation(self) -> str:
return """
@online{kexuefm-8847,
title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
author={Su Jianlin},
year={2022},
month={Jan},
url={https://kexue.fm/archives/8847},
}
"""
|
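A tiny numeric sketch of the core of `forward`, with three pairs and no model, reproducing the masking-plus-logsumexp trick line by line:

```python
import torch

s = torch.tensor([0.9, 0.1, 0.5]) * 20.0  # scaled pairwise cosine scores
y = torch.tensor([1.0, 0.0, 0.5])         # gold similarity labels

diff = s[:, None] - s[None, :]             # diff[i, j] = s_i - s_j
mask = (y[:, None] < y[None, :]).float()   # keep (i, j) where y_i < y_j
diff = diff - (1 - mask) * 1e12            # irrelevant pairs -> ~ -inf after exp
loss = torch.logsumexp(
    torch.cat((torch.zeros(1), diff.view(-1))), dim=0
)  # log(1 + sum of exp(s_lower - s_higher)) over misordered pairs
print(loss)
```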
"""Base tool spec class."""
import asyncio
from inspect import signature
from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import ToolMetadata
AsyncCallable = Callable[..., Awaitable[Any]]
# TODO: deprecate the Tuple (there's no use for it)
SPEC_FUNCTION_TYPE = Union[str, Tuple[str, str]]
class BaseToolSpec:
"""Base tool spec class."""
# list of functions that you'd want to convert to spec
spec_functions: List[SPEC_FUNCTION_TYPE]
def get_fn_schema_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[Type[BaseModel]]:
"""
NOTE: This function is deprecated and kept only for backwards compatibility.
Return map from function name.
Return type is Optional, meaning that the schema can be None.
In this case, it's up to the downstream tool implementation to infer the schema.
"""
return None
def get_metadata_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[ToolMetadata]:
"""
NOTE: This function is deprecated and kept only for backwards compatibility.
Return map from function name.
Return type is Optional, meaning that the schema can be None.
In this case, it's up to the downstream tool implementation to infer the schema.
"""
schema = self.get_fn_schema_from_fn_name(fn_name, spec_functions=spec_functions)
if schema is None:
return None
func = getattr(self, fn_name)
name = fn_name
docstring = func.__doc__ or ""
description = f"{name}{signature(func)}\n{docstring}"
        # Reuse the schema fetched above rather than looking it up twice.
        return ToolMetadata(name=name, description=description, fn_schema=schema)
def to_tool_list(
self,
spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None,
func_to_metadata_mapping: Optional[Dict[str, ToolMetadata]] = None,
) -> List[FunctionTool]:
"""Convert tool spec to list of tools."""
spec_functions = spec_functions or self.spec_functions
func_to_metadata_mapping = func_to_metadata_mapping or {}
tool_list = []
for func_spec in spec_functions:
func_sync = None
func_async = None
if isinstance(func_spec, str):
func = getattr(self, func_spec)
if asyncio.iscoroutinefunction(func):
func_async = func
else:
func_sync = func
metadata = func_to_metadata_mapping.get(func_spec, None)
if metadata is None:
metadata = self.get_metadata_from_fn_name(func_spec)
elif isinstance(func_spec, tuple) and len(func_spec) == 2:
func_sync = getattr(self, func_spec[0])
func_async = getattr(self, func_spec[1])
metadata = func_to_metadata_mapping.get(func_spec[0], None)
if metadata is None:
metadata = func_to_metadata_mapping.get(func_spec[1], None)
if metadata is None:
metadata = self.get_metadata_from_fn_name(func_spec[0])
else:
raise ValueError(
"spec_functions must be of type: List[Union[str, Tuple[str, str]]]"
)
if func_sync is None:
if func_async is not None:
func_sync = patch_sync(func_async)
else:
raise ValueError(
f"Could not retrieve a function for spec: {func_spec}"
)
tool = FunctionTool.from_defaults(
fn=func_sync,
async_fn=func_async,
tool_metadata=metadata,
)
tool_list.append(tool)
return tool_list
def patch_sync(func_async: AsyncCallable) -> Callable:
"""Patch sync function from async function."""
def patched_sync(*args: Any, **kwargs: Any) -> Any:
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(func_async(*args, **kwargs))
return patched_sync
|
"""Base tool spec class."""
import asyncio
from inspect import signature
from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import ToolMetadata
AsyncCallable = Callable[..., Awaitable[Any]]
# TODO: deprecate the Tuple (there's no use for it)
SPEC_FUNCTION_TYPE = Union[str, Tuple[str, str]]
class BaseToolSpec:
"""Base tool spec class."""
# list of functions that you'd want to convert to spec
spec_functions: List[SPEC_FUNCTION_TYPE]
def get_fn_schema_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[Type[BaseModel]]:
"""
NOTE: This function is deprecated and kept only for backwards compatibility.
Return the function schema for the given function name.
Return type is Optional, meaning that the schema can be None.
In this case, it's up to the downstream tool implementation to infer the schema.
"""
return None
def get_metadata_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[ToolMetadata]:
"""
NOTE: This function is deprecated and kept only for backwards compatibility.
Return the tool metadata for the given function name.
Return type is Optional, meaning that the schema can be None.
In this case, it's up to the downstream tool implementation to infer the schema.
"""
schema = self.get_fn_schema_from_fn_name(fn_name, spec_functions=spec_functions)
if schema is None:
return None
func = getattr(self, fn_name)
name = fn_name
docstring = func.__doc__ or ""
description = f"{name}{signature(func)}\n{docstring}"
fn_schema = self.get_fn_schema_from_fn_name(
fn_name, spec_functions=spec_functions
)
return ToolMetadata(name=name, description=description, fn_schema=fn_schema)
def to_tool_list(
self,
spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None,
func_to_metadata_mapping: Optional[Dict[str, ToolMetadata]] = None,
) -> List[FunctionTool]:
"""Convert tool spec to list of tools."""
spec_functions = spec_functions or self.spec_functions
func_to_metadata_mapping = func_to_metadata_mapping or {}
tool_list = []
for func_spec in spec_functions:
func_sync = None
func_async = None
if isinstance(func_spec, str):
func = getattr(self, func_spec)
if asyncio.iscoroutinefunction(func):
func_async = func
else:
func_sync = func
metadata = func_to_metadata_mapping.get(func_spec, None)
if metadata is None:
metadata = self.get_metadata_from_fn_name(func_spec)
elif isinstance(func_spec, tuple) and len(func_spec) == 2:
func_sync = getattr(self, func_spec[0])
func_async = getattr(self, func_spec[1])
metadata = func_to_metadata_mapping.get(func_spec[0], None)
if metadata is None:
metadata = func_to_metadata_mapping.get(func_spec[1], None)
if metadata is None:
metadata = self.get_metadata_from_fn_name(func_spec[0])
else:
raise ValueError(
"spec_functions must be of type: List[Union[str, Tuple[str, str]]]"
)
if func_sync is None:
if func_async is not None:
func_sync = patch_sync(func_async)
else:
raise ValueError(
f"Could not retrieve a function for spec: {func_spec}"
)
tool = FunctionTool.from_defaults(
fn=func_sync,
async_fn=func_async,
tool_metadata=metadata,
)
tool_list.append(tool)
return tool_list
def patch_sync(func_async: AsyncCallable) -> Callable:
"""Patch sync function from async function."""
def patched_sync(*args: Any, **kwargs: Any) -> Any:
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(func_async(*args, **kwargs))
return patched_sync
|
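# A minimal usage sketch for the BaseToolSpec API above. The MathToolSpec
# class and its methods are hypothetical; only spec_functions and
# to_tool_list(), shown in the block above, are assumed.
class MathToolSpec(BaseToolSpec):
    """Hypothetical spec exposing one sync function and one sync/async pair."""

    # a plain string entry, plus a (sync_name, async_name) tuple entry
    spec_functions = ["add", ("multiply", "amultiply")]

    def add(self, a: int, b: int) -> int:
        """Add two numbers."""
        return a + b

    def multiply(self, a: int, b: int) -> int:
        """Multiply two numbers."""
        return a * b

    async def amultiply(self, a: int, b: int) -> int:
        """Async variant of multiply."""
        return a * b

tools = MathToolSpec().to_tool_list()
print([t.metadata.name for t in tools])  # expected: ['add', 'multiply']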
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.cifar100 import load_data as load_data
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.cifar100 import load_data
|
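# Usage sketch for the re-exported loader above, using the public
# keras.datasets API; the shapes in the comment are the standard
# CIFAR-100 ones.
from keras.datasets.cifar100 import load_data

(x_train, y_train), (x_test, y_test) = load_data(label_mode="fine")
print(x_train.shape, y_train.shape)  # (50000, 32, 32, 3) (50000, 1)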
"""
Separation of concerns:
DataAdapter:
- x, y
- sample_weight
- class_weight
- shuffle
- batch_size
- steps, as it relates to batch_size for array data
EpochIterator:
- whether to yield numpy or tf data
- steps
- most argument validation
Trainer:
- steps_per_execution
- validation_split
- validation_data
- callbacks
- validation_freq
- epochs
- initial_epoch
- any backend-specific concern such as distribution
PyDataset:
- num_workers
- use_multiprocessing
- max_queue_size
EpochIterator steps:
1. Look at data type and select correct DataHandler
2. Instantiate DataHandler with correct arguments
3. Raise or warn on unused arguments
4. In __iter__, iterate, either for a fixed number of steps
or until there is no data
"""
import contextlib
import warnings
from keras.src.trainers import data_adapters
class EpochIterator:
def __init__(
self,
x,
y=None,
sample_weight=None,
batch_size=None,
steps_per_epoch=None,
shuffle=False,
class_weight=None,
steps_per_execution=1,
):
self.steps_per_epoch = steps_per_epoch
self.steps_per_execution = steps_per_execution
self._current_iterator = None
self._epoch_iterator = None
self._steps_seen = 0
self.data_adapter = data_adapters.get_data_adapter(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
shuffle=shuffle,
class_weight=class_weight,
)
self._num_batches = self.data_adapter.num_batches
def _get_iterator(self):
return self.data_adapter.get_numpy_iterator()
def _interrupted_warning(self):
warnings.warn(
"Your input ran out of data; interrupting training. "
"Make sure that your dataset or generator can generate "
"at least `steps_per_epoch * epochs` batches. "
"You may need to use the `.repeat()` "
"function when building your dataset.",
stacklevel=2,
)
def reset(self):
self._current_iterator = None
self._num_batches = self.data_adapter.num_batches
self._steps_seen = 0
self._epoch_iterator = None
def _enumerate_iterator(self):
self.data_adapter.on_epoch_begin()
steps_per_epoch = self.steps_per_epoch or self._num_batches or -1
if steps_per_epoch > 0:
if self._current_iterator is None or self.steps_per_epoch is None:
self._current_iterator = iter(self._get_iterator())
self._steps_seen = 0
for step in range(0, steps_per_epoch, self.steps_per_execution):
if self._num_batches and self._steps_seen >= self._num_batches:
if self.steps_per_epoch:
self._interrupted_warning()
break
self._steps_seen += self.steps_per_execution
yield step, self._current_iterator
if self._num_batches and self._steps_seen >= self._num_batches:
self._current_iterator = iter(self._get_iterator())
self._steps_seen = 0
else:
iterator = iter(self._get_iterator())
step = -self.steps_per_execution
while True:
step += self.steps_per_execution
self._steps_seen = step + self.steps_per_execution
yield step, iterator
self.data_adapter.on_epoch_end()
def __iter__(self):
self._epoch_iterator = self._enumerate_iterator()
return self
def __next__(self):
buffer = []
step, iterator = next(self._epoch_iterator)
with self.catch_stop_iteration():
for _ in range(self.steps_per_execution):
data = next(iterator)
buffer.append(data)
return step, buffer
if buffer:
return step, buffer
raise StopIteration
def enumerate_epoch(self):
for step, data in self:
yield step, data
@contextlib.contextmanager
def catch_stop_iteration(self):
"""Catches errors when an iterator runs out of data."""
try:
yield
except StopIteration:
if self._num_batches is None:
self._num_batches = self._steps_seen
self._interrupted_warning()
self._current_iterator = None
self.data_adapter.on_epoch_end()
@property
def num_batches(self):
if self.steps_per_epoch:
return self.steps_per_epoch
# Either copied from the data_adapter, or
# inferred at the end of an iteration.
return self._num_batches
|
"""
Separation of concerns:
DataAdapter:
- x, y
- sample_weight
- class_weight
- shuffle
- batch_size
- steps, as it relates to batch_size for array data
EpochIterator:
- whether to yield numpy or tf data
- steps
- most argument validation
Trainer:
- steps_per_execution
- validation_split
- validation_data
- callbacks
- validation_freq
- epochs
- initial_epoch
- any backend-specific concern such as distribution
PyDataset:
- num_workers
- use_multiprocessing
- max_queue_size
EpochIterator steps:
1. Look at data type and select correct DataHandler
2. Instantiate DataHandler with correct arguments
3. Raise or warn on unused arguments
4. In __iter__, iterate, either for a fixed number of steps
or until there is no data
"""
import warnings
from keras.src.trainers import data_adapters
class EpochIterator:
def __init__(
self,
x,
y=None,
sample_weight=None,
batch_size=None,
steps_per_epoch=None,
shuffle=False,
class_weight=None,
steps_per_execution=1,
):
self.steps_per_epoch = steps_per_epoch
self.steps_per_execution = steps_per_execution
if steps_per_epoch:
self._current_iterator = None
self._insufficient_data = False
self.data_adapter = data_adapters.get_data_adapter(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
shuffle=shuffle,
class_weight=class_weight,
)
self._num_batches = self.data_adapter.num_batches
def _get_iterator(self):
return self.data_adapter.get_numpy_iterator()
def enumerate_epoch(self):
buffer = []
self.data_adapter.on_epoch_begin()
if self.steps_per_epoch:
if self._current_iterator is None:
self._current_iterator = iter(self._get_iterator())
self._insufficient_data = False
for step in range(self.steps_per_epoch):
if self._insufficient_data:
break
try:
data = next(self._current_iterator)
buffer.append(data)
if len(buffer) == self.steps_per_execution:
yield step - len(buffer) + 1, buffer
buffer = []
except (StopIteration,):
warnings.warn(
"Your input ran out of data; interrupting epoch. "
"Make sure that your dataset or generator can generate "
"at least `steps_per_epoch * epochs` batches. "
"You may need to use the `.repeat()` "
"function when building your dataset.",
stacklevel=2,
)
self._current_iterator = None
self._insufficient_data = True
if buffer:
yield step - len(buffer) + 1, buffer
else:
for step, data in enumerate(self._get_iterator()):
buffer.append(data)
if len(buffer) == self.steps_per_execution:
yield step - len(buffer) + 1, buffer
buffer = []
if buffer:
yield step - len(buffer) + 1, buffer
if not self._num_batches:
# Infer the number of batches returned by the data_adapter.
# Assumed static.
self._num_batches = step + 1
self.data_adapter.on_epoch_end()
@property
def num_batches(self):
if self.steps_per_epoch:
return self.steps_per_epoch
# Either copied from the data_adapter, or
# inferred at the end of an iteration.
return self._num_batches
|
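# A rough sketch of driving the EpochIterator above with in-memory numpy
# arrays; the shapes are hypothetical. The first variant is consumed via
# the __iter__/__next__ protocol, the second via enumerate_epoch().
import numpy as np

x = np.random.rand(100, 8).astype("float32")
y = np.random.rand(100, 1).astype("float32")
iterator = EpochIterator(x=x, y=y, batch_size=32, shuffle=True)
for step, buffer in iterator:  # or: for step, buffer in iterator.enumerate_epoch()
    print(step, len(buffer))  # buffer holds up to steps_per_execution batches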
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SQLDatabaseToolkit": "langchain_community.agent_toolkits.sql.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SQLDatabaseToolkit",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SQLDatabaseToolkit": "langchain_community.agent_toolkits.sql.toolkit"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SQLDatabaseToolkit",
]
|
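# The shim above relies on PEP 562 module-level __getattr__: any attribute
# missing from the module is routed through _import_attribute, which warns
# and imports from the new location. A runnable toy version of the same
# pattern (the "sqrt" -> "math" mapping is purely illustrative):
import warnings

_MOVED = {"sqrt": "math"}

def __getattr__(name):
    if name in _MOVED:
        warnings.warn(f"{name} now lives in {_MOVED[name]}", DeprecationWarning)
        module = __import__(_MOVED[name], fromlist=[name])
        return getattr(module, name)
    raise AttributeError(name)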
"""Azure Cognitive Vision tool spec."""
from typing import List, Optional
import requests
from llama_index.core.tools.tool_spec.base import BaseToolSpec
CV_URL_TMPL = "https://{resource}.cognitiveservices.azure.com/computervision/imageanalysis:analyze"
class AzureCVToolSpec(BaseToolSpec):
"""Azure Cognitive Vision tool spec."""
spec_functions = ["process_image"]
def __init__(
self,
resource: str,
api_key: str,
language: Optional[str] = "en",
api_version: Optional[str] = "2023-04-01-preview",
) -> None:
"""Initialize with parameters."""
self.api_key = api_key
self.cv_url = CV_URL_TMPL.format(resource=resource)
self.language = language
self.api_version = api_version
def process_image(self, url: str, features: List[str]):
"""
This tool accepts an image URL or file, processes it, and returns a variety of text outputs depending on the use case.
You can use the features argument to configure what text you want returned.
Args:
url (str): The URL for the image to caption
features (List[str]): Instructions on how to process the image. Valid keys are tags, objects, read, caption
"""
response = requests.post(
f'{self.cv_url}?features={",".join(features)}&language={self.language}&api-version={self.api_version}',
headers={"Ocp-Apim-Subscription-Key": self.api_key},
json={"url": url},
)
response_json = response.json()
if "read" in features:
response_json["readResult"] = response_json["readResult"]["content"]
return response_json
|
"""Azure Cognitive Vision tool spec."""
from typing import List, Optional
import requests
from llama_index.core.tools.tool_spec.base import BaseToolSpec
CV_URL_TMPL = "https://{resource}.cognitiveservices.azure.com/computervision/imageanalysis:analyze"
class AzureCVToolSpec(BaseToolSpec):
"""Azure Cognitive Vision tool spec."""
spec_functions = ["process_image"]
def __init__(
self,
resource: str,
api_key: str,
language: Optional[str] = "en",
api_version: Optional[str] = "2023-04-01-preview",
) -> None:
"""Initialize with parameters."""
self.api_key = api_key
self.cv_url = CV_URL_TMPL.format(resource=resource)
self.language = language
self.api_version = api_version
def process_image(self, url: str, features: List[str]):
"""
This tool accepts an image URL or file, processes it, and returns a variety of text outputs depending on the use case.
You can use the features argument to configure what text you want returned.
Args:
url (str): The URL for the image to caption
features (List[str]): Instructions on how to process the image. Valid keys are tags, objects, read, caption
"""
response = requests.post(
f'{self.cv_url}?features={",".join(features)}&language={self.language}&api-version={self.api_version}',
headers={"Ocp-Apim-Subscription-Key": self.api_key},
json={"url": url},
)
response_json = response.json()
if "read" in features:
response_json["readResult"] = response_json["readResult"]["content"]
return response_json
|
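# A hedged usage sketch for the spec above; the resource name, key, and
# image URL are placeholders, and the feature keys come from the
# process_image docstring.
spec = AzureCVToolSpec(resource="my-vision-resource", api_key="<key>")
result = spec.process_image(
    url="https://example.com/photo.jpg",
    features=["caption", "tags"],
)
tools = spec.to_tool_list()  # inherited from BaseToolSpec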
import os
import pytest
from unittest import mock
from zhipuai.types.chat.chat_completion import (
Completion,
CompletionChoice,
CompletionMessage,
CompletionUsage,
)
from llama_index.core.base.llms.types import CompletionResponse
from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.zhipuai import ZhipuAI
def test_llm_class():
names_of_base_classes = [b.__name__ for b in ZhipuAI.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
def test_zhipuai_llm_model_alias():
model = "glm-test"
api_key = "api_key_test"
llm = ZhipuAI(model=model, api_key=api_key)
assert llm.model == model
assert llm.model_kwargs is not None
def test_zhipuai_llm_metadata():
api_key = "api_key_test"
llm = ZhipuAI(model="glm-4", api_key=api_key)
assert llm.metadata.is_function_calling_model is True
llm = ZhipuAI(model="glm-4v", api_key=api_key)
assert llm.metadata.is_function_calling_model is False
def test_zhipuai_completions_with_stop():
mock_response = Completion(
model="glm-4",
created=1703487403,
choices=[
CompletionChoice(
index=0,
finish_reason="stop",
message=CompletionMessage(
role="assistant",
content="MOCK_RESPONSE",
),
)
],
usage=CompletionUsage(
prompt_tokens=31, completion_tokens=217, total_tokens=248
),
)
predict_response = CompletionResponse(
text="MOCK_RESPONSE",
additional_kwargs={"tool_calls": []},
raw=mock_response,
)
llm = ZhipuAI(model="glm-4", api_key="__fake_key__")
with mock.patch.object(
llm._client.chat.completions, "create", return_value=mock_response
):
actual_chat = llm.complete("__query__", stop=["stop_words"])
assert actual_chat == predict_response
@pytest.mark.skipif(
os.getenv("ZHIPUAI_API_KEY") is None, reason="ZHIPUAI_API_KEY not set"
)
def test_completion():
model = "glm-4"
api_key = os.getenv("ZHIPUAI_API_KEY")
llm = ZhipuAI(model=model, api_key=api_key)
assert llm.complete("who are you")
@pytest.mark.asyncio
@pytest.mark.skipif(
os.getenv("ZHIPUAI_API_KEY") is None, reason="ZHIPUAI_API_KEY not set"
)
async def test_async_completion():
model = "glm-4"
api_key = os.getenv("ZHIPUAI_API_KEY")
llm = ZhipuAI(model=model, api_key=api_key)
assert await llm.acomplete("who are you")
|
import os
import pytest
from unittest import mock
from zhipuai.types.chat.chat_completion import (
Completion,
CompletionChoice,
CompletionMessage,
CompletionUsage,
)
from llama_index.core.base.llms.types import CompletionResponse
from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.zhipuai import ZhipuAI
def test_llm_class():
names_of_base_classes = [b.__name__ for b in ZhipuAI.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
def test_zhipuai_llm_model_alias():
model = "glm-test"
api_key = "api_key_test"
llm = ZhipuAI(model=model, api_key=api_key)
assert llm.model == model
assert llm.model_kwargs is not None
def test_zhipuai_llm_metadata():
api_key = "api_key_test"
llm = ZhipuAI(model="glm-4", api_key=api_key)
assert llm.metadata.is_function_calling_model is True
llm = ZhipuAI(model="glm-4v", api_key=api_key)
assert llm.metadata.is_function_calling_model is False
def test_zhipuai_completions_with_stop():
mock_response = Completion(
model="glm-4",
created=1703487403,
choices=[
CompletionChoice(
index=0,
finish_reason="stop",
message=CompletionMessage(
role="assistant",
content="MOCK_RESPONSE",
),
)
],
usage=CompletionUsage(
prompt_tokens=31, completion_tokens=217, total_tokens=248
),
)
predict_response = CompletionResponse(
text="MOCK_RESPONSE",
additional_kwargs={"tool_calls": []},
raw=mock_response,
)
llm = ZhipuAI(model="glm-4", api_key="__fake_key__")
with mock.patch.object(
llm._client.chat.completions, "create", return_value=mock_response
):
actual_chat = llm.complete("__query__", stop=["stop_words"])
assert actual_chat == predict_response
@pytest.mark.skipif(
os.getenv("ZHIPUAI_API_KEY") is None, reason="ZHIPUAI_API_KEY not set"
)
def test_completion():
model = "glm-4"
api_key = os.getenv("ZHIPUAI_API_KEY")
llm = ZhipuAI(model=model, api_key=api_key)
assert llm.complete("who are you")
@pytest.mark.asyncio()
@pytest.mark.skipif(
os.getenv("ZHIPUAI_API_KEY") is None, reason="ZHIPUAI_API_KEY not set"
)
async def test_async_completion():
model = "glm-4"
api_key = os.getenv("ZHIPUAI_API_KEY")
llm = ZhipuAI(model=model, api_key=api_key)
assert await llm.acomplete("who are you")
|
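# The two live tests above are skipped unless the API key is exported; a
# typical invocation (test file name hypothetical) would be:
#   ZHIPUAI_API_KEY=... pytest test_zhipuai.py -k completion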
import json
import re
from re import Pattern
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
class ReActJsonSingleInputOutputParser(AgentOutputParser):
"""Parses ReAct-style LLM calls that have a single tool input in json format.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
it should be in the below format. This will result in an AgentAction
being returned.
```
Thought: agent thought here
Action:
```
{
"action": "search",
"action_input": "what is the temperature in SF"
}
```
```
If the output signals that a final answer should be given,
it should be in the below format. This will result in an AgentFinish
being returned.
```
Thought: agent thought here
Final Answer: The temperature is 100 degrees
```
"""
pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n?(.*?)`{3}.*?$", re.DOTALL)
"""Regex pattern to parse the output."""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
includes_answer = FINAL_ANSWER_ACTION in text
try:
found = self.pattern.search(text)
if not found:
# Fast fail to parse Final Answer.
msg = "action not found"
raise ValueError(msg)
action = found.group(1)
response = json.loads(action.strip())
includes_action = "action" in response
if includes_answer and includes_action:
msg = (
"Parsing LLM output produced a final answer "
f"and a parse-able action: {text}"
)
raise OutputParserException(msg)
return AgentAction(
response["action"],
response.get("action_input", {}),
text,
)
except Exception:
if not includes_answer:
msg = f"Could not parse LLM output: {text}"
raise OutputParserException(msg)
output = text.split(FINAL_ANSWER_ACTION)[-1].strip()
return AgentFinish({"output": output}, text)
@property
def _type(self) -> str:
return "react-json-single-input"
|
import json
import re
from re import Pattern
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
class ReActJsonSingleInputOutputParser(AgentOutputParser):
"""Parses ReAct-style LLM calls that have a single tool input in json format.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
it should be in the below format. This will result in an AgentAction
being returned.
```
Thought: agent thought here
Action:
```
{
"action": "search",
"action_input": "what is the temperature in SF"
}
```
```
If the output signals that a final answer should be given,
it should be in the below format. This will result in an AgentFinish
being returned.
```
Thought: agent thought here
Final Answer: The temperature is 100 degrees
```
"""
pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n?(.*?)`{3}.*?$", re.DOTALL)
"""Regex pattern to parse the output."""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
includes_answer = FINAL_ANSWER_ACTION in text
try:
found = self.pattern.search(text)
if not found:
# Fast fail to parse Final Answer.
msg = "action not found"
raise ValueError(msg)
action = found.group(1)
response = json.loads(action.strip())
includes_action = "action" in response
if includes_answer and includes_action:
msg = (
"Parsing LLM output produced a final answer "
f"and a parse-able action: {text}"
)
raise OutputParserException(msg)
return AgentAction(
response["action"], response.get("action_input", {}), text
)
except Exception:
if not includes_answer:
msg = f"Could not parse LLM output: {text}"
raise OutputParserException(msg)
output = text.split(FINAL_ANSWER_ACTION)[-1].strip()
return AgentFinish({"output": output}, text)
@property
def _type(self) -> str:
return "react-json-single-input"
|
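# A round-trip sketch against the parser above, using the exact output
# format its docstring documents.
parser = ReActJsonSingleInputOutputParser()

action_text = """Thought: I should look this up
Action:
```json
{"action": "search", "action_input": "what is the temperature in SF"}
```"""
step = parser.parse(action_text)
print(step.tool, step.tool_input)  # -> search what is the temperature in SF

final_text = "Thought: done\nFinal Answer: The temperature is 100 degrees"
print(parser.parse(final_text).return_values["output"])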
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = evaluator(model)
"""
Average Queries: 49.92307692307692
Average Corpus: 4334.7692307692305
Aggregated for Score Function: dot
Accuracy@1: 58.72%
Accuracy@3: 75.37%
Accuracy@5: 80.76%
Accuracy@10: 87.07%
Precision@1: 58.72%
Recall@1: 35.61%
Precision@3: 36.31%
Recall@3: 50.84%
Precision@5: 27.72%
Recall@5: 56.55%
Precision@10: 19.18%
Recall@10: 64.21%
MRR@10: 0.6822
NDCG@10: 0.6204
Model Query Sparsity: Active Dimensions: 74.9, Sparsity Ratio: 0.9975
Model Corpus Sparsity: Active Dimensions: 174.8, Sparsity Ratio: 0.9943
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6204
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = evaluator(model)
"""
Average Queries: 49.92307692307692
Average Corpus: 4334.7692307692305
Aggregated for Score Function: dot
Accuracy@1: 58.72%
Accuracy@3: 75.37%
Accuracy@5: 80.76%
Accuracy@10: 87.07%
Precision@1: 58.72%
Recall@1: 35.61%
Precision@3: 36.31%
Recall@3: 50.84%
Precision@5: 27.72%
Recall@5: 56.55%
Precision@10: 19.18%
Recall@10: 64.21%
MRR@10: 0.6822
NDCG@10: 0.6204
Model Sparsity Stats Query : Row Non-Zero Mean: 74.93406589214618, Row Sparsity Mean: 0.9975449305314285
Model Sparsity Stats Corpus : Row Non-Zero Mean: 174.8070262028621, Row Sparsity Mean: 0.9942727547425491
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6204
|
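# To evaluate on a subset rather than all NanoBEIR datasets, pass explicit
# names; the two below follow the NanoBEIREvaluator naming convention and
# should be checked against the installed sentence-transformers version.
evaluator = SparseNanoBEIREvaluator(
    dataset_names=["QuoraRetrieval", "MSMARCO"],
    show_progress_bar=True,
    batch_size=16,
)
results = evaluator(model)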
_base_ = './maskformer_r50_ms-16xb1-75e_coco.py'
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
depths = [2, 2, 18, 2]
model = dict(
backbone=dict(
_delete_=True,
type='SwinTransformer',
pretrain_img_size=384,
embed_dims=192,
patch_size=4,
window_size=12,
mlp_ratio=4,
depths=depths,
num_heads=[6, 12, 24, 48],
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(
in_channels=[192, 384, 768, 1536], # pass to pixel_decoder inside
pixel_decoder=dict(
_delete_=True,
type='PixelDecoder',
norm_cfg=dict(type='GN', num_groups=32),
act_cfg=dict(type='ReLU')),
enforce_decoder_input_project=True))
# optimizer
# weight_decay = 0.01
# norm_weight_decay = 0.0
# embed_weight_decay = 0.0
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
norm_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'norm': norm_multi,
'absolute_pos_embed': embed_multi,
'relative_position_bias_table': embed_multi,
'query_embed': embed_multi
}
optim_wrapper = dict(
optimizer=dict(lr=6e-5, weight_decay=0.01),
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
max_epochs = 300
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[250],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (64 GPUs) x (1 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = './maskformer_r50_mstrain_16x1_75e_coco.py'
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
depths = [2, 2, 18, 2]
model = dict(
backbone=dict(
_delete_=True,
type='SwinTransformer',
pretrain_img_size=384,
embed_dims=192,
patch_size=4,
window_size=12,
mlp_ratio=4,
depths=depths,
num_heads=[6, 12, 24, 48],
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(
in_channels=[192, 384, 768, 1536], # pass to pixel_decoder inside
pixel_decoder=dict(
_delete_=True,
type='PixelDecoder',
norm_cfg=dict(type='GN', num_groups=32),
act_cfg=dict(type='ReLU')),
enforce_decoder_input_project=True))
# optimizer
# weight_decay = 0.01
# norm_weight_decay = 0.0
# embed_weight_decay = 0.0
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
norm_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'norm': norm_multi,
'absolute_pos_embed': embed_multi,
'relative_position_bias_table': embed_multi,
'query_embed': embed_multi
}
optim_wrapper = dict(
optimizer=dict(lr=6e-5, weight_decay=0.01),
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
max_epochs = 300
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=1e-6, by_epoch=False, begin=0, end=1500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[250],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (64 GPUs) x (1 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
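# The blocks above are MMDetection config files; one way to inspect such a
# config programmatically is via mmengine (the file path is hypothetical):
from mmengine.config import Config

cfg = Config.fromfile("configs/maskformer/maskformer_swin-l_ms-16xb1-300e_coco.py")
print(cfg.model.backbone.type)  # SwinTransformer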
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import shutil
import tempfile
import unittest
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import (
AutoProcessor,
LlavaOnevisionImageProcessor,
LlavaOnevisionProcessor,
Qwen2TokenizerFast,
)
if is_torchvision_available():
from transformers import LlavaOnevisionVideoProcessor
if is_torch_available():
pass
@require_vision
@require_torch
class LlavaOnevisionProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = LlavaOnevisionProcessor
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
image_processor = LlavaOnevisionImageProcessor()
video_processor = LlavaOnevisionVideoProcessor()
tokenizer = Qwen2TokenizerFast.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
tokenizer.add_special_tokens({"additional_special_tokens": ["<image>", "<video>"]})
processor_kwargs = cls.prepare_processor_dict()
processor = LlavaOnevisionProcessor(
video_processor=video_processor, image_processor=image_processor, tokenizer=tokenizer, **processor_kwargs
)
processor.save_pretrained(cls.tmpdirname)
cls.image_token = processor.image_token
cls.video_token = processor.video_token
def get_tokenizer(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_image_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
def get_video_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).video_processor
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
@staticmethod
def prepare_processor_dict():
return {
"chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + ' '}}{# Render all images first #}{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}{{ '<image>' }}{% endfor %}{# Render all video then #}{% for content in message['content'] | selectattr('type', 'equalto', 'video') %}{{ '<video>' }}{% endfor %}{# Render all text next #}{% if message['role'] != 'assistant' %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{{ '\n' + content['text'] }}{% endfor %}{% else %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{% generation %}{{ '\n' + content['text'] }}{% endgeneration %}{% endfor %}{% endif %}{{'<|im_end|>'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
"num_image_tokens": 6,
"vision_feature_select_strategy": "default"
} # fmt: skip
# Copied from tests.models.llava.test_processor_llava.LlavaProcessorTest.test_chat_template_is_saved
def test_chat_template_is_saved(self):
processor_loaded = self.processor_class.from_pretrained(self.tmpdirname)
processor_dict_loaded = json.loads(processor_loaded.to_json_string())
# chat templates aren't serialized to json in processors
self.assertFalse("chat_template" in processor_dict_loaded.keys())
# they have to be saved as separate file and loaded back from that file
# so we check if the same template is loaded
processor_dict = self.prepare_processor_dict()
self.assertTrue(processor_loaded.chat_template == processor_dict.get("chat_template", None))
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import shutil
import tempfile
import unittest
from transformers.testing_utils import require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import (
AutoProcessor,
LlavaOnevisionImageProcessor,
LlavaOnevisionProcessor,
LlavaOnevisionVideoProcessor,
Qwen2TokenizerFast,
)
if is_torch_available():
pass
@require_vision
class LlavaOnevisionProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = LlavaOnevisionProcessor
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
image_processor = LlavaOnevisionImageProcessor()
video_processor = LlavaOnevisionVideoProcessor()
tokenizer = Qwen2TokenizerFast.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
tokenizer.add_special_tokens({"additional_special_tokens": ["<image>", "<video>"]})
processor_kwargs = cls.prepare_processor_dict()
processor = LlavaOnevisionProcessor(
video_processor=video_processor, image_processor=image_processor, tokenizer=tokenizer, **processor_kwargs
)
processor.save_pretrained(cls.tmpdirname)
cls.image_token = processor.image_token
cls.video_token = processor.video_token
def get_tokenizer(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_image_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
def get_video_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).video_processor
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
@staticmethod
def prepare_processor_dict():
return {
"chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + ' '}}{# Render all images first #}{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}{{ '<image>' }}{% endfor %}{# Render all video then #}{% for content in message['content'] | selectattr('type', 'equalto', 'video') %}{{ '<video>' }}{% endfor %}{# Render all text next #}{% if message['role'] != 'assistant' %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{{ '\n' + content['text'] }}{% endfor %}{% else %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{% generation %}{{ '\n' + content['text'] }}{% endgeneration %}{% endfor %}{% endif %}{{'<|im_end|>'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
"num_image_tokens": 6,
"vision_feature_select_strategy": "default"
} # fmt: skip
# Copied from tests.models.llava.test_processor_llava.LlavaProcessorTest.test_chat_template_is_saved
def test_chat_template_is_saved(self):
processor_loaded = self.processor_class.from_pretrained(self.tmpdirname)
processor_dict_loaded = json.loads(processor_loaded.to_json_string())
# chat templates aren't serialized to json in processors
self.assertFalse("chat_template" in processor_dict_loaded.keys())
# they have to be saved as separate file and loaded back from that file
# so we check if the same template is loaded
processor_dict = self.prepare_processor_dict()
self.assertTrue(processor_loaded.chat_template == processor_dict.get("chat_template", None))
|
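# A sketch of exercising the saved chat template from the tests above; the
# directory is a placeholder, and the message structure follows the
# content types the template iterates over.
processor = LlavaOnevisionProcessor.from_pretrained("path/to/saved_processor")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "What is in this image?"},
        ],
    }
]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
# -> "<|im_start|>user <image>\nWhat is in this image?<|im_end|><|im_start|>assistant\n"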
import os
import sys
import warnings
from pathlib import Path
import torch
from torchaudio._internal import module_utils as _mod_utils # noqa: F401
_LIB_DIR = Path(__file__).parent / "lib"
def _get_lib_path(lib: str):
suffix = "pyd" if os.name == "nt" else "so"
path = _LIB_DIR / f"{lib}.{suffix}"
return path
def _load_lib(lib: str) -> bool:
"""Load extension module
Note:
In case `torchaudio` is deployed with `pex` format, the library file
is not in a standard location.
In this case, we expect that `libtorchaudio` is available somewhere
in the search path of the dynamic loading mechanism, so that importing
`_torchaudio` will have the library loader find and load `libtorchaudio`.
This is the reason why the function should not raise an error when the library
file is not found.
Returns:
bool:
True if the library file is found AND the library loaded without failure.
False if the library file is not found (as in the case where torchaudio
is deployed with the pex format, and thus the shared library file is
in a non-standard location).
If the library file is found but there is an issue loading the library
(such as a missing dependency), then this function raises the exception as-is.
Raises:
Exception:
If the library file is found, but there is an issue loading the library file,
(when underlying `ctypes.DLL` throws an exception), this function will pass
the exception as-is, instead of catching it and returning a bool.
The expected case is `OSError` thrown by `ctypes.DLL` when a dynamic dependency
is not found.
This behavior was chosen because the expected failure case is not recoverable.
If a dependency is missing, then users have to install it.
"""
path = _get_lib_path(lib)
if not path.exists():
return False
torch.ops.load_library(path)
torch.classes.load_library(path)
return True
_FFMPEG_INITIALIZED = False
def _init_ffmpeg():
global _FFMPEG_INITIALIZED
if _FFMPEG_INITIALIZED:
return
if not torch.ops.torchaudio.is_ffmpeg_available():
raise RuntimeError(
"torchaudio is not compiled with FFmpeg integration. Please set USE_FFMPEG=1 when compiling torchaudio."
)
try:
_load_lib("libtorchaudio_ffmpeg")
except OSError as err:
raise ImportError("FFmpeg libraries are not found. Please install FFmpeg.") from err
import torchaudio._torchaudio_ffmpeg # noqa
torch.ops.torchaudio.ffmpeg_init()
if torch.ops.torchaudio.ffmpeg_get_log_level() > 8:
torch.ops.torchaudio.ffmpeg_set_log_level(8)
_FFMPEG_INITIALIZED = True
def _init_extension():
if not _mod_utils.is_module_available("torchaudio._torchaudio"):
warnings.warn("torchaudio C++ extension is not available.")
return
# On Windows Python-3.8+ has `os.add_dll_directory` call,
# which is called to configure dll search path.
# To find CUDA-related DLLs we need to make sure the
# conda environment/bin path is configured. Please take a look:
# https://stackoverflow.com/questions/59330863/cant-import-dll-module-in-python
if os.name == "nt" and sys.version_info >= (3, 8) and sys.version_info < (3, 9):
env_path = os.environ["PATH"]
path_arr = env_path.split(";")
for path in path_arr:
if os.path.exists(path):
os.add_dll_directory(path)
_load_lib("libtorchaudio")
# This import is for initializing the methods registered via PyBind11
# This has to happen after the base library is loaded
from torchaudio import _torchaudio # noqa
# Because this part is executed as part of `import torchaudio`, we ignore the
# initialization failure.
# If the FFmpeg integration is not properly initialized, then detailed error
# will be raised when client code attempts to import the dedicated feature.
try:
_init_ffmpeg()
except Exception:
pass
def _check_cuda_version():
version = torch.ops.torchaudio.cuda_version()
if version is not None and torch.version.cuda is not None:
version_str = str(version)
ta_version = f"{version_str[:-3]}.{version_str[-2]}"
t_version = torch.version.cuda.split(".")
t_version = f"{t_version[0]}.{t_version[1]}"
if ta_version != t_version:
raise RuntimeError(
"Detected that PyTorch and TorchAudio were compiled with different CUDA versions. "
f"PyTorch has CUDA version {t_version} whereas TorchAudio has CUDA version {ta_version}. "
"Please install the TorchAudio version that matches your PyTorch version."
)
_init_extension()
_check_cuda_version()
|
import os
import warnings
from pathlib import Path
import torch
from torchaudio._internal import module_utils as _mod_utils # noqa: F401
_LIB_DIR = Path(__file__).parent / "lib"
def _get_lib_path(lib: str):
suffix = "pyd" if os.name == "nt" else "so"
path = _LIB_DIR / f"{lib}.{suffix}"
return path
def _load_lib(lib: str) -> bool:
"""Load extension module
Note:
In case `torchaudio` is deployed with `pex` format, the library file
is not in a standard location.
In this case, we expect that `libtorchaudio` is available somewhere
in the search path of the dynamic loading mechanism, so that importing
`_torchaudio` will have the library loader find and load `libtorchaudio`.
This is the reason why the function should not raise an error when the library
file is not found.
Returns:
bool:
True if the library file is found AND the library loaded without failure.
False if the library file is not found (as in the case where torchaudio
is deployed with the pex format, and thus the shared library file is
in a non-standard location).
If the library file is found but there is an issue loading the library
(such as a missing dependency), then this function raises the exception as-is.
Raises:
Exception:
If the library file is found, but there is an issue loading the library file,
(when underlying `ctypes.DLL` throws an exception), this function will pass
the exception as-is, instead of catching it and returning a bool.
The expected case is `OSError` thrown by `ctypes.DLL` when a dynamic dependency
is not found.
This behavior was chosen because the expected failure case is not recoverable.
If a dependency is missing, then users have to install it.
"""
path = _get_lib_path(lib)
if not path.exists():
return False
torch.ops.load_library(path)
torch.classes.load_library(path)
return True
_FFMPEG_INITIALIZED = False
def _init_ffmpeg():
global _FFMPEG_INITIALIZED
if _FFMPEG_INITIALIZED:
return
if not torch.ops.torchaudio.is_ffmpeg_available():
raise RuntimeError(
"torchaudio is not compiled with FFmpeg integration. Please set USE_FFMPEG=1 when compiling torchaudio."
)
try:
_load_lib("libtorchaudio_ffmpeg")
except OSError as err:
raise ImportError("FFmpeg libraries are not found. Please install FFmpeg.") from err
import torchaudio._torchaudio_ffmpeg # noqa
torch.ops.torchaudio.ffmpeg_init()
if torch.ops.torchaudio.ffmpeg_get_log_level() > 8:
torch.ops.torchaudio.ffmpeg_set_log_level(8)
_FFMPEG_INITIALIZED = True
def _init_extension():
if not _mod_utils.is_module_available("torchaudio._torchaudio"):
warnings.warn("torchaudio C++ extension is not available.")
return
_load_lib("libtorchaudio")
# This import is for initializing the methods registered via PyBind11
# This has to happen after the base library is loaded
from torchaudio import _torchaudio # noqa
# Because this part is executed as part of `import torchaudio`, we ignore the
# initialization failure.
# If the FFmpeg integration is not properly initialized, then detailed error
# will be raised when client code attempts to import the dedicated feature.
try:
_init_ffmpeg()
except Exception:
pass
def _check_cuda_version():
version = torch.ops.torchaudio.cuda_version()
if version is not None and torch.version.cuda is not None:
version_str = str(version)
ta_version = f"{version_str[:-3]}.{version_str[-2]}"
t_version = torch.version.cuda.split(".")
t_version = f"{t_version[0]}.{t_version[1]}"
if ta_version != t_version:
raise RuntimeError(
"Detected that PyTorch and TorchAudio were compiled with different CUDA versions. "
f"PyTorch has CUDA version {t_version} whereas TorchAudio has CUDA version {ta_version}. "
"Please install the TorchAudio version that matches your PyTorch version."
)
_init_extension()
_check_cuda_version()
|
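# Worked example of the string slicing in _check_cuda_version above,
# assuming torch.ops.torchaudio.cuda_version() returns an int like 11030
# for CUDA 11.3:
version_str = str(11030)
ta_version = f"{version_str[:-3]}.{version_str[-2]}"
print(ta_version)  # 11.3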
from __future__ import annotations
import json
from typing import TYPE_CHECKING
from unittest.mock import MagicMock, patch
import pytest
from langchain_community.utilities.jira import JiraAPIWrapper
if TYPE_CHECKING:
from collections.abc import Iterator
@pytest.fixture
def mock_jira() -> Iterator[MagicMock]:
with patch("atlassian.Jira") as mock_jira:
yield mock_jira
@pytest.mark.requires("atlassian")
class TestJiraAPIWrapper:
def test_jira_api_wrapper(self, mock_jira: MagicMock) -> None:
"""Test for Jira API Wrapper using mocks"""
# Configure the mock instance
mock_jira_instance = mock_jira.return_value
# Mock projects method to return mock projects
mock_project1 = MagicMock(key="PROJ1")
mock_project2 = MagicMock(key="PROJ2")
# Set up the mock to return our mock projects
mock_jira_instance.projects.return_value = [mock_project1, mock_project2]
# Initialize wrapper with mocks in place
jira_wrapper = JiraAPIWrapper(
jira_username="test_user",
jira_api_token="test_token",
jira_instance_url="https://test.atlassian.net",
jira_cloud=True,
)
mock_jira.assert_called_once_with(
url="https://test.atlassian.net",
username="test_user",
password="test_token",
cloud=True,
)
# Test get_projects function
result = jira_wrapper.run("get_projects", "")
# Verify the mock was called and the result contains expected info
mock_jira_instance.projects.assert_called_once()
assert result.startswith("Found 2 projects")
def test_jira_api_wrapper_with_cloud_false(self, mock_jira: MagicMock) -> None:
JiraAPIWrapper(
jira_username="test_user",
jira_api_token="test_token",
jira_instance_url="https://test.atlassian.net",
jira_cloud=False,
)
mock_jira.assert_called_once_with(
url="https://test.atlassian.net",
username="test_user",
password="test_token",
cloud=False,
)
def test_jira_api_wrapper_with_oauth_dict(self, mock_jira: MagicMock) -> None:
oauth_dict = {
"client_id": "test_client_id",
"token": {
"access_token": "test_access_token",
"token_type": "test_token_type",
},
}
oauth_string = json.dumps(oauth_dict)
JiraAPIWrapper(
jira_oauth2=oauth_string,
jira_instance_url="https://test.atlassian.net",
jira_cloud=False,
)
mock_jira.assert_called_once_with(
url="https://test.atlassian.net",
oauth2={"client": None, **oauth_dict},
cloud=False,
)
|
import json
from unittest.mock import MagicMock, patch
import pytest
from langchain_community.utilities.jira import JiraAPIWrapper
@pytest.fixture
def mock_jira(): # type: ignore
with patch("atlassian.Jira") as mock_jira:
yield mock_jira
@pytest.mark.requires("atlassian")
class TestJiraAPIWrapper:
def test_jira_api_wrapper(self, mock_jira: MagicMock) -> None:
"""Test for Jira API Wrapper using mocks"""
# Configure the mock instance
mock_jira_instance = mock_jira.return_value
# Mock projects method to return mock projects
mock_project1 = MagicMock(key="PROJ1")
mock_project2 = MagicMock(key="PROJ2")
# Set up the mock to return our mock projects
mock_jira_instance.projects.return_value = [mock_project1, mock_project2]
# Initialize wrapper with mocks in place
jira_wrapper = JiraAPIWrapper(
jira_username="test_user",
jira_api_token="test_token",
jira_instance_url="https://test.atlassian.net",
jira_cloud=True,
)
mock_jira.assert_called_once_with(
url="https://test.atlassian.net",
username="test_user",
password="test_token",
cloud=True,
)
# Test get_projects function
result = jira_wrapper.run("get_projects", "")
# Verify the mock was called and the result contains expected info
mock_jira_instance.projects.assert_called_once()
assert result.startswith("Found 2 projects")
def test_jira_api_wrapper_with_cloud_false(self, mock_jira: MagicMock) -> None:
JiraAPIWrapper(
jira_username="test_user",
jira_api_token="test_token",
jira_instance_url="https://test.atlassian.net",
jira_cloud=False,
)
mock_jira.assert_called_once_with(
url="https://test.atlassian.net",
username="test_user",
password="test_token",
cloud=False,
)
def test_jira_api_wrapper_with_oauth_dict(self, mock_jira: MagicMock) -> None:
oauth_dict = {
"client_id": "test_client_id",
"token": {
"access_token": "test_access_token",
"token_type": "test_token_type",
},
}
oauth_string = json.dumps(oauth_dict)
JiraAPIWrapper(
jira_oauth2=oauth_string,
jira_instance_url="https://test.atlassian.net",
jira_cloud=False,
)
mock_jira.assert_called_once_with(
url="https://test.atlassian.net",
oauth2={"client": None, **oauth_dict},
cloud=False,
)
|
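# Sketch of the OAuth2 path the last test exercises: the wrapper parses the
# JSON string and forwards it (plus a "client" key) to atlassian.Jira.
# Outside the tests this requires the atlassian package; above it is mocked.
import json
from langchain_community.utilities.jira import JiraAPIWrapper

oauth_dict = {
    "client_id": "test_client_id",
    "token": {"access_token": "test_access_token", "token_type": "test_token_type"},
}
wrapper = JiraAPIWrapper(
    jira_oauth2=json.dumps(oauth_dict),
    jira_instance_url="https://test.atlassian.net",
    jira_cloud=False,
)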
import json
import logging
from typing import List, Optional
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
class UpstashRedisChatMessageHistory(BaseChatMessageHistory):
"""Chat message history stored in an Upstash Redis database."""
def __init__(
self,
session_id: str,
url: str = "",
token: str = "",
key_prefix: str = "message_store:",
ttl: Optional[int] = None,
):
try:
from upstash_redis import Redis
except ImportError:
raise ImportError(
"Could not import upstash redis python package. "
"Please install it with `pip install upstash_redis`."
)
if url == "" or token == "":
raise ValueError(
"UPSTASH_REDIS_REST_URL and UPSTASH_REDIS_REST_TOKEN are needed."
)
try:
self.redis_client = Redis(url=url, token=token)
except Exception:
logger.error("Upstash Redis instance could not be initialized.")
self.session_id = session_id
self.key_prefix = key_prefix
self.ttl = ttl
@property
def key(self) -> str:
"""Construct the record key to use"""
return self.key_prefix + self.session_id
@property
def messages(self) -> List[BaseMessage]: # type: ignore[override]
"""Retrieve the messages from Upstash Redis"""
_items = self.redis_client.lrange(self.key, 0, -1)
items = [json.loads(m) for m in _items[::-1]]
messages = messages_from_dict(items)
return messages
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in Upstash Redis"""
self.redis_client.lpush(self.key, json.dumps(message_to_dict(message)))
if self.ttl:
self.redis_client.expire(self.key, self.ttl)
def clear(self) -> None:
"""Clear session memory from Upstash Redis"""
self.redis_client.delete(self.key)
|
import json
import logging
from typing import List, Optional
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
class UpstashRedisChatMessageHistory(BaseChatMessageHistory):
"""Chat message history stored in an Upstash Redis database."""
def __init__(
self,
session_id: str,
url: str = "",
token: str = "",
key_prefix: str = "message_store:",
ttl: Optional[int] = None,
):
try:
from upstash_redis import Redis
except ImportError:
raise ImportError(
"Could not import upstash redis python package. "
"Please install it with `pip install upstash_redis`."
)
if url == "" or token == "":
raise ValueError(
"UPSTASH_REDIS_REST_URL and UPSTASH_REDIS_REST_TOKEN are needed."
)
try:
self.redis_client = Redis(url=url, token=token)
except Exception:
logger.error("Upstash Redis instance could not be initialized.")
self.session_id = session_id
self.key_prefix = key_prefix
self.ttl = ttl
@property
def key(self) -> str:
"""Construct the record key to use"""
return self.key_prefix + self.session_id
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from Upstash Redis"""
_items = self.redis_client.lrange(self.key, 0, -1)
items = [json.loads(m) for m in _items[::-1]]
messages = messages_from_dict(items)
return messages
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in Upstash Redis"""
self.redis_client.lpush(self.key, json.dumps(message_to_dict(message)))
if self.ttl:
self.redis_client.expire(self.key, self.ttl)
def clear(self) -> None:
"""Clear session memory from Upstash Redis"""
self.redis_client.delete(self.key)
|
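# A usage sketch for the history class above; the URL and token are
# placeholders for real Upstash credentials.
from langchain_core.messages import AIMessage, HumanMessage

history = UpstashRedisChatMessageHistory(
    session_id="user-42",
    url="https://<name>.upstash.io",
    token="<token>",
    ttl=3600,
)
history.add_message(HumanMessage(content="hello"))
history.add_message(AIMessage(content="hi there"))
print(history.messages)  # oldest-first, rebuilt via messages_from_dict
history.clear()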
import binascii
import codecs
import marshal
import os
import types as python_types
def default(method):
"""Decorates a method to detect overrides in subclasses."""
method._is_default = True
return method
def is_default(method):
"""Check if a method is decorated with the `default` wrapper."""
return getattr(method, "_is_default", False)
def func_dump(func):
"""Serializes a user-defined function.
Args:
func: the function to serialize.
Returns:
A tuple `(code, defaults, closure)`.
"""
if os.name == "nt":
raw_code = marshal.dumps(func.__code__).replace(b"\\", b"/")
code = codecs.encode(raw_code, "base64").decode("ascii")
else:
raw_code = marshal.dumps(func.__code__)
code = codecs.encode(raw_code, "base64").decode("ascii")
defaults = func.__defaults__
if func.__closure__:
closure = tuple(c.cell_contents for c in func.__closure__)
else:
closure = None
return code, defaults, closure
def func_load(code, defaults=None, closure=None, globs=None):
"""Deserializes a user defined function.
Args:
code: bytecode of the function.
defaults: defaults of the function.
closure: closure of the function.
globs: dictionary of global objects.
Returns:
A function object.
"""
if isinstance(code, (tuple, list)): # unpack previous dump
code, defaults, closure = code
if isinstance(defaults, list):
defaults = tuple(defaults)
def ensure_value_to_cell(value):
"""Ensures that a value is converted to a python cell object.
Args:
value: Any value that needs to be cast to the cell type
Returns:
A value wrapped as a cell object (see function "func_load")
"""
def dummy_fn():
value # just access it so it gets captured in .__closure__
cell_value = dummy_fn.__closure__[0]
if not isinstance(value, type(cell_value)):
return cell_value
return value
if closure is not None:
closure = tuple(ensure_value_to_cell(_) for _ in closure)
try:
raw_code = codecs.decode(code.encode("ascii"), "base64")
except (UnicodeEncodeError, binascii.Error):
raw_code = code.encode("raw_unicode_escape")
code = marshal.loads(raw_code)
if globs is None:
globs = globals()
return python_types.FunctionType(
code, globs, name=code.co_name, argdefs=defaults, closure=closure
)
def to_list(x):
"""Normalizes a list/tensor into a list.
If a tensor is passed, we return
a list of size 1 containing the tensor.
Args:
x: target object to be normalized.
Returns:
A list.
"""
if isinstance(x, list):
return x
return [x]
def remove_long_seq(maxlen, seq, label):
"""Removes sequences that exceed the maximum length.
Args:
maxlen: Int, maximum length of the output sequences.
seq: List of lists, where each sublist is a sequence.
label: List where each element is an integer.
Returns:
new_seq, new_label: shortened lists for `seq` and `label`.
"""
new_seq, new_label = [], []
for x, y in zip(seq, label):
if len(x) < maxlen:
new_seq.append(x)
new_label.append(y)
return new_seq, new_label
def removeprefix(x, prefix):
"""Backport of `removeprefix` from PEP-616 (Python 3.9+)"""
if len(prefix) > 0 and x.startswith(prefix):
return x[len(prefix) :]
else:
return x
def removesuffix(x, suffix):
"""Backport of `removesuffix` from PEP-616 (Python 3.9+)"""
if len(suffix) > 0 and x.endswith(suffix):
return x[: -len(suffix)]
else:
return x
def remove_by_id(lst, value):
"""Remove a value from a list by id."""
for i, v in enumerate(lst):
if id(v) == id(value):
del lst[i]
return
def pythonify_logs(logs):
"""Flatten and convert log values to Python-native types.
This function attempts to convert each dict value with `float(value)` and skips
the conversion if it fails.
Args:
logs: A dict containing log values.
Returns:
A flattened dict with values converted to Python-native types if
possible.
"""
logs = logs or {}
result = {}
for key, value in sorted(logs.items()):
if isinstance(value, dict):
result.update(pythonify_logs(value))
else:
try:
value = float(value)
except:
pass
result[key] = value
return result
|
import binascii
import codecs
import marshal
import os
import types as python_types
def default(method):
"""Decorates a method to detect overrides in subclasses."""
method._is_default = True
return method
def is_default(method):
"""Check if a method is decorated with the `default` wrapper."""
return getattr(method, "_is_default", False)
def func_dump(func):
"""Serializes a user-defined function.
Args:
func: the function to serialize.
Returns:
A tuple `(code, defaults, closure)`.
"""
if os.name == "nt":
raw_code = marshal.dumps(func.__code__).replace(b"\\", b"/")
code = codecs.encode(raw_code, "base64").decode("ascii")
else:
raw_code = marshal.dumps(func.__code__)
code = codecs.encode(raw_code, "base64").decode("ascii")
defaults = func.__defaults__
if func.__closure__:
closure = tuple(c.cell_contents for c in func.__closure__)
else:
closure = None
return code, defaults, closure
def func_load(code, defaults=None, closure=None, globs=None):
"""Deserializes a user defined function.
Args:
code: bytecode of the function.
defaults: defaults of the function.
closure: closure of the function.
globs: dictionary of global objects.
Returns:
A function object.
"""
if isinstance(code, (tuple, list)): # unpack previous dump
code, defaults, closure = code
if isinstance(defaults, list):
defaults = tuple(defaults)
def ensure_value_to_cell(value):
"""Ensures that a value is converted to a python cell object.
Args:
value: Any value that needs to be cast to the cell type
Returns:
A value wrapped as a cell object (see function "func_load")
"""
def dummy_fn():
value # just access it so it gets captured in .__closure__
cell_value = dummy_fn.__closure__[0]
if not isinstance(value, type(cell_value)):
return cell_value
return value
if closure is not None:
closure = tuple(ensure_value_to_cell(_) for _ in closure)
try:
raw_code = codecs.decode(code.encode("ascii"), "base64")
except (UnicodeEncodeError, binascii.Error):
raw_code = code.encode("raw_unicode_escape")
code = marshal.loads(raw_code)
if globs is None:
globs = globals()
return python_types.FunctionType(
code, globs, name=code.co_name, argdefs=defaults, closure=closure
)
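# Round-trip sketch: the dump marshals raw bytecode, so it is only safe to
# load with the same Python version that produced it.
#
#   def greet(name, punct="!"):
#       return "hello " + name + punct
#
#   code, defaults, closure = func_dump(greet)
#   clone = func_load(code, defaults=defaults, closure=closure)
#   assert clone("world") == "hello world!"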
def to_list(x):
"""Normalizes a list/tensor into a list.
If a tensor is passed, we return
a list of size 1 containing the tensor.
Args:
x: target object to be normalized.
Returns:
A list.
"""
if isinstance(x, list):
return x
return [x]
def remove_long_seq(maxlen, seq, label):
"""Removes sequences that exceed the maximum length.
Args:
maxlen: Int, maximum length of the output sequences.
seq: List of lists, where each sublist is a sequence.
label: List where each element is an integer.
Returns:
new_seq, new_label: shortened lists for `seq` and `label`.
"""
new_seq, new_label = [], []
for x, y in zip(seq, label):
if len(x) < maxlen:
new_seq.append(x)
new_label.append(y)
return new_seq, new_label
def removeprefix(x, prefix):
"""Backport of `removeprefix` from PEP-616 (Python 3.9+)"""
if len(prefix) > 0 and x.startswith(prefix):
return x[len(prefix) :]
else:
return x
def removesuffix(x, suffix):
"""Backport of `removesuffix` from PEP-616 (Python 3.9+)"""
if len(suffix) > 0 and x.endswith(suffix):
return x[: -len(suffix)]
else:
return x
def remove_by_id(lst, value):
"""Remove a value from a list by id."""
for i, v in enumerate(lst):
if id(v) == id(value):
del lst[i]
return
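# Unlike `list.remove`, which removes the first element *equal* to the value,
# this removes the element that *is* the value:
#
#   a, b = [1], [1]
#   lst = [a, b]
#   remove_by_id(lst, b)
#   assert lst == [a] and lst[0] is a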
|
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_caffe')))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
resize_cfg=dict(type='Resize', keep_ratio=True)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
train_cfg = dict(by_epoch=True, max_epochs=24)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_caffe')))
img_norm_cfg = dict(
mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
torch = import_library('torch', raise_error=False)
tf = import_library('tensorflow', raise_error=False)
T = TypeVar('T', bound='AudioDoc')
class AudioDoc(BaseDoc):
"""
Document for handling audios.
The Audio Document can contain:
- an [`AudioUrl`][docarray.typing.url.AudioUrl] (`AudioDoc.url`)
- an [`AudioTensor`](../../../api_references/typing/tensor/audio) (`AudioDoc.tensor`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`AudioDoc.embedding`)
- an [`AudioBytes`][docarray.typing.bytes.AudioBytes] (`AudioDoc.bytes_`) object
- an integer representing the frame_rate (`AudioDoc.frame_rate`)
You can use this Document directly:
```python
from docarray.documents import AudioDoc
# use it directly
audio = AudioDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
# model = MyEmbeddingModel()
# audio.embedding = model(audio.tensor)
```
You can extend this Document:
```python
from docarray.documents import AudioDoc, TextDoc
from typing import Optional
# extend it
class MyAudio(AudioDoc):
name: Optional[TextDoc]
audio = MyAudio(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/hello.wav?raw=true'
)
audio.name = TextDoc(text='my first audio')
audio.tensor, audio.frame_rate = audio.url.load()
# model = MyEmbeddingModel()
# audio.embedding = model(audio.tensor)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import AudioDoc, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
audio: AudioDoc
text: TextDoc
mmdoc = MultiModalDoc(
audio=AudioDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/hello.wav?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.url.load()
# equivalent to
mmdoc.audio.bytes_ = mmdoc.audio.url.load_bytes()
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.bytes_.load()
```
"""
url: Optional[AudioUrl] = None
tensor: Optional[AudioTensor] = None
embedding: Optional[AnyEmbedding] = None
bytes_: Optional[AudioBytes] = None
frame_rate: Optional[int] = None
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
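# Coercion sketch (URL and array are illustrative): `validate` lets plain
# values stand in for an AudioDoc when pydantic validates a field, e.g.
#
#   AudioDoc.validate('https://example.com/hello.wav')  # -> AudioDoc(url=...)
#   AudioDoc.validate(np.zeros(16000))                  # -> AudioDoc(tensor=...)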
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
torch = import_library('torch', raise_error=False)
tf = import_library('tensorflow', raise_error=False)
T = TypeVar('T', bound='AudioDoc')
class AudioDoc(BaseDoc):
"""
Document for handling audios.
The Audio Document can contain:
- an [`AudioUrl`][docarray.typing.url.AudioUrl] (`AudioDoc.url`)
- an [`AudioTensor`](../../../api_references/typing/tensor/audio) (`AudioDoc.tensor`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`AudioDoc.embedding`)
- an [`AudioBytes`][docarray.typing.bytes.AudioBytes] (`AudioDoc.bytes_`) object
- an integer representing the frame_rate (`AudioDoc.frame_rate`)
You can use this Document directly:
```python
from docarray.documents import AudioDoc
# use it directly
audio = AudioDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
# model = MyEmbeddingModel()
# audio.embedding = model(audio.tensor)
```
You can extend this Document:
```python
from docarray.documents import AudioDoc, TextDoc
from typing import Optional
# extend it
class MyAudio(AudioDoc):
name: Optional[TextDoc]
audio = MyAudio(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/hello.wav?raw=true'
)
audio.name = TextDoc(text='my first audio')
audio.tensor, audio.frame_rate = audio.url.load()
# model = MyEmbeddingModel()
# audio.embedding = model(audio.tensor)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import AudioDoc, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
audio: AudioDoc
text: TextDoc
mmdoc = MultiModalDoc(
audio=AudioDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/hello.wav?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.url.load()
# equivalent to
mmdoc.audio.bytes_ = mmdoc.audio.url.load_bytes()
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.bytes_.load()
```
"""
url: Optional[AudioUrl]
tensor: Optional[AudioTensor]
embedding: Optional[AnyEmbedding]
bytes_: Optional[AudioBytes]
frame_rate: Optional[int]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_caption_metric import COCOCaptionMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .coco_video_metric import CocoVideoMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dump_det_results import DumpDetResults
from .dump_proposals_metric import DumpProposals
from .lvis_metric import LVISMetric
from .mot_challenge_metric import MOTChallengeMetric
from .openimages_metric import OpenImagesMetric
from .refseg_metric import RefSegMetric
from .reid_metric import ReIDMetrics
from .semseg_metric import SemSegMetric
from .voc_metric import VOCMetric
from .youtube_vis_metric import YouTubeVISMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals',
'CocoOccludedSeparatedMetric', 'DumpDetResults', 'BaseVideoMetric',
'MOTChallengeMetric', 'CocoVideoMetric', 'ReIDMetrics', 'YouTubeVISMetric',
'COCOCaptionMetric', 'SemSegMetric', 'RefSegMetric'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dump_det_results import DumpDetResults
from .dump_proposals_metric import DumpProposals
from .lvis_metric import LVISMetric
from .openimages_metric import OpenImagesMetric
from .voc_metric import VOCMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals',
'CocoOccludedSeparatedMetric', 'DumpDetResults'
]
|
"""**OutputParser** classes parse the output of an LLM call.
**Class hierarchy:**
.. code-block::
BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # ListOutputParser, PydanticOutputParser
**Main helpers:**
.. code-block::
Serializable, Generation, PromptValue
""" # noqa: E501
from typing import TYPE_CHECKING, Any
from langchain_core.output_parsers import (
CommaSeparatedListOutputParser,
ListOutputParser,
MarkdownListOutputParser,
NumberedListOutputParser,
PydanticOutputParser,
XMLOutputParser,
)
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
JsonOutputToolsParser,
PydanticToolsParser,
)
from langchain._api import create_importer
from langchain.output_parsers.boolean import BooleanOutputParser
from langchain.output_parsers.combining import CombiningOutputParser
from langchain.output_parsers.datetime import DatetimeOutputParser
from langchain.output_parsers.enum import EnumOutputParser
from langchain.output_parsers.fix import OutputFixingParser
from langchain.output_parsers.pandas_dataframe import PandasDataFrameOutputParser
from langchain.output_parsers.regex import RegexParser
from langchain.output_parsers.regex_dict import RegexDictParser
from langchain.output_parsers.retry import RetryOutputParser, RetryWithErrorOutputParser
from langchain.output_parsers.structured import ResponseSchema, StructuredOutputParser
from langchain.output_parsers.yaml import YamlOutputParser
if TYPE_CHECKING:
from langchain_community.output_parsers.rail_parser import GuardrailsOutputParser
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GuardrailsOutputParser": "langchain_community.output_parsers.rail_parser",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BooleanOutputParser",
"CombiningOutputParser",
"CommaSeparatedListOutputParser",
"DatetimeOutputParser",
"EnumOutputParser",
"GuardrailsOutputParser",
"JsonOutputKeyToolsParser",
"JsonOutputToolsParser",
"ListOutputParser",
"MarkdownListOutputParser",
"NumberedListOutputParser",
"OutputFixingParser",
"PandasDataFrameOutputParser",
"PydanticOutputParser",
"PydanticToolsParser",
"RegexDictParser",
"RegexParser",
"ResponseSchema",
"RetryOutputParser",
"RetryWithErrorOutputParser",
"StructuredOutputParser",
"XMLOutputParser",
"YamlOutputParser",
]
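# Deprecation-shim sketch: names in DEPRECATED_LOOKUP resolve lazily through
# ``__getattr__`` above, emitting a deprecation warning and importing from the
# new location, e.g.
#
#   from langchain.output_parsers import GuardrailsOutputParser
#   # warns, then returns langchain_community.output_parsers.rail_parser.GuardrailsOutputParser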
|
"""**OutputParser** classes parse the output of an LLM call.
**Class hierarchy:**
.. code-block::
BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # ListOutputParser, PydanticOutputParser
**Main helpers:**
.. code-block::
Serializable, Generation, PromptValue
""" # noqa: E501
from typing import TYPE_CHECKING, Any
from langchain_core.output_parsers import (
CommaSeparatedListOutputParser,
ListOutputParser,
MarkdownListOutputParser,
NumberedListOutputParser,
PydanticOutputParser,
XMLOutputParser,
)
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
JsonOutputToolsParser,
PydanticToolsParser,
)
from langchain._api import create_importer
from langchain.output_parsers.boolean import BooleanOutputParser
from langchain.output_parsers.combining import CombiningOutputParser
from langchain.output_parsers.datetime import DatetimeOutputParser
from langchain.output_parsers.enum import EnumOutputParser
from langchain.output_parsers.fix import OutputFixingParser
from langchain.output_parsers.pandas_dataframe import PandasDataFrameOutputParser
from langchain.output_parsers.regex import RegexParser
from langchain.output_parsers.regex_dict import RegexDictParser
from langchain.output_parsers.retry import RetryOutputParser, RetryWithErrorOutputParser
from langchain.output_parsers.structured import ResponseSchema, StructuredOutputParser
from langchain.output_parsers.yaml import YamlOutputParser
if TYPE_CHECKING:
from langchain_community.output_parsers.rail_parser import GuardrailsOutputParser
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GuardrailsOutputParser": "langchain_community.output_parsers.rail_parser"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BooleanOutputParser",
"CombiningOutputParser",
"CommaSeparatedListOutputParser",
"DatetimeOutputParser",
"EnumOutputParser",
"GuardrailsOutputParser",
"JsonOutputKeyToolsParser",
"JsonOutputToolsParser",
"ListOutputParser",
"MarkdownListOutputParser",
"NumberedListOutputParser",
"OutputFixingParser",
"PandasDataFrameOutputParser",
"PydanticOutputParser",
"PydanticToolsParser",
"RegexDictParser",
"RegexParser",
"ResponseSchema",
"RetryOutputParser",
"RetryWithErrorOutputParser",
"StructuredOutputParser",
"XMLOutputParser",
"YamlOutputParser",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmengine.structures import InstanceData
from mmdet.registry import TASK_UTILS
from ..assigners import AssignResult
from .base_sampler import BaseSampler
from .sampling_result import SamplingResult
@TASK_UTILS.register_module()
class PseudoSampler(BaseSampler):
"""A pseudo sampler that does not do sampling actually."""
def __init__(self, **kwargs):
pass
def _sample_pos(self, **kwargs):
"""Sample positive samples."""
raise NotImplementedError
def _sample_neg(self, **kwargs):
"""Sample negative samples."""
raise NotImplementedError
def sample(self, assign_result: AssignResult, pred_instances: InstanceData,
gt_instances: InstanceData, *args, **kwargs):
"""Directly returns the positive and negative indices of samples.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
pred_instances (:obj:`InstanceData`): Instances of model
predictions. It includes ``priors``, and the priors can
be anchors, points, or bboxes predicted by the model,
shape(n, 4).
gt_instances (:obj:`InstanceData`): Ground truth of instance
annotations. It usually includes ``bboxes`` and ``labels``
attributes.
Returns:
:obj:`SamplingResult`: sampler results
"""
gt_bboxes = gt_instances.bboxes
priors = pred_instances.priors
pos_inds = torch.nonzero(
assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
neg_inds = torch.nonzero(
assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
gt_flags = priors.new_zeros(priors.shape[0], dtype=torch.uint8)
sampling_result = SamplingResult(
pos_inds=pos_inds,
neg_inds=neg_inds,
priors=priors,
gt_bboxes=gt_bboxes,
assign_result=assign_result,
gt_flags=gt_flags,
avg_factor_with_neg=False)
return sampling_result
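# Minimal sketch of the index bookkeeping above on bare tensors (not mmdet
# objects); in `AssignResult.gt_inds`, 0 marks a negative and i > 0 a match
# with the i-th ground-truth box:
#
#   gt_inds = torch.tensor([0, 2, 0, 1])
#   pos = torch.nonzero(gt_inds > 0, as_tuple=False).squeeze(-1).unique()   # tensor([1, 3])
#   neg = torch.nonzero(gt_inds == 0, as_tuple=False).squeeze(-1).unique()  # tensor([0, 2])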
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmengine.data import InstanceData
from mmdet.registry import TASK_UTILS
from ..assigners import AssignResult
from .base_sampler import BaseSampler
from .sampling_result import SamplingResult
@TASK_UTILS.register_module()
class PseudoSampler(BaseSampler):
"""A pseudo sampler that does not do sampling actually."""
def __init__(self, **kwargs):
pass
def _sample_pos(self, **kwargs):
"""Sample positive samples."""
raise NotImplementedError
def _sample_neg(self, **kwargs):
"""Sample negative samples."""
raise NotImplementedError
def sample(self, assign_result: AssignResult, pred_instances: InstanceData,
gt_instances: InstanceData, *args, **kwargs):
"""Directly returns the positive and negative indices of samples.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
pred_instances (:obj:`InstanceData`): Instances of model
predictions. It includes ``priors``, and the priors can
be anchors, points, or bboxes predicted by the model,
shape(n, 4).
gt_instances (:obj:`InstanceData`): Ground truth of instance
annotations. It usually includes ``bboxes`` and ``labels``
attributes.
Returns:
:obj:`SamplingResult`: sampler results
"""
gt_bboxes = gt_instances.bboxes
priors = pred_instances.priors
pos_inds = torch.nonzero(
assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
neg_inds = torch.nonzero(
assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
gt_flags = priors.new_zeros(priors.shape[0], dtype=torch.uint8)
sampling_result = SamplingResult(
pos_inds=pos_inds,
neg_inds=neg_inds,
priors=priors,
gt_bboxes=gt_bboxes,
assign_result=assign_result,
gt_flags=gt_flags,
avg_factor_with_neg=False)
return sampling_result
|
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
depths = [2, 2, 6, 2]
model = dict(
type='Mask2Former',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=depths,
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
frozen_stages=-1,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(
type='Mask2FormerHead', in_channels=[96, 192, 384, 768]),
init_cfg=None)
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optim_wrapper = dict(
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
|
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
depths = [2, 2, 6, 2]
model = dict(
type='Mask2Former',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=depths,
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
frozen_stages=-1,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(
type='Mask2FormerHead', in_channels=[96, 192, 384, 768]),
init_cfg=None)
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optimizer = dict(
type='AdamW',
lr=0.0001,
weight_decay=0.05,
eps=1e-8,
betas=(0.9, 0.999),
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
|
from langchain_core.documents import Document
from langchain_core.language_models import FakeListLLM
from langchain_core.prompts import PromptTemplate
from langchain.chains import create_history_aware_retriever
from tests.unit_tests.retrievers.parrot_retriever import FakeParrotRetriever
def test_create() -> None:
answer = "I know the answer!"
llm = FakeListLLM(responses=[answer])
retriever = FakeParrotRetriever()
question_gen_prompt = PromptTemplate.from_template("hi! {input} {chat_history}")
chain = create_history_aware_retriever(llm, retriever, question_gen_prompt)
expected_output = [Document(page_content="What is the answer?")]
output = chain.invoke({"input": "What is the answer?", "chat_history": []})
assert output == expected_output
output = chain.invoke({"input": "What is the answer?"})
assert output == expected_output
expected_output = [Document(page_content="I know the answer!")]
output = chain.invoke(
{
"input": "What is the answer?",
"chat_history": ["hi", "hi"],
},
)
assert output == expected_output
|
from langchain_core.documents import Document
from langchain_core.language_models import FakeListLLM
from langchain_core.prompts import PromptTemplate
from langchain.chains import create_history_aware_retriever
from tests.unit_tests.retrievers.parrot_retriever import FakeParrotRetriever
def test_create() -> None:
answer = "I know the answer!"
llm = FakeListLLM(responses=[answer])
retriever = FakeParrotRetriever()
question_gen_prompt = PromptTemplate.from_template("hi! {input} {chat_history}")
chain = create_history_aware_retriever(llm, retriever, question_gen_prompt)
expected_output = [Document(page_content="What is the answer?")]
output = chain.invoke({"input": "What is the answer?", "chat_history": []})
assert output == expected_output
output = chain.invoke({"input": "What is the answer?"})
assert output == expected_output
expected_output = [Document(page_content="I know the answer!")]
output = chain.invoke(
{
"input": "What is the answer?",
"chat_history": ["hi", "hi"],
}
)
assert output == expected_output
|
# Copyright (c) OpenMMLab. All rights reserved.
"""This module defines the :class:`NiceRepr` mixin class, which defines a
``__repr__`` and ``__str__`` method that only depend on a custom ``__nice__``
method, which you must define. This means you only have to overload one
function instead of two. Furthermore, if the object defines a ``__len__``
method, then the ``__nice__`` method defaults to something sensible, otherwise
it is treated as abstract and raises ``NotImplementedError``.
To use it, simply have your object inherit from :class:`NiceRepr`
(multiple inheritance should be ok).
This code was copied from the ubelt library: https://github.com/Erotemic/ubelt
Example:
>>> # Objects that define __nice__ have a default __str__ and __repr__
>>> class Student(NiceRepr):
... def __init__(self, name):
... self.name = name
... def __nice__(self):
... return self.name
>>> s1 = Student('Alice')
>>> s2 = Student('Bob')
>>> print(f's1 = {s1}')
>>> print(f's2 = {s2}')
s1 = <Student(Alice)>
s2 = <Student(Bob)>
Example:
>>> # Objects that define __len__ have a default __nice__
>>> class Group(NiceRepr):
... def __init__(self, data):
... self.data = data
... def __len__(self):
... return len(self.data)
>>> g = Group([1, 2, 3])
>>> print(f'g = {g}')
g = <Group(3)>
"""
import warnings
class NiceRepr:
"""Inherit from this class and define ``__nice__`` to "nicely" print your
objects.
Defines ``__str__`` and ``__repr__`` in terms of the ``__nice__`` function.
Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``.
If the inheriting class has a ``__len__`` method, then the default
``__nice__`` method will return its length.
Example:
>>> class Foo(NiceRepr):
... def __nice__(self):
... return 'info'
>>> foo = Foo()
>>> assert str(foo) == '<Foo(info)>'
>>> assert repr(foo).startswith('<Foo(info) at ')
Example:
>>> class Bar(NiceRepr):
... pass
>>> bar = Bar()
>>> import pytest
>>> with pytest.warns(None) as record:
...     assert 'object at' in str(bar)
...     assert 'object at' in repr(bar)
Example:
>>> class Baz(NiceRepr):
... def __len__(self):
... return 5
>>> baz = Baz()
>>> assert str(baz) == '<Baz(5)>'
"""
def __nice__(self):
"""str: a "nice" summary string describing this module"""
if hasattr(self, '__len__'):
# It is a common pattern for objects to use __len__ in __nice__
# As a convenience we define a default __nice__ for these objects
return str(len(self))
else:
# In all other cases force the subclass to overload __nice__
raise NotImplementedError(
f'Define the __nice__ method for {self.__class__!r}')
def __repr__(self):
"""str: the string of the module"""
try:
nice = self.__nice__()
classname = self.__class__.__name__
return f'<{classname}({nice}) at {hex(id(self))}>'
except NotImplementedError as ex:
warnings.warn(str(ex), category=RuntimeWarning)
return object.__repr__(self)
def __str__(self):
"""str: the string of the module"""
try:
classname = self.__class__.__name__
nice = self.__nice__()
return f'<{classname}({nice})>'
except NotImplementedError as ex:
warnings.warn(str(ex), category=RuntimeWarning)
return object.__repr__(self)
|
"""This module defines the :class:`NiceRepr` mixin class, which defines a
``__repr__`` and ``__str__`` method that only depend on a custom ``__nice__``
method, which you must define. This means you only have to overload one
function instead of two. Furthermore, if the object defines a ``__len__``
method, then the ``__nice__`` method defaults to something sensible, otherwise
it is treated as abstract and raises ``NotImplementedError``.
To use it, simply have your object inherit from :class:`NiceRepr`
(multiple inheritance should be ok).
This code was copied from the ubelt library: https://github.com/Erotemic/ubelt
Example:
>>> # Objects that define __nice__ have a default __str__ and __repr__
>>> class Student(NiceRepr):
... def __init__(self, name):
... self.name = name
... def __nice__(self):
... return self.name
>>> s1 = Student('Alice')
>>> s2 = Student('Bob')
>>> print(f's1 = {s1}')
>>> print(f's2 = {s2}')
s1 = <Student(Alice)>
s2 = <Student(Bob)>
Example:
>>> # Objects that define __len__ have a default __nice__
>>> class Group(NiceRepr):
... def __init__(self, data):
... self.data = data
... def __len__(self):
... return len(self.data)
>>> g = Group([1, 2, 3])
>>> print(f'g = {g}')
g = <Group(3)>
"""
import warnings
class NiceRepr:
"""Inherit from this class and define ``__nice__`` to "nicely" print your
objects.
Defines ``__str__`` and ``__repr__`` in terms of the ``__nice__`` function.
Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``.
If the inheriting class has a ``__len__`` method, then the default
``__nice__`` method will return its length.
Example:
>>> class Foo(NiceRepr):
... def __nice__(self):
... return 'info'
>>> foo = Foo()
>>> assert str(foo) == '<Foo(info)>'
>>> assert repr(foo).startswith('<Foo(info) at ')
Example:
>>> class Bar(NiceRepr):
... pass
>>> bar = Bar()
>>> import pytest
>>> with pytest.warns(None) as record:
...     assert 'object at' in str(bar)
...     assert 'object at' in repr(bar)
Example:
>>> class Baz(NiceRepr):
... def __len__(self):
... return 5
>>> baz = Baz()
>>> assert str(baz) == '<Baz(5)>'
"""
def __nice__(self):
"""str: a "nice" summary string describing this module"""
if hasattr(self, '__len__'):
# It is a common pattern for objects to use __len__ in __nice__
# As a convenience we define a default __nice__ for these objects
return str(len(self))
else:
# In all other cases force the subclass to overload __nice__
raise NotImplementedError(
f'Define the __nice__ method for {self.__class__!r}')
def __repr__(self):
"""str: the string of the module"""
try:
nice = self.__nice__()
classname = self.__class__.__name__
return f'<{classname}({nice}) at {hex(id(self))}>'
except NotImplementedError as ex:
warnings.warn(str(ex), category=RuntimeWarning)
return object.__repr__(self)
def __str__(self):
"""str: the string of the module"""
try:
classname = self.__class__.__name__
nice = self.__nice__()
return f'<{classname}({nice})>'
except NotImplementedError as ex:
warnings.warn(str(ex), category=RuntimeWarning)
return object.__repr__(self)
|
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import Formatter
if TYPE_CHECKING:
import tensorflow as tf
class TFFormatter(Formatter[Mapping, "tf.Tensor", Mapping]):
def __init__(self, features=None, **tf_tensor_kwargs):
super().__init__(features=features)
self.tf_tensor_kwargs = tf_tensor_kwargs
import tensorflow as tf # noqa: import tf at initialization
def _consolidate(self, column):
import tensorflow as tf
if isinstance(column, list) and column:
if all(
isinstance(x, tf.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
):
return tf.stack(column)
elif all(
isinstance(x, (tf.Tensor, tf.RaggedTensor)) and x.ndim == 1 and x.dtype == column[0].dtype
for x in column
):
# only ragged-stack 1-D tensors; otherwise some dimensions become ragged even though they were consolidated
return tf.ragged.stack(column)
return column
def _tensorize(self, value):
import tensorflow as tf
if value is None:
return value
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": tf.int64}
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": tf.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
return tf.convert_to_tensor(value, **{**default_dtype, **self.tf_tensor_kwargs})
def _recursive_tensorize(self, data_struct: dict):
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
if data_struct.dtype == object:  # tf tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "tf.Tensor":
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
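# Usage sketch: this formatter backs the public `Dataset.with_format("tf")`
# API rather than being constructed directly, e.g.
#
#   ds = datasets.Dataset.from_dict({"x": [[1, 2], [3, 4]], "y": [0, 1]})
#   tf_ds = ds.with_format("tf")
#   tf_ds[0]    # row as a dict of tf.Tensor
#   tf_ds[:2]   # batch; equal-shape columns consolidated via tf.stack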
|
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import sys
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import Formatter
if TYPE_CHECKING:
import tensorflow as tf
class TFFormatter(Formatter[dict, "tf.Tensor", dict]):
def __init__(self, features=None, decoded=True, **tf_tensor_kwargs):
super().__init__(features=features, decoded=decoded)
self.tf_tensor_kwargs = tf_tensor_kwargs
import tensorflow as tf # noqa: import tf at initialization
def _consolidate(self, column):
import tensorflow as tf
if isinstance(column, list) and column:
if all(
isinstance(x, tf.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
):
return tf.stack(column)
elif all(
isinstance(x, (tf.Tensor, tf.RaggedTensor)) and x.ndim == 1 and x.dtype == column[0].dtype
for x in column
):
# only ragged-stack 1-D tensors; otherwise some dimensions become ragged even though they were consolidated
return tf.ragged.stack(column)
return column
def _tensorize(self, value):
import tensorflow as tf
if value is None:
return value
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": tf.int64}
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": tf.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
return tf.convert_to_tensor(value, **{**default_dtype, **self.tf_tensor_kwargs})
def _recursive_tensorize(self, data_struct: dict):
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
if data_struct.dtype == object:  # tf tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct)
def format_row(self, pa_table: pa.Table) -> dict:
row = self.numpy_arrow_extractor().extract_row(pa_table)
if self.decoded:
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "tf.Tensor":
column = self.numpy_arrow_extractor().extract_column(pa_table)
if self.decoded:
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> dict:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
if self.decoded:
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
|
from keras.src.backend.numpy import core
from keras.src.backend.numpy import image
from keras.src.backend.numpy import linalg
from keras.src.backend.numpy import math
from keras.src.backend.numpy import nn
from keras.src.backend.numpy import numpy
from keras.src.backend.numpy import random
from keras.src.backend.numpy.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.numpy.core import Variable
from keras.src.backend.numpy.core import cast
from keras.src.backend.numpy.core import compute_output_spec
from keras.src.backend.numpy.core import cond
from keras.src.backend.numpy.core import convert_to_numpy
from keras.src.backend.numpy.core import convert_to_tensor
from keras.src.backend.numpy.core import is_tensor
from keras.src.backend.numpy.core import random_seed_dtype
from keras.src.backend.numpy.core import shape
from keras.src.backend.numpy.core import vectorized_map
from keras.src.backend.numpy.rnn import cudnn_ok
from keras.src.backend.numpy.rnn import gru
from keras.src.backend.numpy.rnn import lstm
from keras.src.backend.numpy.rnn import rnn
|
from keras.src.backend.numpy import core
from keras.src.backend.numpy import image
from keras.src.backend.numpy import linalg
from keras.src.backend.numpy import math
from keras.src.backend.numpy import nn
from keras.src.backend.numpy import numpy
from keras.src.backend.numpy import random
from keras.src.backend.numpy.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.numpy.core import Variable
from keras.src.backend.numpy.core import cast
from keras.src.backend.numpy.core import compute_output_spec
from keras.src.backend.numpy.core import cond
from keras.src.backend.numpy.core import convert_to_numpy
from keras.src.backend.numpy.core import convert_to_tensor
from keras.src.backend.numpy.core import is_tensor
from keras.src.backend.numpy.core import shape
from keras.src.backend.numpy.core import vectorized_map
from keras.src.backend.numpy.rnn import cudnn_ok
from keras.src.backend.numpy.rnn import gru
from keras.src.backend.numpy.rnn import lstm
from keras.src.backend.numpy.rnn import rnn
|
_base_ = './faster-rcnn_r50-caffe_c4-1x_coco.py'
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = './faster_rcnn_r50_caffe_c4_1x_coco.py'
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from torchaudio.models import conv_tasnet_base, hdemucs_high
@dataclass
class SourceSeparationBundle:
"""Dataclass that bundles components for performing source separation.
Example
>>> import torchaudio
>>> from torchaudio.pipelines import CONVTASNET_BASE_LIBRI2MIX
>>> import torch
>>>
>>> # Build the separation model.
>>> model = CONVTASNET_BASE_LIBRI2MIX.get_model()
>>> 100%|███████████████████████████████|19.1M/19.1M [00:04<00:00, 4.93MB/s]
>>>
>>> # Instantiate the test set of Libri2Mix dataset.
>>> dataset = torchaudio.datasets.LibriMix("/home/datasets/", subset="test")
>>>
>>> # Apply source separation on mixture audio.
>>> for i, data in enumerate(dataset):
>>> sample_rate, mixture, clean_sources = data
>>> # Make sure the shape of input suits the model requirement.
>>> mixture = mixture.reshape(1, 1, -1)
>>> estimated_sources = model(mixture)
>>> score = si_snr_pit(estimated_sources, clean_sources) # for demonstration
>>> print(f"Si-SNR score is : {score}.)
>>> break
>>> Si-SNR score is : 16.24.
>>>
"""
_model_path: str
_model_factory_func: Callable[[], torch.nn.Module]
_sample_rate: int
@property
def sample_rate(self) -> int:
"""Sample rate of the audio that the model is trained on.
:type: int
"""
return self._sample_rate
def get_model(self) -> torch.nn.Module:
"""Construct the model and load the pretrained weight."""
model = self._model_factory_func()
path = torchaudio.utils.download_asset(self._model_path)
state_dict = torch.load(path)
model.load_state_dict(state_dict)
model.eval()
return model
CONVTASNET_BASE_LIBRI2MIX = SourceSeparationBundle(
_model_path="models/conv_tasnet_base_libri2mix.pt",
_model_factory_func=partial(conv_tasnet_base, num_sources=2),
_sample_rate=8000,
)
CONVTASNET_BASE_LIBRI2MIX.__doc__ = """Pre-trained Source Separation pipeline with *ConvTasNet*
:cite:`Luo_2019` trained on *Libri2Mix dataset* :cite:`cosentino2020librimix`.
The source separation model is constructed by :func:`~torchaudio.models.conv_tasnet_base`
and is trained using the training script ``lightning_train.py``
`here <https://github.com/pytorch/audio/tree/release/0.12/examples/source_separation/>`__
with default arguments.
Please refer to :class:`SourceSeparationBundle` for usage instructions.
"""
HDEMUCS_HIGH_MUSDB_PLUS = SourceSeparationBundle(
_model_path="models/hdemucs_high_trained.pt",
_model_factory_func=partial(hdemucs_high, sources=["drums", "bass", "other", "vocals"]),
_sample_rate=44100,
)
HDEMUCS_HIGH_MUSDB_PLUS.__doc__ = """Pre-trained music source separation pipeline with
*Hybrid Demucs* :cite:`defossez2021hybrid` trained on MUSDB-HQ :cite:`MUSDB18HQ`
and additional internal training data.
The model is constructed by :func:`~torchaudio.models.hdemucs_high`.
Training was performed in the original HDemucs repository `here <https://github.com/facebookresearch/demucs/>`__.
Please refer to :class:`SourceSeparationBundle` for usage instructions.
"""
HDEMUCS_HIGH_MUSDB = SourceSeparationBundle(
_model_path="models/hdemucs_high_musdbhq_only.pt",
_model_factory_func=partial(hdemucs_high, sources=["drums", "bass", "other", "vocals"]),
_sample_rate=44100,
)
HDEMUCS_HIGH_MUSDB.__doc__ = """Pre-trained music source separation pipeline with
*Hybrid Demucs* :cite:`defossez2021hybrid` trained on MUSDB-HQ :cite:`MUSDB18HQ`.
The model is constructed by :func:`~torchaudio.models.hdemucs_high`.
Training was performed in the original HDemucs repository `here <https://github.com/facebookresearch/demucs/>`__.
Please refer to :class:`SourceSeparationBundle` for usage instructions.
"""
|
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from torchaudio.models import conv_tasnet_base, hdemucs_high
@dataclass
class SourceSeparationBundle:
"""torchaudio.pipelines.SourceSeparationBundle()
Dataclass that bundles components for performing source separation.
Example
>>> import torchaudio
>>> from torchaudio.pipelines import CONVTASNET_BASE_LIBRI2MIX
>>> import torch
>>>
>>> # Build the separation model.
>>> model = CONVTASNET_BASE_LIBRI2MIX.get_model()
>>> 100%|███████████████████████████████|19.1M/19.1M [00:04<00:00, 4.93MB/s]
>>>
>>> # Instantiate the test set of Libri2Mix dataset.
>>> dataset = torchaudio.datasets.LibriMix("/home/datasets/", subset="test")
>>>
>>> # Apply source separation on mixture audio.
>>> for i, data in enumerate(dataset):
>>> sample_rate, mixture, clean_sources = data
>>> # Make sure the shape of input suits the model requirement.
>>> mixture = mixture.reshape(1, 1, -1)
>>> estimated_sources = model(mixture)
>>> score = si_snr_pit(estimated_sources, clean_sources) # for demonstration
>>> print(f"Si-SNR score is : {score}.)
>>> break
>>> Si-SNR score is : 16.24.
>>>
"""
_model_path: str
_model_factory_func: Callable[[], torch.nn.Module]
_sample_rate: int
@property
def sample_rate(self) -> int:
"""Sample rate of the audio that the model is trained on.
:type: int
"""
return self._sample_rate
def get_model(self) -> torch.nn.Module:
"""Construct the model and load the pretrained weight."""
model = self._model_factory_func()
path = torchaudio.utils.download_asset(self._model_path)
state_dict = torch.load(path)
model.load_state_dict(state_dict)
model.eval()
return model
CONVTASNET_BASE_LIBRI2MIX = SourceSeparationBundle(
_model_path="models/conv_tasnet_base_libri2mix.pt",
_model_factory_func=partial(conv_tasnet_base, num_sources=2),
_sample_rate=8000,
)
CONVTASNET_BASE_LIBRI2MIX.__doc__ = """Pre-trained Source Separation pipeline with *ConvTasNet* :cite:`Luo_2019` trained on
*Libri2Mix dataset* :cite:`cosentino2020librimix`.
The source separation model is constructed by :py:func:`torchaudio.models.conv_tasnet_base`
and is trained using the training script ``lightning_train.py``
`here <https://github.com/pytorch/audio/tree/release/0.12/examples/source_separation/>`__
with default arguments.
Please refer to :py:class:`SourceSeparationBundle` for usage instructions.
"""
HDEMUCS_HIGH_MUSDB_PLUS = SourceSeparationBundle(
_model_path="models/hdemucs_high_trained.pt",
_model_factory_func=partial(hdemucs_high, sources=["drums", "bass", "other", "vocals"]),
_sample_rate=44100,
)
HDEMUCS_HIGH_MUSDB_PLUS.__doc__ = """Pre-trained *Hybrid Demucs* :cite:`defossez2021hybrid` pipeline for music
source separation trained on MUSDB-HQ :cite:`MUSDB18HQ` and additional internal training data.
The model is constructed by :py:func:`torchaudio.prototype.models.hdemucs_high`.
Training was performed in the original HDemucs repository `here <https://github.com/facebookresearch/demucs/>`__.
Please refer to :py:class:`SourceSeparationBundle` for usage instructions.
"""
HDEMUCS_HIGH_MUSDB = SourceSeparationBundle(
_model_path="models/hdemucs_high_musdbhq_only.pt",
_model_factory_func=partial(hdemucs_high, sources=["drums", "bass", "other", "vocals"]),
_sample_rate=44100,
)
HDEMUCS_HIGH_MUSDB.__doc__ = """Pre-trained *Hybrid Demucs* :cite:`defossez2021hybrid` pipeline for music
source separation trained on MUSDB-HQ :cite:`MUSDB18HQ`.
The model is constructed by :py:func:`torchaudio.prototype.models.hdemucs_high`.
Training was performed in the original HDemucs repository `here <https://github.com/facebookresearch/demucs/>`__.
Please refer to :py:class:`SourceSeparationBundle` for usage instructions.
"""
|
# coding: utf-8
"""Script for generating files with NuGet package metadata."""
import datetime
import sys
from pathlib import Path
from shutil import copyfile
if __name__ == "__main__":
source = Path(sys.argv[1])
current_dir = Path(__file__).absolute().parent
linux_folder_path = current_dir / "runtimes" / "linux-x64" / "native"
linux_folder_path.mkdir(parents=True, exist_ok=True)
osx_folder_path = current_dir / "runtimes" / "osx-x64" / "native"
osx_folder_path.mkdir(parents=True, exist_ok=True)
windows_folder_path = current_dir / "runtimes" / "win-x64" / "native"
windows_folder_path.mkdir(parents=True, exist_ok=True)
build_folder_path = current_dir / "build"
build_folder_path.mkdir(parents=True, exist_ok=True)
copyfile(source / "lib_lightgbm.so", linux_folder_path / "lib_lightgbm.so")
copyfile(source / "lib_lightgbm.dylib", osx_folder_path / "lib_lightgbm.dylib")
copyfile(source / "lib_lightgbm.dll", windows_folder_path / "lib_lightgbm.dll")
copyfile(source / "lightgbm.exe", windows_folder_path / "lightgbm.exe")
version = (current_dir.parent / "VERSION.txt").read_text(encoding="utf-8").strip().replace("rc", "-rc")
nuget_str = rf"""<?xml version="1.0"?>
<package xmlns="http://schemas.microsoft.com/packaging/2013/05/nuspec.xsd">
<metadata>
<id>LightGBM</id>
<version>{version}</version>
<authors>Guolin Ke</authors>
<owners>Guolin Ke</owners>
<license type="expression">MIT</license>
<projectUrl>https://github.com/microsoft/LightGBM</projectUrl>
<requireLicenseAcceptance>false</requireLicenseAcceptance>
<description>A fast, distributed, high performance gradient boosting framework</description>
<copyright>Copyright {datetime.datetime.now().year} @ Microsoft</copyright>
<tags>machine-learning data-mining distributed native boosting gbdt</tags>
<dependencies> </dependencies>
</metadata>
<files>
<file src="build\**" target="build"/>
<file src="runtimes\**" target="runtimes"/>
</files>
</package>
"""
prop_str = r"""
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Condition="Exists('packages.config') OR
Exists('$(MSBuildProjectName).packages.config') OR
Exists('packages.$(MSBuildProjectName).config')">
<Content Include="$(MSBuildThisFileDirectory)/../runtimes/win-x64/native/*.dll"
Condition="'$(PlatformTarget)' == 'x64'">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
<Visible>false</Visible>
</Content>
<Content Include="$(MSBuildThisFileDirectory)/../runtimes/win-x64/native/*.exe"
Condition="'$(PlatformTarget)' == 'x64'">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
<Visible>false</Visible>
</Content>
</ItemGroup>
</Project>
"""
target_str = r"""
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<EnableLightGBMUnsupportedPlatformTargetCheck Condition="'$(EnableLightGBMUnsupportedPlatformTargetCheck)' == ''">true</EnableLightGBMUnsupportedPlatformTargetCheck>
</PropertyGroup>
<Target Name="_LightGBMCheckForUnsupportedPlatformTarget"
Condition="'$(EnableLightGBMUnsupportedPlatformTargetCheck)' == 'true'"
AfterTargets="_CheckForInvalidConfigurationAndPlatform">
<Error Condition="'$(PlatformTarget)' != 'x64' AND
('$(OutputType)' == 'Exe' OR '$(OutputType)'=='WinExe') AND
!('$(TargetFrameworkIdentifier)' == '.NETCoreApp' AND '$(PlatformTarget)' == '')"
Text="LightGBM currently supports 'x64' processor architectures. Please ensure your application is targeting 'x64'." />
</Target>
</Project>
"""
(current_dir / "LightGBM.nuspec").write_text(nuget_str, encoding="utf-8")
(current_dir / "build" / "LightGBM.props").write_text(prop_str, encoding="utf-8")
(current_dir / "build" / "LightGBM.targets").write_text(target_str, encoding="utf-8")
|
# coding: utf-8
"""Script for generating files with NuGet package metadata."""
import datetime
import sys
from pathlib import Path
from shutil import copyfile
if __name__ == "__main__":
source = Path(sys.argv[1])
current_dir = Path(__file__).absolute().parent
linux_folder_path = current_dir / "runtimes" / "linux-x64" / "native"
linux_folder_path.mkdir(parents=True, exist_ok=True)
osx_folder_path = current_dir / "runtimes" / "osx-x64" / "native"
osx_folder_path.mkdir(parents=True, exist_ok=True)
windows_folder_path = current_dir / "runtimes" / "win-x64" / "native"
windows_folder_path.mkdir(parents=True, exist_ok=True)
build_folder_path = current_dir / "build"
build_folder_path.mkdir(parents=True, exist_ok=True)
copyfile(source / "lib_lightgbm.so", linux_folder_path / "lib_lightgbm.so")
copyfile(source / "lib_lightgbm.dylib", osx_folder_path / "lib_lightgbm.dylib")
copyfile(source / "lib_lightgbm.dll", windows_folder_path / "lib_lightgbm.dll")
copyfile(source / "lightgbm.exe", windows_folder_path / "lightgbm.exe")
version = (current_dir.parent / 'VERSION.txt').read_text(encoding='utf-8').strip().replace('rc', '-rc')
nuget_str = rf"""<?xml version="1.0"?>
<package xmlns="http://schemas.microsoft.com/packaging/2013/05/nuspec.xsd">
<metadata>
<id>LightGBM</id>
<version>{version}</version>
<authors>Guolin Ke</authors>
<owners>Guolin Ke</owners>
<license type="expression">MIT</license>
<projectUrl>https://github.com/microsoft/LightGBM</projectUrl>
<requireLicenseAcceptance>false</requireLicenseAcceptance>
<description>A fast, distributed, high performance gradient boosting framework</description>
<copyright>Copyright {datetime.datetime.now().year} @ Microsoft</copyright>
<tags>machine-learning data-mining distributed native boosting gbdt</tags>
<dependencies> </dependencies>
</metadata>
<files>
<file src="build\**" target="build"/>
<file src="runtimes\**" target="runtimes"/>
</files>
</package>
"""
prop_str = r"""
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Condition="Exists('packages.config') OR
Exists('$(MSBuildProjectName).packages.config') OR
Exists('packages.$(MSBuildProjectName).config')">
<Content Include="$(MSBuildThisFileDirectory)/../runtimes/win-x64/native/*.dll"
Condition="'$(PlatformTarget)' == 'x64'">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
<Visible>false</Visible>
</Content>
<Content Include="$(MSBuildThisFileDirectory)/../runtimes/win-x64/native/*.exe"
Condition="'$(PlatformTarget)' == 'x64'">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
<Visible>false</Visible>
</Content>
</ItemGroup>
</Project>
"""
target_str = r"""
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<EnableLightGBMUnsupportedPlatformTargetCheck Condition="'$(EnableLightGBMUnsupportedPlatformTargetCheck)' == ''">true</EnableLightGBMUnsupportedPlatformTargetCheck>
</PropertyGroup>
<Target Name="_LightGBMCheckForUnsupportedPlatformTarget"
Condition="'$(EnableLightGBMUnsupportedPlatformTargetCheck)' == 'true'"
AfterTargets="_CheckForInvalidConfigurationAndPlatform">
<Error Condition="'$(PlatformTarget)' != 'x64' AND
('$(OutputType)' == 'Exe' OR '$(OutputType)'=='WinExe') AND
!('$(TargetFrameworkIdentifier)' == '.NETCoreApp' AND '$(PlatformTarget)' == '')"
Text="LightGBM currently supports 'x64' processor architectures. Please ensure your application is targeting 'x64'." />
</Target>
</Project>
"""
(current_dir / "LightGBM.nuspec").write_text(nuget_str, encoding='utf-8')
(current_dir / "build" / "LightGBM.props").write_text(prop_str, encoding='utf-8')
(current_dir / "build" / "LightGBM.targets").write_text(target_str, encoding='utf-8')
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Optional, Union
import torch
from mmengine.device import is_cuda_available, is_musa_available
from mmengine.dist.utils import master_only
from mmengine.logging import MMLogger, print_log
class TimeCounter:
"""A tool that counts the average running time of a function or a method.
Users can use it as a decorator or context manager to calculate the average
running time of code blocks.
Args:
log_interval (int): The interval of logging. Defaults to 1.
warmup_interval (int): The interval of warmup. Defaults to 1.
with_sync (bool): Whether to synchronize cuda. Defaults to True.
tag (str, optional): Function tag. Used to distinguish between
different functions or methods being called. Defaults to None.
logger (MMLogger, optional): Formatted logger used to record messages.
Defaults to None.
Examples:
>>> import time
>>> from mmengine.utils.dl_utils import TimeCounter
>>> @TimeCounter()
... def fun1():
... time.sleep(0.1)
>>> fun1()
[fun1]-time per run averaged in the past 1 runs: 100.0 ms
>>> @TimeCounter(log_interval=2, tag='fun')
... def fun2():
... time.sleep(0.2)
>>> for _ in range(3):
... fun2()
[fun]-time per run averaged in the past 2 runs: 200.0 ms
>>> with TimeCounter(tag='fun3'):
... time.sleep(0.3)
[fun3]-time per run averaged in the past 1 runs: 300.0 ms
"""
instance_dict: dict = dict()
log_interval: int
warmup_interval: int
logger: Optional[MMLogger]
__count: int
__pure_inf_time: float
def __new__(cls,
log_interval: int = 1,
warmup_interval: int = 1,
with_sync: bool = True,
tag: Optional[str] = None,
logger: Optional[MMLogger] = None):
assert warmup_interval >= 1
if tag is not None and tag in cls.instance_dict:
return cls.instance_dict[tag]
instance = super().__new__(cls)
cls.instance_dict[tag] = instance
instance.log_interval = log_interval
instance.warmup_interval = warmup_interval
instance.with_sync = with_sync
instance.tag = tag
instance.logger = logger
instance.__count = 0
instance.__pure_inf_time = 0.
instance.__start_time = 0.
return instance
@master_only
def __call__(self, fn):
if self.tag is None:
self.tag = fn.__name__
def wrapper(*args, **kwargs):
self.__count += 1
if self.with_sync:
if is_cuda_available():
torch.cuda.synchronize()
elif is_musa_available():
torch.musa.synchronize()
start_time = time.perf_counter()
result = fn(*args, **kwargs)
if self.with_sync:
if is_cuda_available():
torch.cuda.synchronize()
elif is_musa_available():
torch.musa.synchronize()
elapsed = time.perf_counter() - start_time
self.print_time(elapsed)
return result
return wrapper
@master_only
def __enter__(self):
assert self.tag is not None, 'In order to clearly distinguish ' \
'printing information in different ' \
'contexts, please specify the ' \
'tag parameter'
self.__count += 1
if self.with_sync and torch.cuda.is_available():
torch.cuda.synchronize()
self.__start_time = time.perf_counter()
@master_only
def __exit__(self, exc_type, exc_val, exc_tb):
if self.with_sync and torch.cuda.is_available():
torch.cuda.synchronize()
elapsed = time.perf_counter() - self.__start_time
self.print_time(elapsed)
def print_time(self, elapsed: Union[int, float]) -> None:
"""print times per count."""
if self.__count >= self.warmup_interval:
self.__pure_inf_time += elapsed
if self.__count % self.log_interval == 0:
times_per_count = 1000 * self.__pure_inf_time / (
self.__count - self.warmup_interval + 1)
print_log(
f'[{self.tag}]-time per run averaged in the past '
f'{self.__count} runs: {times_per_count:.1f} ms',
self.logger)
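# A minimal sketch of the per-tag caching in __new__ above (tag names are
# illustrative):
#
#   t1 = TimeCounter(tag='shared')
#   t2 = TimeCounter(tag='shared')  # returns the cached instance_dict entry
#   assert t1 is t2
#   assert TimeCounter(tag='other') is not t1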
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Optional, Union
import torch
from mmengine.dist.utils import master_only
from mmengine.logging import MMLogger, print_log
class TimeCounter:
"""A tool that counts the average running time of a function or a method.
Users can use it as a decorator or context manager to calculate the average
running time of code blocks.
Args:
log_interval (int): The interval of logging. Defaults to 1.
warmup_interval (int): The interval of warmup. Defaults to 1.
with_sync (bool): Whether to synchronize cuda. Defaults to True.
tag (str, optional): Function tag. Used to distinguish between
different functions or methods being called. Defaults to None.
logger (MMLogger, optional): Formatted logger used to record messages.
Defaults to None.
Examples:
>>> import time
>>> from mmengine.utils.dl_utils import TimeCounter
>>> @TimeCounter()
... def fun1():
... time.sleep(0.1)
>>> fun1()
[fun1]-time per run averaged in the past 1 runs: 100.0 ms
>>> @TimeCounter(log_interval=2, tag='fun')
... def fun2():
... time.sleep(0.2)
>>> for _ in range(3):
... fun2()
[fun]-time per run averaged in the past 2 runs: 200.0 ms
>>> with TimeCounter(tag='fun3'):
... time.sleep(0.3)
[fun3]-time per run averaged in the past 1 runs: 300.0 ms
"""
instance_dict: dict = dict()
log_interval: int
warmup_interval: int
logger: Optional[MMLogger]
__count: int
__pure_inf_time: float
def __new__(cls,
log_interval: int = 1,
warmup_interval: int = 1,
with_sync: bool = True,
tag: Optional[str] = None,
logger: Optional[MMLogger] = None):
assert warmup_interval >= 1
if tag is not None and tag in cls.instance_dict:
return cls.instance_dict[tag]
instance = super().__new__(cls)
cls.instance_dict[tag] = instance
instance.log_interval = log_interval
instance.warmup_interval = warmup_interval
instance.with_sync = with_sync
instance.tag = tag
instance.logger = logger
instance.__count = 0
instance.__pure_inf_time = 0.
instance.__start_time = 0.
return instance
@master_only
def __call__(self, fn):
if self.tag is None:
self.tag = fn.__name__
def wrapper(*args, **kwargs):
self.__count += 1
if self.with_sync and torch.cuda.is_available():
torch.cuda.synchronize()
start_time = time.perf_counter()
result = fn(*args, **kwargs)
if self.with_sync and torch.cuda.is_available():
torch.cuda.synchronize()
elapsed = time.perf_counter() - start_time
self.print_time(elapsed)
return result
return wrapper
@master_only
def __enter__(self):
assert self.tag is not None, 'In order to clearly distinguish ' \
'printing information in different ' \
'contexts, please specify the ' \
'tag parameter'
self.__count += 1
if self.with_sync and torch.cuda.is_available():
torch.cuda.synchronize()
self.__start_time = time.perf_counter()
@master_only
def __exit__(self, exc_type, exc_val, exc_tb):
if self.with_sync and torch.cuda.is_available():
torch.cuda.synchronize()
elapsed = time.perf_counter() - self.__start_time
self.print_time(elapsed)
def print_time(self, elapsed: Union[int, float]) -> None:
"""print times per count."""
if self.__count >= self.warmup_interval:
self.__pure_inf_time += elapsed
if self.__count % self.log_interval == 0:
times_per_count = 1000 * self.__pure_inf_time / (
self.__count - self.warmup_interval + 1)
print_log(
f'[{self.tag}]-time per run averaged in the past '
f'{self.__count} runs: {times_per_count:.1f} ms',
self.logger)
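# Worked example of the averaging in print_time above: with warmup_interval=2
# and log_interval=2, run 1 is discarded, runs 2-4 accumulate into
# __pure_inf_time, and run 4 logs 1000 * (t2 + t3 + t4) / (4 - 2 + 1) ms,
# i.e. the mean of the post-warmup runs.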
|
"""**Utility functions** for LangChain.
These functions do not depend on any other LangChain module.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
# for type checking and IDE support, we include the imports here
# but we don't want to eagerly import them at runtime
from langchain_core.utils import image
from langchain_core.utils.aiter import abatch_iterate
from langchain_core.utils.env import get_from_dict_or_env, get_from_env
from langchain_core.utils.formatting import StrictFormatter, formatter
from langchain_core.utils.input import (
get_bolded_text,
get_color_mapping,
get_colored_text,
print_text,
)
from langchain_core.utils.iter import batch_iterate
from langchain_core.utils.loading import try_load_from_hub
from langchain_core.utils.pydantic import pre_init
from langchain_core.utils.strings import comma_list, stringify_dict, stringify_value
from langchain_core.utils.utils import (
build_extra_kwargs,
check_package_version,
convert_to_secret_str,
from_env,
get_pydantic_field_names,
guard_import,
mock_now,
raise_for_status_with_text,
secret_from_env,
xor_args,
)
__all__ = [
"build_extra_kwargs",
"StrictFormatter",
"check_package_version",
"convert_to_secret_str",
"formatter",
"get_bolded_text",
"get_color_mapping",
"get_colored_text",
"get_pydantic_field_names",
"guard_import",
"mock_now",
"print_text",
"raise_for_status_with_text",
"xor_args",
"try_load_from_hub",
"image",
"get_from_env",
"get_from_dict_or_env",
"stringify_dict",
"comma_list",
"stringify_value",
"pre_init",
"batch_iterate",
"abatch_iterate",
"from_env",
"secret_from_env",
]
_dynamic_imports = {
"image": "__module__",
"abatch_iterate": "aiter",
"get_from_dict_or_env": "env",
"get_from_env": "env",
"StrictFormatter": "formatting",
"formatter": "formatting",
"get_bolded_text": "input",
"get_color_mapping": "input",
"get_colored_text": "input",
"print_text": "input",
"batch_iterate": "iter",
"try_load_from_hub": "loading",
"pre_init": "pydantic",
"comma_list": "strings",
"stringify_dict": "strings",
"stringify_value": "strings",
"build_extra_kwargs": "utils",
"check_package_version": "utils",
"convert_to_secret_str": "utils",
"from_env": "utils",
"get_pydantic_field_names": "utils",
"guard_import": "utils",
"mock_now": "utils",
"secret_from_env": "utils",
"xor_args": "utils",
"raise_for_status_with_text": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
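# Behavior sketch of the PEP 562 lazy loading above: attribute access only
# reaches __getattr__ when normal lookup fails, and caching in globals() makes
# every later lookup direct.
#
#   import langchain_core.utils as utils   # no submodule imported yet
#   utils.batch_iterate                    # __getattr__ imports .iter once
#   utils.batch_iterate                    # found in globals(), hook not called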
|
"""**Utility functions** for LangChain.
These functions do not depend on any other LangChain module.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
# for type checking and IDE support, we include the imports here
# but we don't want to eagerly import them at runtime
from langchain_core.utils import image
from langchain_core.utils.aiter import abatch_iterate
from langchain_core.utils.env import get_from_dict_or_env, get_from_env
from langchain_core.utils.formatting import StrictFormatter, formatter
from langchain_core.utils.input import (
get_bolded_text,
get_color_mapping,
get_colored_text,
print_text,
)
from langchain_core.utils.iter import batch_iterate
from langchain_core.utils.loading import try_load_from_hub
from langchain_core.utils.pydantic import pre_init
from langchain_core.utils.strings import comma_list, stringify_dict, stringify_value
from langchain_core.utils.utils import (
build_extra_kwargs,
check_package_version,
convert_to_secret_str,
from_env,
get_pydantic_field_names,
guard_import,
mock_now,
raise_for_status_with_text,
secret_from_env,
xor_args,
)
__all__ = [
"build_extra_kwargs",
"StrictFormatter",
"check_package_version",
"convert_to_secret_str",
"formatter",
"get_bolded_text",
"get_color_mapping",
"get_colored_text",
"get_pydantic_field_names",
"guard_import",
"mock_now",
"print_text",
"raise_for_status_with_text",
"xor_args",
"try_load_from_hub",
"image",
"get_from_env",
"get_from_dict_or_env",
"stringify_dict",
"comma_list",
"stringify_value",
"pre_init",
"batch_iterate",
"abatch_iterate",
"from_env",
"secret_from_env",
]
_dynamic_imports = {
"image": "__module__",
"abatch_iterate": "aiter",
"get_from_dict_or_env": "env",
"get_from_env": "env",
"StrictFormatter": "formatting",
"formatter": "formatting",
"get_bolded_text": "input",
"get_color_mapping": "input",
"get_colored_text": "input",
"print_text": "input",
"batch_iterate": "iter",
"try_load_from_hub": "loading",
"pre_init": "pydantic",
"comma_list": "strings",
"stringify_dict": "strings",
"stringify_value": "strings",
"build_extra_kwargs": "utils",
"check_package_version": "utils",
"convert_to_secret_str": "utils",
"from_env": "utils",
"get_pydantic_field_names": "utils",
"guard_import": "utils",
"mock_now": "utils",
"secret_from_env": "utils",
"xor_args": "utils",
"raise_for_status_with_text": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent # type: ignore[name-defined]
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
import glob
import os
import pytest
from jina import Document, Flow
from jina.constants import __uptime__, __windows__
from jina.enums import LogVerbosity
from jina.helper import colored
from jina.logging.logger import JinaLogger
cur_dir = os.path.dirname(os.path.abspath(__file__))
def log(logger: JinaLogger):
logger.debug('this is test debug message')
logger.info('this is test info message')
logger.success('this is test success message')
logger.warning('this is test warning message')
logger.error('this is test error message')
logger.critical('this is test critical message')
def test_color_log():
with JinaLogger('test_logger') as logger:
logger.debug('this is test debug message')
logger.info('this is test info message')
logger.info(f'this is test {colored("color", "red")} message')
logger.success('this is test success message')
logger.warning('this is test warning message')
logger.error('this is test error message')
logger.critical('this is test critical message')
def test_logging_syslog():
with JinaLogger(
'test_logger', log_config=os.path.join(cur_dir, 'yaml/syslog.yml')
) as logger:
log(logger)
assert len(logger.handlers) == (0 if __windows__ else 1)
def test_logging_default():
import logging
import sys
with JinaLogger('test_logger') as logger:
log(logger)
assert len(logger.handlers) == 1
# test whether root handlers are suppressed
logging.root.handlers.append(logging.StreamHandler(sys.stdout))
with JinaLogger('test_logger', suppress_root_logging=False) as logger:
log(logger)
assert len(logging.root.handlers) > 0
with JinaLogger('test_logger') as logger:
log(logger)
assert len(logging.root.handlers) == 0
def test_logging_level_yaml(monkeypatch):
monkeypatch.delenv('JINA_LOG_LEVEL', raising=True) # ignore global env
fn = os.path.join(cur_dir, f'jina-{__uptime__}.log')
with JinaLogger(
'test_file_logger', log_config=os.path.join(cur_dir, 'yaml/file.yml')
) as file_logger:
if os.path.exists(fn):
os.remove(fn)
log(file_logger)
assert file_logger.logger.level == LogVerbosity.from_string('INFO')
for f in glob.glob(cur_dir + '/*.log'):
os.remove(f)
def test_logging_file(monkeypatch):
monkeypatch.delenv('JINA_LOG_LEVEL', raising=True) # ignore global env
uptime = __uptime__.replace(':', '.') if __windows__ else __uptime__
fn = os.path.join(cur_dir, f'jina-{uptime}.log')
with JinaLogger(
'test_file_logger', log_config=os.path.join(cur_dir, 'yaml/file.yml')
) as file_logger:
log(file_logger)
assert os.path.exists(fn)
with open(fn) as fp:
assert len(fp.readlines()) == 5
for f in glob.glob(cur_dir + '/*.log'):
os.remove(f)
@pytest.mark.slow
def test_logging_quiet(caplog):
# no way to capture logs in multiprocessing
# see discussion here: https://github.com/pytest-dev/pytest/issues/3037#issuecomment-745050393
f = Flow().add(quiet=True).add()
with f:
f.index(Document())
|
import glob
import os
import pytest
from jina import Document, Flow
from jina.constants import __uptime__, __windows__
from jina.enums import LogVerbosity
from jina.helper import colored
from jina.logging.logger import JinaLogger
cur_dir = os.path.dirname(os.path.abspath(__file__))
def log(logger: JinaLogger):
logger.debug('this is test debug message')
logger.info('this is test info message')
logger.success('this is test success message')
logger.warning('this is test warning message')
logger.error('this is test error message')
logger.critical('this is test critical message')
def test_color_log():
with JinaLogger('test_logger') as logger:
logger.debug('this is test debug message')
logger.info('this is test info message')
logger.info(f'this is test {colored("color", "red")} message')
logger.success('this is test success message')
logger.warning('this is test warning message')
logger.error('this is test error message')
logger.critical('this is test critical message')
def test_logging_syslog():
with JinaLogger(
'test_logger', log_config=os.path.join(cur_dir, 'yaml/syslog.yml')
) as logger:
log(logger)
assert len(logger.handlers) == (0 if __windows__ else 1)
def test_logging_default():
with JinaLogger('test_logger') as logger:
log(logger)
assert len(logger.handlers) == 1
def test_logging_level_yaml(monkeypatch):
monkeypatch.delenv('JINA_LOG_LEVEL', raising=True) # ignore global env
fn = os.path.join(cur_dir, f'jina-{__uptime__}.log')
with JinaLogger(
'test_file_logger', log_config=os.path.join(cur_dir, 'yaml/file.yml')
) as file_logger:
if os.path.exists(fn):
os.remove(fn)
log(file_logger)
assert file_logger.logger.level == LogVerbosity.from_string('INFO')
for f in glob.glob(cur_dir + '/*.log'):
os.remove(f)
def test_logging_file(monkeypatch):
monkeypatch.delenv('JINA_LOG_LEVEL', raising=True) # ignore global env
uptime = __uptime__.replace(':', '.') if __windows__ else __uptime__
fn = os.path.join(cur_dir, f'jina-{uptime}.log')
with JinaLogger(
'test_file_logger', log_config=os.path.join(cur_dir, 'yaml/file.yml')
) as file_logger:
log(file_logger)
assert os.path.exists(fn)
with open(fn) as fp:
assert len(fp.readlines()) == 5
for f in glob.glob(cur_dir + '/*.log'):
os.remove(f)
@pytest.mark.slow
def test_logging_quiet(caplog):
# no way to capture logs in multiprocessing
# see discussion here: https://github.com/pytest-dev/pytest/issues/3037#issuecomment-745050393
f = Flow().add(quiet=True).add()
with f:
f.index(Document())
|
from typing import List
import numpy as np
from torch.utils.data import Dataset
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from sentence_transformers.readers.InputExample import InputExample
class DenoisingAutoEncoderDataset(Dataset):
"""
The DenoisingAutoEncoderDataset returns InputExamples in the format: texts=[noise_fn(sentence), sentence]
It is used in combination with the DenoisingAutoEncoderLoss: here, a decoder tries to reconstruct the
sentence without noise.
Args:
sentences: A list of sentences
noise_fn: A noise function: Given a string, it returns a string
with noise, e.g. deleted words
"""
def __init__(self, sentences: List[str], noise_fn=lambda s: DenoisingAutoEncoderDataset.delete(s)):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.sentences = sentences
self.noise_fn = noise_fn
def __getitem__(self, item):
sent = self.sentences[item]
return InputExample(texts=[self.noise_fn(sent), sent])
def __len__(self):
return len(self.sentences)
# Deletion noise.
@staticmethod
def delete(text, del_ratio=0.6):
from nltk import TreebankWordDetokenizer, word_tokenize
words = word_tokenize(text)
n = len(words)
if n == 0:
return text
keep_or_not = np.random.rand(n) > del_ratio
if sum(keep_or_not) == 0:
keep_or_not[np.random.choice(n)] = True # guarantee that at least one word remains
words_processed = TreebankWordDetokenizer().detokenize(np.array(words)[keep_or_not])
return words_processed
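# Usage sketch (the sentence is illustrative; requires nltk with its 'punkt'
# tokenizer data installed):
#   data = DenoisingAutoEncoderDataset(["The quick brown fox jumps over the lazy dog."])
#   noisy, original = data[0].texts  # texts == [noise_fn(sentence), sentence]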
|
from torch.utils.data import Dataset
from typing import List
from ..readers.InputExample import InputExample
import numpy as np
from transformers.utils.import_utils import is_nltk_available, NLTK_IMPORT_ERROR
class DenoisingAutoEncoderDataset(Dataset):
"""
The DenoisingAutoEncoderDataset returns InputExamples in the format: texts=[noise_fn(sentence), sentence]
It is used in combination with the DenoisingAutoEncoderLoss: here, a decoder tries to reconstruct the
sentence without noise.
:param sentences: A list of sentences
:param noise_fn: A noise function: Given a string, it returns a string with noise, e.g. deleted words
"""
def __init__(self, sentences: List[str], noise_fn=lambda s: DenoisingAutoEncoderDataset.delete(s)):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.sentences = sentences
self.noise_fn = noise_fn
def __getitem__(self, item):
sent = self.sentences[item]
return InputExample(texts=[self.noise_fn(sent), sent])
def __len__(self):
return len(self.sentences)
# Deletion noise.
@staticmethod
def delete(text, del_ratio=0.6):
from nltk import word_tokenize, TreebankWordDetokenizer
words = word_tokenize(text)
n = len(words)
if n == 0:
return text
keep_or_not = np.random.rand(n) > del_ratio
if sum(keep_or_not) == 0:
keep_or_not[np.random.choice(n)] = True # guarantee that at least one word remains
words_processed = TreebankWordDetokenizer().detokenize(np.array(words)[keep_or_not])
return words_processed
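# Worked note on delete() above: np.random.rand(n) > del_ratio keeps each token
# with probability 1 - del_ratio, so the default del_ratio=0.6 retains roughly
# 40% of the words; the np.random.choice fallback guarantees at least one
# token survives.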
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class UnusableObjectError(NotImplementedError):
...
|
class UnusableObjectError(NotImplementedError):
...
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.optimizers.schedules.learning_rate_schedule import (
CosineDecay as CosineDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
CosineDecayRestarts as CosineDecayRestarts,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
ExponentialDecay as ExponentialDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
InverseTimeDecay as InverseTimeDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
LearningRateSchedule as LearningRateSchedule,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
PiecewiseConstantDecay as PiecewiseConstantDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
PolynomialDecay as PolynomialDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
deserialize as deserialize,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
serialize as serialize,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.optimizers.schedules.learning_rate_schedule import CosineDecay
from keras.src.optimizers.schedules.learning_rate_schedule import (
CosineDecayRestarts,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
ExponentialDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
InverseTimeDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
LearningRateSchedule,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
PiecewiseConstantDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
PolynomialDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import deserialize
from keras.src.optimizers.schedules.learning_rate_schedule import serialize
|
import pytest
from llama_index.core import MockEmbedding
from llama_index.core.chat_engine.condense_plus_context import (
CondensePlusContextChatEngine,
)
from llama_index.core.indices import VectorStoreIndex
from llama_index.core.llms.mock import MockLLM
from llama_index.core.schema import Document
SYSTEM_PROMPT = "Talk like a pirate."
@pytest.fixture()
def chat_engine() -> CondensePlusContextChatEngine:
index = VectorStoreIndex.from_documents(
[Document.example()], embed_model=MockEmbedding(embed_dim=3)
)
retriever = index.as_retriever()
return CondensePlusContextChatEngine.from_defaults(
retriever, llm=MockLLM(), system_prompt=SYSTEM_PROMPT
)
def test_chat(chat_engine: CondensePlusContextChatEngine):
response = chat_engine.chat("Hello World!")
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert len(chat_engine.chat_history) == 2
response = chat_engine.chat("What is the capital of the moon?")
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert "What is the capital of the moon?" in str(response)
assert len(chat_engine.chat_history) == 4
def test_chat_stream(chat_engine: CondensePlusContextChatEngine):
response = chat_engine.stream_chat("Hello World!")
num_iters = 0
for _ in response.response_gen:
num_iters += 1
assert num_iters > 10
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert len(chat_engine.chat_history) == 2
response = chat_engine.stream_chat("What is the capital of the moon?")
num_iters = 0
for _ in response.response_gen:
num_iters += 1
assert num_iters > 10
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert "What is the capital of the moon?" in str(response)
assert len(chat_engine.chat_history) == 4
@pytest.mark.asyncio
async def test_achat(chat_engine: CondensePlusContextChatEngine):
response = await chat_engine.achat("Hello World!")
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert len(chat_engine.chat_history) == 2
response = await chat_engine.achat("What is the capital of the moon?")
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert "What is the capital of the moon?" in str(response)
assert len(chat_engine.chat_history) == 4
@pytest.mark.asyncio
async def test_chat_astream(chat_engine: CondensePlusContextChatEngine):
response = await chat_engine.astream_chat("Hello World!")
num_iters = 0
async for _ in response.async_response_gen():
num_iters += 1
assert num_iters > 10
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert len(chat_engine.chat_history) == 2
response = await chat_engine.astream_chat("What is the capital of the moon?")
num_iters = 0
async for _ in response.async_response_gen():
num_iters += 1
assert num_iters > 10
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert "What is the capital of the moon?" in str(response)
assert len(chat_engine.chat_history) == 4
|
import pytest
from llama_index.core import MockEmbedding
from llama_index.core.chat_engine.condense_plus_context import (
CondensePlusContextChatEngine,
)
from llama_index.core.indices import VectorStoreIndex
from llama_index.core.llms.mock import MockLLM
from llama_index.core.schema import Document
SYSTEM_PROMPT = "Talk like a pirate."
@pytest.fixture()
def chat_engine() -> CondensePlusContextChatEngine:
index = VectorStoreIndex.from_documents(
[Document.example()], embed_model=MockEmbedding(embed_dim=3)
)
retriever = index.as_retriever()
return CondensePlusContextChatEngine.from_defaults(
retriever, llm=MockLLM(), system_prompt=SYSTEM_PROMPT
)
def test_chat(chat_engine: CondensePlusContextChatEngine):
response = chat_engine.chat("Hello World!")
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert len(chat_engine.chat_history) == 2
response = chat_engine.chat("What is the capital of the moon?")
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert "What is the capital of the moon?" in str(response)
assert len(chat_engine.chat_history) == 4
def test_chat_stream(chat_engine: CondensePlusContextChatEngine):
response = chat_engine.stream_chat("Hello World!")
num_iters = 0
for _ in response.response_gen:
num_iters += 1
assert num_iters > 10
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert len(chat_engine.chat_history) == 2
response = chat_engine.stream_chat("What is the capital of the moon?")
num_iters = 0
for _ in response.response_gen:
num_iters += 1
assert num_iters > 10
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert "What is the capital of the moon?" in str(response)
assert len(chat_engine.chat_history) == 4
@pytest.mark.asyncio()
async def test_achat(chat_engine: CondensePlusContextChatEngine):
response = await chat_engine.achat("Hello World!")
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert len(chat_engine.chat_history) == 2
response = await chat_engine.achat("What is the capital of the moon?")
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert "What is the capital of the moon?" in str(response)
assert len(chat_engine.chat_history) == 4
@pytest.mark.asyncio()
async def test_chat_astream(chat_engine: CondensePlusContextChatEngine):
response = await chat_engine.astream_chat("Hello World!")
num_iters = 0
async for _ in response.async_response_gen():
num_iters += 1
assert num_iters > 10
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert len(chat_engine.chat_history) == 2
response = await chat_engine.astream_chat("What is the capital of the moon?")
num_iters = 0
async for _ in response.async_response_gen():
num_iters += 1
assert num_iters > 10
assert SYSTEM_PROMPT in str(response)
assert "Hello World!" in str(response)
assert "What is the capital of the moon?" in str(response)
assert len(chat_engine.chat_history) == 4
|
# Copyright (c) OpenMMLab. All rights reserved.
from ._deepspeed import DeepSpeedOptimWrapper
from .amp_optimizer_wrapper import AmpOptimWrapper
from .apex_optimizer_wrapper import ApexOptimWrapper
from .base import BaseOptimWrapper
from .builder import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
build_optim_wrapper)
from .default_constructor import DefaultOptimWrapperConstructor
from .optimizer_wrapper import OptimWrapper
from .optimizer_wrapper_dict import OptimWrapperDict
from .zero_optimizer import ZeroRedundancyOptimizer
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS',
'DefaultOptimWrapperConstructor', 'build_optim_wrapper', 'OptimWrapper',
'AmpOptimWrapper', 'ApexOptimWrapper', 'OptimWrapperDict',
'ZeroRedundancyOptimizer', 'BaseOptimWrapper', 'DeepSpeedOptimWrapper'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils import is_installed
from .amp_optimizer_wrapper import AmpOptimWrapper
from .apex_optimizer_wrapper import ApexOptimWrapper
from .base import BaseOptimWrapper
from .builder import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
build_optim_wrapper)
from .default_constructor import DefaultOptimWrapperConstructor
from .optimizer_wrapper import OptimWrapper
from .optimizer_wrapper_dict import OptimWrapperDict
from .zero_optimizer import ZeroRedundancyOptimizer
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS',
'DefaultOptimWrapperConstructor', 'build_optim_wrapper', 'OptimWrapper',
'AmpOptimWrapper', 'ApexOptimWrapper', 'OptimWrapperDict',
'ZeroRedundancyOptimizer', 'BaseOptimWrapper'
]
if is_installed('deepspeed'):
from ._deepspeed import DeepSpeedOptimWrapper # noqa:F401
__all__.append('DeepSpeedOptimWrapper')
|
from typing import Literal
from pydantic import SecretStr
from backend.data.model import (
APIKeyCredentials,
CredentialsField,
CredentialsMetaInput,
OAuth2Credentials,
)
from backend.integrations.providers import ProviderName
from backend.util.settings import Secrets
secrets = Secrets()
GITHUB_OAUTH_IS_CONFIGURED = bool(
secrets.github_client_id and secrets.github_client_secret
)
GithubCredentials = APIKeyCredentials | OAuth2Credentials
GithubCredentialsInput = CredentialsMetaInput[
Literal[ProviderName.GITHUB],
Literal["api_key", "oauth2"] if GITHUB_OAUTH_IS_CONFIGURED else Literal["api_key"],
]
GithubFineGrainedAPICredentials = APIKeyCredentials
GithubFineGrainedAPICredentialsInput = CredentialsMetaInput[
Literal[ProviderName.GITHUB], Literal["api_key"]
]
def GithubCredentialsField(scope: str) -> GithubCredentialsInput:
"""
Creates a GitHub credentials input on a block.
Params:
scope: The authorization scope needed for the block to work. ([list of available scopes](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/scopes-for-oauth-apps#available-scopes))
""" # noqa
return CredentialsField(
required_scopes={scope},
description="The GitHub integration can be used with OAuth, "
"or any API key with sufficient permissions for the blocks it is used on.",
)
def GithubFineGrainedAPICredentialsField(
scope: str,
) -> GithubFineGrainedAPICredentialsInput:
return CredentialsField(
required_scopes={scope},
description="The GitHub integration can be used with OAuth, "
"or any API key with sufficient permissions for the blocks it is used on.",
)
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="github",
api_key=SecretStr("mock-github-api-key"),
title="Mock GitHub API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
TEST_FINE_GRAINED_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="github",
api_key=SecretStr("mock-github-api-key"),
title="Mock GitHub API key",
expires_at=None,
)
TEST_FINE_GRAINED_CREDENTIALS_INPUT = {
"provider": TEST_FINE_GRAINED_CREDENTIALS.provider,
"id": TEST_FINE_GRAINED_CREDENTIALS.id,
"type": TEST_FINE_GRAINED_CREDENTIALS.type,
"title": TEST_FINE_GRAINED_CREDENTIALS.type,
}
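# Hypothetical usage on a block's input schema (class name and scope are
# illustrative, not taken from this file):
#
#   class Input(BlockSchema):
#       credentials: GithubCredentialsInput = GithubCredentialsField("repo")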
|
from typing import Literal
from pydantic import SecretStr
from backend.data.model import (
APIKeyCredentials,
CredentialsField,
CredentialsMetaInput,
OAuth2Credentials,
)
from backend.integrations.providers import ProviderName
from backend.util.settings import Secrets
secrets = Secrets()
GITHUB_OAUTH_IS_CONFIGURED = bool(
secrets.github_client_id and secrets.github_client_secret
)
GithubCredentials = APIKeyCredentials | OAuth2Credentials
GithubCredentialsInput = CredentialsMetaInput[
Literal[ProviderName.GITHUB],
Literal["api_key", "oauth2"] if GITHUB_OAUTH_IS_CONFIGURED else Literal["api_key"],
]
def GithubCredentialsField(scope: str) -> GithubCredentialsInput:
"""
Creates a GitHub credentials input on a block.
Params:
scope: The authorization scope needed for the block to work. ([list of available scopes](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/scopes-for-oauth-apps#available-scopes))
""" # noqa
return CredentialsField(
required_scopes={scope},
description="The GitHub integration can be used with OAuth, "
"or any API key with sufficient permissions for the blocks it is used on.",
)
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="github",
api_key=SecretStr("mock-github-api-key"),
title="Mock GitHub API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
|
from typing import Any, Optional, Union
import torch
from torch import nn
def _replace_relu(module: nn.Module) -> None:
reassign = {}
for name, mod in module.named_children():
_replace_relu(mod)
# Checking for explicit type instead of instance
# as we only want to replace modules of the exact type
# not inherited classes
if type(mod) is nn.ReLU or type(mod) is nn.ReLU6:
reassign[name] = nn.ReLU(inplace=False)
for key, value in reassign.items():
module._modules[key] = value
def quantize_model(model: nn.Module, backend: str) -> None:
_dummy_input_data = torch.rand(1, 3, 299, 299)
if backend not in torch.backends.quantized.supported_engines:
raise RuntimeError("Quantized backend not supported ")
torch.backends.quantized.engine = backend
model.eval()
# Make sure that weight qconfig matches that of the serialized models
if backend == "fbgemm":
model.qconfig = torch.ao.quantization.QConfig( # type: ignore[assignment]
activation=torch.ao.quantization.default_observer,
weight=torch.ao.quantization.default_per_channel_weight_observer,
)
elif backend == "qnnpack":
model.qconfig = torch.ao.quantization.QConfig( # type: ignore[assignment]
activation=torch.ao.quantization.default_observer, weight=torch.ao.quantization.default_weight_observer
)
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
model.fuse_model() # type: ignore[operator]
torch.ao.quantization.prepare(model, inplace=True)
model(_dummy_input_data)
torch.ao.quantization.convert(model, inplace=True)
def _fuse_modules(
model: nn.Module, modules_to_fuse: Union[list[str], list[list[str]]], is_qat: Optional[bool], **kwargs: Any
):
if is_qat is None:
is_qat = model.training
method = torch.ao.quantization.fuse_modules_qat if is_qat else torch.ao.quantization.fuse_modules
return method(model, modules_to_fuse, **kwargs)
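# Usage sketch of the eager-mode post-training quantization flow above (model
# choice is illustrative):
#   from torchvision.models.quantization import resnet50
#   model = resnet50(weights=None, quantize=False)
#   quantize_model(model, "fbgemm")  # fuse -> prepare -> calibrate -> convert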
|
from typing import Any, List, Optional, Union
import torch
from torch import nn
def _replace_relu(module: nn.Module) -> None:
reassign = {}
for name, mod in module.named_children():
_replace_relu(mod)
# Checking for explicit type instead of instance
# as we only want to replace modules of the exact type
# not inherited classes
if type(mod) is nn.ReLU or type(mod) is nn.ReLU6:
reassign[name] = nn.ReLU(inplace=False)
for key, value in reassign.items():
module._modules[key] = value
def quantize_model(model: nn.Module, backend: str) -> None:
_dummy_input_data = torch.rand(1, 3, 299, 299)
if backend not in torch.backends.quantized.supported_engines:
raise RuntimeError("Quantized backend not supported ")
torch.backends.quantized.engine = backend
model.eval()
# Make sure that weight qconfig matches that of the serialized models
if backend == "fbgemm":
model.qconfig = torch.ao.quantization.QConfig( # type: ignore[assignment]
activation=torch.ao.quantization.default_observer,
weight=torch.ao.quantization.default_per_channel_weight_observer,
)
elif backend == "qnnpack":
model.qconfig = torch.ao.quantization.QConfig( # type: ignore[assignment]
activation=torch.ao.quantization.default_observer, weight=torch.ao.quantization.default_weight_observer
)
# TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
model.fuse_model() # type: ignore[operator]
torch.ao.quantization.prepare(model, inplace=True)
model(_dummy_input_data)
torch.ao.quantization.convert(model, inplace=True)
def _fuse_modules(
model: nn.Module, modules_to_fuse: Union[List[str], List[List[str]]], is_qat: Optional[bool], **kwargs: Any
):
if is_qat is None:
is_qat = model.training
method = torch.ao.quantization.fuse_modules_qat if is_qat else torch.ao.quantization.fuse_modules
return method(model, modules_to_fuse, **kwargs)
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg),
mask_head=dict(norm_cfg=norm_cfg)))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
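# Note: {{_base_.backend_args}} is MMEngine config interpolation, not a Python
# dict literal; at parse time it is replaced by the `backend_args` value
# defined in the _base_ config.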
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg),
mask_head=dict(norm_cfg=norm_cfg)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
input_schema: ClassVar[Features] = Features({"text": Value("string")})
label_schema: ClassVar[Features] = Features({})
text_column: str = "text"
@property
def column_mapping(self) -> Dict[str, str]:
return {self.text_column: "text"}
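# Sketch: with a dataset whose text lives in a "content" column (column name is
# illustrative), the template maps it back to the canonical "text" feature:
#   task = LanguageModeling(text_column="content")
#   task.column_mapping  # {"content": "text"}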
|
from dataclasses import dataclass
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
task: str = "language-modeling"
input_schema: ClassVar[Features] = Features({"text": Value("string")})
label_schema: ClassVar[Features] = Features({})
text_column: str = "text"
@property
def column_mapping(self) -> Dict[str, str]:
return {self.text_column: "text"}
|
from __future__ import annotations
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Any
from sentence_transformers.model_card import SentenceTransformerModelCardCallback, SentenceTransformerModelCardData
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
pass
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseEncoderModelCardCallback(SentenceTransformerModelCardCallback):
pass
@dataclass
class SparseEncoderModelCardData(SentenceTransformerModelCardData):
"""A dataclass storing data used in the model card.
Args:
language (`Optional[Union[str, List[str]]]`): The model language, either a string or a list,
e.g. "en" or ["en", "de", "nl"]
license (`Optional[str]`): The license of the model, e.g. "apache-2.0", "mit",
or "cc-by-nc-sa-4.0"
model_name (`Optional[str]`): The pretty name of the model, e.g. "SparseEncoder based on answerdotai/ModernBERT-base".
model_id (`Optional[str]`): The model ID when pushing the model to the Hub,
e.g. "tomaarsen/se-mpnet-base-ms-marco".
train_datasets (`List[Dict[str, str]]`): A list of the names and/or Hugging Face dataset IDs of the training datasets.
e.g. [{"name": "SNLI", "id": "stanfordnlp/snli"}, {"name": "MultiNLI", "id": "nyu-mll/multi_nli"}, {"name": "STSB"}]
eval_datasets (`List[Dict[str, str]]`): A list of the names and/or Hugging Face dataset IDs of the evaluation datasets.
e.g. [{"name": "SNLI", "id": "stanfordnlp/snli"}, {"id": "mteb/stsbenchmark-sts"}]
task_name (`str`): The human-readable task the model is trained on,
e.g. "semantic search and sparse retrieval".
tags (`Optional[List[str]]`): A list of tags for the model,
e.g. ["sentence-transformers", "sparse-encoder"].
.. tip::
Install `codecarbon <https://github.com/mlco2/codecarbon>`_ to automatically track carbon emission usage and
include it in your model cards.
Example::
>>> model = SparseEncoder(
... "microsoft/mpnet-base",
... model_card_data=SparseEncoderModelCardData(
... model_id="tomaarsen/se-mpnet-base-allnli",
... train_datasets=[{"name": "SNLI", "id": "stanfordnlp/snli"}, {"name": "MultiNLI", "id": "nyu-mll/multi_nli"}],
... eval_datasets=[{"name": "SNLI", "id": "stanfordnlp/snli"}, {"name": "MultiNLI", "id": "nyu-mll/multi_nli"}],
... license="apache-2.0",
... language="en",
... ),
... )
"""
# Potentially provided by the user
task_name: str = field(default=None)
tags: list[str] | None = field(
default_factory=lambda: [
"sentence-transformers",
"sparse-encoder",
]
)
# Automatically filled by `SparseEncoderModelCardCallback` and the Trainer directly
predict_example: list[list[str]] | None = field(default=None, init=False)
# Computed once, always unchanged
pipeline_tag: str = field(default=None, init=False)
template_path: Path = field(default=Path(__file__).parent / "model_card_template.md", init=False)
# Passed via `register_model` only
model: SparseEncoder | None = field(default=None, init=False, repr=False)
def register_model(self, model: SparseEncoder) -> None:
self.model = model
if self.task_name is None:
self.task_name = "semantic search and sparse retrieval"
if self.pipeline_tag is None:
self.pipeline_tag = "feature-extraction"
def tokenize(self, text: str | list[str]) -> dict[str, Any]:
return self.model.tokenizer(text)
def get_model_specific_metadata(self) -> dict[str, Any]:
similarity_fn_name = "Dot Product"
if self.model.similarity_fn_name:
similarity_fn_name = {
"cosine": "Cosine Similarity",
"dot": "Dot Product",
"euclidean": "Euclidean Distance",
"manhattan": "Manhattan Distance",
}.get(self.model.similarity_fn_name, self.model.similarity_fn_name.replace("_", " ").title())
return {
"model_max_length": self.model.get_max_seq_length(),
"output_dimensionality": self.model.get_sentence_embedding_dimension(),
"model_string": str(self.model),
"similarity_fn_name": similarity_fn_name,
}
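# Labeling sketch for get_model_specific_metadata above: known keys map to a
# curated label ("dot" -> "Dot Product"), while an unknown name falls back to
# replace("_", " ").title(), e.g. a hypothetical "max_sim" -> "Max Sim".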
|
from __future__ import annotations
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Any
from sentence_transformers.model_card import SentenceTransformerModelCardCallback, SentenceTransformerModelCardData
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
pass
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseEncoderModelCardCallback(SentenceTransformerModelCardCallback):
pass
@dataclass
class SparseEncoderModelCardData(SentenceTransformerModelCardData):
"""A dataclass storing data used in the model card.
Args:
language (`Optional[Union[str, List[str]]]`): The model language, either a string or a list,
e.g. "en" or ["en", "de", "nl"]
license (`Optional[str]`): The license of the model, e.g. "apache-2.0", "mit",
or "cc-by-nc-sa-4.0"
model_name (`Optional[str]`): The pretty name of the model, e.g. "SparseEncoder based on answerdotai/ModernBERT-base".
model_id (`Optional[str]`): The model ID when pushing the model to the Hub,
e.g. "tomaarsen/se-mpnet-base-ms-marco".
train_datasets (`List[Dict[str, str]]`): A list of the names and/or Hugging Face dataset IDs of the training datasets.
e.g. [{"name": "SNLI", "id": "stanfordnlp/snli"}, {"name": "MultiNLI", "id": "nyu-mll/multi_nli"}, {"name": "STSB"}]
eval_datasets (`List[Dict[str, str]]`): A list of the names and/or Hugging Face dataset IDs of the evaluation datasets.
e.g. [{"name": "SNLI", "id": "stanfordnlp/snli"}, {"id": "mteb/stsbenchmark-sts"}]
task_name (`str`): The human-readable task the model is trained on,
e.g. "semantic search and sparse retrieval".
tags (`Optional[List[str]]`): A list of tags for the model,
e.g. ["sentence-transformers", "sparse-encoder"].
.. tip::
Install `codecarbon <https://github.com/mlco2/codecarbon>`_ to automatically track carbon emission usage and
include it in your model cards.
Example::
>>> model = SparseEncoder(
... "microsoft/mpnet-base",
... model_card_data=SparseEncoderModelCardData(
... model_id="tomaarsen/se-mpnet-base-allnli",
... train_datasets=[{"name": "SNLI", "id": "stanfordnlp/snli"}, {"name": "MultiNLI", "id": "nyu-mll/multi_nli"}],
... eval_datasets=[{"name": "SNLI", "id": "stanfordnlp/snli"}, {"name": "MultiNLI", "id": "nyu-mll/multi_nli"}],
... license="apache-2.0",
... language="en",
... ),
... )
"""
# Potentially provided by the user
task_name: str = field(default=None)
tags: list[str] | None = field(
default_factory=lambda: [
"sentence-transformers",
"sparse-encoder",
]
)
# Automatically filled by `SparseEncoderModelCardCallback` and the Trainer directly
predict_example: list[list[str]] | None = field(default=None, init=False)
# Computed once, always unchanged
pipeline_tag: str = field(default=None, init=False)
template_path: Path = field(default=Path(__file__).parent / "model_card_template.md", init=False)
# Passed via `register_model` only
model: SparseEncoder | None = field(default=None, init=False, repr=False)
def register_model(self, model: SparseEncoder) -> None:
self.model = model
if self.task_name is None:
self.task_name = "semantic search and sparse retrieval"
if self.pipeline_tag is None:
self.pipeline_tag = "feature-extraction"
def tokenize(self, text: str | list[str]) -> dict[str, Any]:
return self.model.tokenizer(text)
def get_model_specific_metadata(self) -> dict[str, Any]:
return {
"model_max_length": self.model.get_max_seq_length(),
"output_dimensionality": self.model.get_sentence_embedding_dimension(),
}
|
# Copyright (c) Meta Platforms, Inc. and affiliates
import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
class LlamaLLM(OpenAILike):
"""
Llama LLM.
Examples:
`pip install llama-index-llms-meta`
```python
from llama_index.llms.meta import LlamaLLM
# set api key in env or in llm
# import os
# os.environ["LLAMA_API_KEY"] = "your api key"
llm = LlamaLLM(
model="Llama-3.3-8B-Instruct", api_key="your_api_key"
)
resp = llm.complete("Who is Paul Graham?")
print(resp)
```
"""
def __init__(
self,
model: str = "Llama-3.3-8B-Instruct",
api_key: Optional[str] = None,
api_base: str = "https://api.llama.com/compat/v1",
is_chat_model: bool = True,
# Slightly lower to account for tokenization defaults
context_window: int = 120000,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("LLAMA_API_KEY", None)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=is_chat_model,
context_window=context_window,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "LlamaLLM"
|
import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
class LlamaLLM(OpenAILike):
"""
Llama LLM.
Examples:
`pip install llama-index-llms-meta`
```python
from llama_index.llms.meta import LlamaLLM
# set api key in env or in llm
# import os
# os.environ["LLAMA_API_KEY"] = "your api key"
llm = LlamaLLM(
model="Llama-3.3-8B-Instruct", api_key="your_api_key"
)
resp = llm.complete("Who is Paul Graham?")
print(resp)
```
"""
def __init__(
self,
model: str = "Llama-3.3-8B-Instruct",
api_key: Optional[str] = None,
api_base: str = "https://api.llama.com/compat/v1",
is_chat_model: bool = True,
# Slightly lower to account for tokenization defaults
context_window: int = 120000,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("LLAMA_API_KEY", None)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=is_chat_model,
context_window=context_window,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "LlamaLLM"
|
from typing import TYPE_CHECKING, Optional, Dict
if TYPE_CHECKING:
from ... import DocumentArray
class PostMixin:
"""Helper functions for posting DocumentArray to Jina Flow."""
def post(
self,
host: str,
show_progress: bool = False,
batch_size: Optional[int] = None,
parameters: Optional[Dict] = None,
**kwargs,
) -> 'DocumentArray':
"""Posting itself to a remote Flow/Sandbox and get the modified DocumentArray back
:param host: a host string. Can be one of the following:
- `grpc://192.168.0.123:8080/endpoint`
- `ws://192.168.0.123:8080/endpoint`
- `http://192.168.0.123:8080/endpoint`
- `jinahub://Hello/endpoint`
- `jinahub+docker://Hello/endpoint`
- `jinahub+sandbox://Hello/endpoint`
:param show_progress: whether to show a progress bar
:param batch_size: number of Documents in each request
:param parameters: parameters to send in the request
:return: the new DocumentArray returned from remote
"""
if not self:
return
from urllib.parse import urlparse
r = urlparse(host)
_on = r.path or '/'
_port = r.port or None
standardized_host = (
r._replace(netloc=r.netloc.replace(f':{r.port}', ''))
._replace(path='')
.geturl()
)
batch_size = batch_size or len(self)
_scheme = r.scheme
_tls = False
if _scheme in ('grpcs', 'https', 'wss'):
_scheme = _scheme[:-1]
_tls = True
if _scheme == 'ws':
_scheme = 'websocket' # temp fix for the core
if _scheme.startswith('jinahub'):
from jina import Flow
f = Flow(quiet=True, prefetch=1).add(uses=standardized_host)
with f:
return f.post(
_on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
**kwargs,
)
elif _scheme in ('grpc', 'http', 'ws', 'websocket'):
from jina import Client
if _port:
standardized_host += f':{_port}'
c = Client(host=standardized_host)
return c.post(
_on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
**kwargs,
)
else:
raise ValueError(f'unsupported scheme: {r.scheme}')
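# Usage sketch (host and endpoint are illustrative):
#   da = DocumentArray([Document(text='hello')])
#   da = da.post('grpc://192.168.0.123:8080/index', show_progress=True)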
|
from typing import TYPE_CHECKING, Optional, Dict
if TYPE_CHECKING:
from ... import DocumentArray
class PostMixin:
"""Helper functions for posting DocumentArray to Jina Flow."""
def post(
self,
host: str,
show_progress: bool = False,
batch_size: Optional[int] = None,
parameters: Optional[Dict] = None,
**kwargs,
) -> 'DocumentArray':
"""Posting itself to a remote Flow/Sandbox and get the modified DocumentArray back
:param host: a host string. Can be one of the following:
- `grpc://192.168.0.123:8080/endpoint`
- `ws://192.168.0.123:8080/endpoint`
- `http://192.168.0.123:8080/endpoint`
- `jinahub://Hello/endpoint`
- `jinahub+docker://Hello/endpoint`
- `jinahub+sandbox://Hello/endpoint`
:param show_progress: whether to show a progress bar
:param batch_size: number of Documents in each request
:param parameters: parameters to send in the request
:return: the new DocumentArray returned from remote
"""
if not self:
return
from urllib.parse import urlparse
r = urlparse(host)
_on = r.path or '/'
_port = r.port or None
standardized_host = (
r._replace(netloc=r.netloc.replace(f':{r.port}', ''))
._replace(path='')
.geturl()
)
batch_size = batch_size or len(self)
_scheme = r.scheme
_tls = False
if _scheme in ('grpcs', 'https', 'wss'):
_scheme = _scheme[:-1]
_tls = True
if _scheme == 'ws':
_scheme = 'websocket' # temp fix for the core
if _scheme.startswith('jinahub'):
from jina import Flow
f = Flow(quiet=True, prefetch=1).add(uses=standardized_host)
with f:
return f.post(
_on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
**kwargs,
)
elif _scheme in ('grpc', 'http', 'ws', 'websocket'):
from jina import Client
if _port:
standardized_host += f':{_port}'
c = Client(host=standardized_host)
return c.post(
_on,
inputs=self,
show_progress=show_progress,
request_size=batch_size,
parameters=parameters,
)
else:
raise ValueError(f'unsupported scheme: {r.scheme}')
|
import logging
import typing
import autogpt_libs.auth.depends
import autogpt_libs.auth.middleware
import fastapi
import prisma
import backend.data.graph
import backend.integrations.creds_manager
import backend.integrations.webhooks.graph_lifecycle_hooks
import backend.server.v2.library.db
import backend.server.v2.library.model
logger = logging.getLogger(__name__)
router = fastapi.APIRouter()
integration_creds_manager = (
backend.integrations.creds_manager.IntegrationCredentialsManager()
)
@router.get(
"/agents",
tags=["library", "private"],
dependencies=[fastapi.Depends(autogpt_libs.auth.middleware.auth_middleware)],
)
async def get_library_agents(
user_id: typing.Annotated[
str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id)
]
) -> typing.Sequence[backend.server.v2.library.model.LibraryAgent]:
"""
Get all agents in the user's library, including both created and saved agents.
"""
try:
agents = await backend.server.v2.library.db.get_library_agents(user_id)
return agents
except Exception:
logger.exception("Exception occurred whilst getting library agents")
raise fastapi.HTTPException(
status_code=500, detail="Failed to get library agents"
)
@router.post(
"/agents/{store_listing_version_id}",
tags=["library", "private"],
dependencies=[fastapi.Depends(autogpt_libs.auth.middleware.auth_middleware)],
status_code=201,
)
async def add_agent_to_library(
store_listing_version_id: str,
user_id: typing.Annotated[
str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id)
],
) -> fastapi.Response:
"""
Add an agent from the store to the user's library.
Args:
store_listing_version_id (str): ID of the store listing version to add
user_id (str): ID of the authenticated user
Returns:
fastapi.Response: 201 status code on success
Raises:
HTTPException: If there is an error adding the agent to the library
"""
try:
# Get the graph from the store listing
store_listing_version = (
await prisma.models.StoreListingVersion.prisma().find_unique(
where={"id": store_listing_version_id}, include={"Agent": True}
)
)
if not store_listing_version or not store_listing_version.Agent:
raise fastapi.HTTPException(
status_code=404,
detail=f"Store listing version {store_listing_version_id} not found",
)
agent = store_listing_version.Agent
if agent.userId == user_id:
raise fastapi.HTTPException(
status_code=400, detail="Cannot add own agent to library"
)
# Create a new graph from the template
graph = await backend.data.graph.get_graph(
agent.id, agent.version, template=True, user_id=user_id
)
if not graph:
raise fastapi.HTTPException(
status_code=404, detail=f"Agent {agent.id} not found"
)
# Create a deep copy with new IDs
graph.version = 1
graph.is_template = False
graph.is_active = True
graph.reassign_ids(user_id=user_id, reassign_graph_id=True)
# Save the new graph
graph = await backend.data.graph.create_graph(graph, user_id=user_id)
graph = (
await backend.integrations.webhooks.graph_lifecycle_hooks.on_graph_activate(
graph,
get_credentials=lambda id: integration_creds_manager.get(user_id, id),
)
)
return fastapi.Response(status_code=201)
except Exception:
logger.exception("Exception occurred whilst adding agent to library")
raise fastapi.HTTPException(
status_code=500, detail="Failed to add agent to library"
)
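# A minimal client-side sketch of calling the endpoint above (the base URL,
# route prefix, and bearer token are illustrative assumptions; they depend on
# how this router is mounted and on the auth middleware configuration):
#
#   import httpx
#
#   resp = httpx.post(
#       "http://localhost:8006/api/library/agents/<store_listing_version_id>",
#       headers={"Authorization": "Bearer <jwt>"},
#   )
#   assert resp.status_code == 201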
|
import logging
import typing
import autogpt_libs.auth.depends
import autogpt_libs.auth.middleware
import fastapi
import backend.data.graph
import backend.server.v2.library.db
import backend.server.v2.library.model
logger = logging.getLogger(__name__)
router = fastapi.APIRouter()
@router.get(
"/agents",
tags=["library", "private"],
dependencies=[fastapi.Depends(autogpt_libs.auth.middleware.auth_middleware)],
)
async def get_library_agents(
user_id: typing.Annotated[
str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id)
]
) -> typing.Sequence[backend.server.v2.library.model.LibraryAgent]:
"""
Get all agents in the user's library, including both created and saved agents.
"""
try:
agents = await backend.server.v2.library.db.get_library_agents(user_id)
return agents
except Exception:
logger.exception("Exception occurred whilst getting library agents")
raise fastapi.HTTPException(
status_code=500, detail="Failed to get library agents"
)
@router.post(
"/agents/{store_listing_version_id}",
tags=["library", "private"],
dependencies=[fastapi.Depends(autogpt_libs.auth.middleware.auth_middleware)],
status_code=201,
)
async def add_agent_to_library(
store_listing_version_id: str,
user_id: typing.Annotated[
str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id)
],
) -> fastapi.Response:
"""
Add an agent from the store to the user's library.
Args:
store_listing_version_id (str): ID of the store listing version to add
user_id (str): ID of the authenticated user
Returns:
fastapi.Response: 201 status code on success
Raises:
HTTPException: If there is an error adding the agent to the library
"""
try:
await backend.server.v2.library.db.add_agent_to_library(
store_listing_version_id=store_listing_version_id, user_id=user_id
)
return fastapi.Response(status_code=201)
except Exception:
logger.exception("Exception occurred whilst adding agent to library")
raise fastapi.HTTPException(
status_code=500, detail="Failed to add agent to library"
)
|
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = TypeVar('ShapeT')
class AbstractTensor(AbstractType, Generic[ShapeT], ABC):
__parametrized_meta__ = type
@classmethod
@abc.abstractmethod
def __docarray_validate_shape__(cls, t: T, shape: Tuple[int]) -> T:
"""Every tensor has to implement this method in order to
enable syntax of the form Tensor[shape].
It is called when a tensor is assigned to a field of this type.
i.e. when a tensor is passed to a Document field of type Tensor[shape].
The intended behaviour is as follows:
- If the shape of `t` is equal to `shape`, return `t`.
- If the shape of `t` is not equal to `shape`,
but can be reshaped to `shape`, return `t` reshaped to `shape`.
- If the shape of `t` is not equal to `shape`
and cannot be reshaped to `shape`, raise a ValueError.
:param t: The tensor to validate.
:param shape: The shape to validate against.
:return: The validated tensor.
"""
...
@classmethod
def __docarray_validate_getitem__(cls, item: Any) -> Tuple[int]:
"""This method validates the input to __class_getitem__.
It is called at "class creation time",
i.e. when a class is created with syntax of the form Tensor[shape].
The default implementation tries to cast any `item` to a tuple of ints.
A subclass can override this method to implement custom validation logic.
The output of this is eventually passed to
{ref}`AbstractTensor.__validate_shape__` as its `shape` argument.
        Raises `TypeError` if the input `item` does not pass validation.
:param item: The item to validate, passed to __class_getitem__ (`Tensor[item]`).
:return: The validated item == the target shape of this tensor.
"""
if isinstance(item, int):
item = (item,)
try:
item = tuple(item)
except TypeError:
raise TypeError(f'{item} is not a valid tensor shape.')
return item
@classmethod
def _docarray_create_parametrized_type(cls: Type[T], shape: Tuple[int]):
shape_str = ', '.join([str(s) for s in shape])
class _ParametrizedTensor(
cls, # type: ignore
metaclass=cls.__parametrized_meta__, # type: ignore
):
_docarray_target_shape = shape
__name__ = f'{cls.__name__}[{shape_str}]'
__qualname__ = f'{cls.__qualname__}[{shape_str}]'
@classmethod
def validate(
_cls,
value: Any,
field: 'ModelField',
config: 'BaseConfig',
):
t = super().validate(value, field, config)
return _cls.__docarray_validate_shape__(t, _cls._docarray_target_shape)
return _ParametrizedTensor
def __class_getitem__(cls, item: Any):
target_shape = cls.__docarray_validate_getitem__(item)
return cls._docarray_create_parametrized_type(target_shape)
@classmethod
@abc.abstractmethod
def __docarray_stack__(cls: Type[T], seq: Union[List[T], Tuple[T]]) -> T:
"""Stack a sequence of tensors into a single tensor."""
...
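# A minimal sketch of the Tensor[shape] syntax enabled above (assumes a
# concrete subclass such as docarray.typing.NdArray; the document and field
# names are illustrative):
#
#   from docarray import BaseDoc
#   from docarray.typing import NdArray
#
#   class ImageDoc(BaseDoc):
#       # Parametrization goes through __class_getitem__ and
#       # __docarray_validate_getitem__; each assigned tensor is then checked
#       # (and possibly reshaped) by __docarray_validate_shape__.
#       tensor: NdArray[3, 224, 224]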
|
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = TypeVar('ShapeT')
class AbstractTensor(AbstractType, Generic[ShapeT], ABC):
__parametrized_meta__ = type
@classmethod
@abc.abstractmethod
def __validate_shape__(cls, t: T, shape: Tuple[int]) -> T:
"""Every tensor has to implement this method in order to
enable syntax of the form Tensor[shape].
It is called when a tensor is assigned to a field of this type.
i.e. when a tensor is passed to a Document field of type Tensor[shape].
The intended behaviour is as follows:
- If the shape of `t` is equal to `shape`, return `t`.
- If the shape of `t` is not equal to `shape`,
but can be reshaped to `shape`, return `t` reshaped to `shape`.
- If the shape of `t` is not equal to `shape`
and cannot be reshaped to `shape`, raise a ValueError.
:param t: The tensor to validate.
:param shape: The shape to validate against.
:return: The validated tensor.
"""
...
@classmethod
def __validate_getitem__(cls, item: Any) -> Tuple[int]:
"""This method validates the input to __class_getitem__.
It is called at "class creation time",
i.e. when a class is created with syntax of the form Tensor[shape].
The default implementation tries to cast any `item` to a tuple of ints.
A subclass can override this method to implement custom validation logic.
The output of this is eventually passed to
{ref}`AbstractTensor.__validate_shape__` as its `shape` argument.
        Raises `TypeError` if the input `item` does not pass validation.
:param item: The item to validate, passed to __class_getitem__ (`Tensor[item]`).
:return: The validated item == the target shape of this tensor.
"""
if isinstance(item, int):
item = (item,)
try:
item = tuple(item)
except TypeError:
raise TypeError(f'{item} is not a valid tensor shape.')
return item
@classmethod
def _create_parametrized_type(cls: Type[T], shape: Tuple[int]):
shape_str = ', '.join([str(s) for s in shape])
class _ParametrizedTensor(
cls, # type: ignore
metaclass=cls.__parametrized_meta__, # type: ignore
):
_docarray_target_shape = shape
__name__ = f'{cls.__name__}[{shape_str}]'
__qualname__ = f'{cls.__qualname__}[{shape_str}]'
@classmethod
def validate(
_cls,
value: Any,
field: 'ModelField',
config: 'BaseConfig',
):
t = super().validate(value, field, config)
return _cls.__validate_shape__(t, _cls._docarray_target_shape)
return _ParametrizedTensor
def __class_getitem__(cls, item: Any):
target_shape = cls.__validate_getitem__(item)
return cls._create_parametrized_type(target_shape)
@classmethod
@abc.abstractmethod
def __docarray_stack__(cls: Type[T], seq: Union[List[T], Tuple[T]]) -> T:
"""Stack a sequence of tensors into a single tensor."""
...
|
from typing import Type, TYPE_CHECKING
from docarray import Document
if TYPE_CHECKING:
from docarray.typing import T
class EmptyMixin:
"""Helper functions for building arrays with empty Document."""
@classmethod
def empty(cls: Type['T'], size: int = 0, *args, **kwargs) -> 'T':
"""Create a :class:`DocumentArray` object with :attr:`size` empty
:class:`Document` objects.
:param size: the number of empty Documents in this container
:return: a :class:`DocumentArray` object
"""
return cls((Document() for _ in range(size)), *args, **kwargs)
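# A quick usage sketch (assumes DocumentArray mixes in EmptyMixin, as in
# docarray's DocumentArray):
#
#   from docarray import DocumentArray
#
#   da = DocumentArray.empty(3)
#   assert len(da) == 3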
|
from typing import Type, TYPE_CHECKING
from ... import Document
if TYPE_CHECKING:
from ...typing import T
class EmptyMixin:
"""Helper functions for building arrays with empty Document."""
@classmethod
def empty(cls: Type['T'], size: int = 0, *args, **kwargs) -> 'T':
"""Create a :class:`DocumentArray` object with :attr:`size` empty
:class:`Document` objects.
:param size: the number of empty Documents in this container
:return: a :class:`DocumentArray` object
"""
return cls((Document() for _ in range(size)), *args, **kwargs)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .lvis_metric import LVISMetric
from .openimages_metric import OpenImagesMetric
from .voc_metric import VOCMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .openimages_metric import OpenImagesMetric
from .voc_metric import VOCMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric'
]
|
from typing import List
from pydantic import BaseModel
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class ContentRetrievalSettings(BaseModel):
text: dict = SchemaField(
description="Text content settings",
default={"maxCharacters": 1000, "includeHtmlTags": False},
advanced=True,
)
highlights: dict = SchemaField(
description="Highlight settings",
default={
"numSentences": 3,
"highlightsPerUrl": 3,
"query": "",
},
advanced=True,
)
summary: dict = SchemaField(
description="Summary settings",
default={"query": ""},
advanced=True,
)
class ExaContentsBlock(Block):
class Input(BlockSchema):
credentials: ExaCredentialsInput = ExaCredentialsField()
ids: List[str] = SchemaField(
description="Array of document IDs obtained from searches",
)
contents: ContentRetrievalSettings = SchemaField(
description="Content retrieval settings",
default=ContentRetrievalSettings(),
advanced=True,
)
class Output(BlockSchema):
results: list = SchemaField(
description="List of document contents",
default_factory=list,
)
error: str = SchemaField(description="Error message if the request failed")
def __init__(self):
super().__init__(
id="c52be83f-f8cd-4180-b243-af35f986b461",
description="Retrieves document contents using Exa's contents API",
categories={BlockCategory.SEARCH},
input_schema=ExaContentsBlock.Input,
output_schema=ExaContentsBlock.Output,
)
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/contents"
headers = {
"Content-Type": "application/json",
"x-api-key": credentials.api_key.get_secret_value(),
}
payload = {
"ids": input_data.ids,
"text": input_data.contents.text,
"highlights": input_data.contents.highlights,
"summary": input_data.contents.summary,
}
try:
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
yield "results", []
|
from typing import List
from pydantic import BaseModel
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class ContentRetrievalSettings(BaseModel):
text: dict = SchemaField(
description="Text content settings",
default={"maxCharacters": 1000, "includeHtmlTags": False},
advanced=True,
)
highlights: dict = SchemaField(
description="Highlight settings",
default={
"numSentences": 3,
"highlightsPerUrl": 3,
"query": "",
},
advanced=True,
)
summary: dict = SchemaField(
description="Summary settings",
default={"query": ""},
advanced=True,
)
class ExaContentsBlock(Block):
class Input(BlockSchema):
credentials: ExaCredentialsInput = ExaCredentialsField()
ids: List[str] = SchemaField(
description="Array of document IDs obtained from searches",
)
contents: ContentRetrievalSettings = SchemaField(
description="Content retrieval settings",
default=ContentRetrievalSettings(),
advanced=True,
)
class Output(BlockSchema):
results: list = SchemaField(
description="List of document contents",
default=[],
)
error: str = SchemaField(description="Error message if the request failed")
def __init__(self):
super().__init__(
id="c52be83f-f8cd-4180-b243-af35f986b461",
description="Retrieves document contents using Exa's contents API",
categories={BlockCategory.SEARCH},
input_schema=ExaContentsBlock.Input,
output_schema=ExaContentsBlock.Output,
)
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/contents"
headers = {
"Content-Type": "application/json",
"x-api-key": credentials.api_key.get_secret_value(),
}
payload = {
"ids": input_data.ids,
"text": input_data.contents.text,
"highlights": input_data.contents.highlights,
"summary": input_data.contents.summary,
}
try:
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
yield "results", []
|
"""
Example of training with Dask on CPU
====================================
"""
from dask import array as da
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def main(client):
# generate some random data for demonstration
m = 100000
n = 100
X = da.random.random(size=(m, n), chunks=100)
y = da.random.random(size=(m,), chunks=100)
    # DaskDMatrix acts like a normal DMatrix, but works as a proxy for local
    # DMatrices scattered across the workers.
dtrain = DaskDMatrix(client, X, y)
    # Use the train method from xgboost.dask instead of plain xgboost. This
    # distributed version of train returns a dictionary containing the
    # resulting booster and the evaluation history obtained from the
    # evaluation metrics.
output = dxgb.train(
client,
{"verbosity": 1, "tree_method": "hist"},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
)
bst = output["booster"]
history = output["history"]
# you can pass output directly into `predict` too.
prediction = dxgb.predict(client, bst, dtrain)
print("Evaluation history:", history)
return prediction
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=7, threads_per_worker=4) as cluster:
with Client(cluster) as client:
main(client)
|
"""
Example of training with Dask on CPU
====================================
"""
from dask import array as da
from dask.distributed import Client, LocalCluster
import xgboost as xgb
from xgboost.dask import DaskDMatrix
def main(client):
# generate some random data for demonstration
m = 100000
n = 100
X = da.random.random(size=(m, n), chunks=100)
y = da.random.random(size=(m,), chunks=100)
    # DaskDMatrix acts like a normal DMatrix, but works as a proxy for local
    # DMatrices scattered across the workers.
dtrain = DaskDMatrix(client, X, y)
    # Use the train method from xgboost.dask instead of plain xgboost. This
    # distributed version of train returns a dictionary containing the
    # resulting booster and the evaluation history obtained from the
    # evaluation metrics.
output = xgb.dask.train(
client,
{"verbosity": 1, "tree_method": "hist"},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
)
bst = output["booster"]
history = output["history"]
# you can pass output directly into `predict` too.
prediction = xgb.dask.predict(client, bst, dtrain)
print("Evaluation history:", history)
return prediction
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=7, threads_per_worker=4) as cluster:
with Client(cluster) as client:
main(client)
|
"""Guideline evaluation."""
import asyncio
import logging
from typing import Any, Optional, Sequence, Union, cast
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.llms.llm import LLM
from llama_index.core.output_parsers import PydanticOutputParser
from llama_index.core.prompts import BasePromptTemplate, PromptTemplate
from llama_index.core.prompts.mixin import PromptDictType
from llama_index.core.settings import Settings
logger = logging.getLogger(__name__)
DEFAULT_GUIDELINES = (
"The response should fully answer the query.\n"
"The response should avoid being vague or ambiguous.\n"
"The response should be specific and use statistics or numbers when possible.\n"
)
DEFAULT_EVAL_TEMPLATE = PromptTemplate(
"Here is the original query:\n"
"Query: {query}\n"
"Critique the following response based on the guidelines below:\n"
"Response: {response}\n"
"Guidelines: {guidelines}\n"
"Now please provide constructive criticism.\n"
)
class EvaluationData(BaseModel):
passing: bool = Field(description="Whether the response passes the guidelines.")
feedback: str = Field(
description="The feedback for the response based on the guidelines."
)
class GuidelineEvaluator(BaseEvaluator):
"""
Guideline evaluator.
Evaluates whether a query and response pair passes the given guidelines.
This evaluator only considers the query string and the response string.
Args:
        guidelines (Optional[str]): User-added guidelines to use for evaluation.
            Defaults to None, which uses the default guidelines.
        eval_template (Optional[Union[str, BasePromptTemplate]]):
            The template to use for evaluation.
"""
def __init__(
self,
llm: Optional[LLM] = None,
guidelines: Optional[str] = None,
eval_template: Optional[Union[str, BasePromptTemplate]] = None,
output_parser: Optional[PydanticOutputParser] = None,
) -> None:
self._llm = llm or Settings.llm
self._guidelines = guidelines or DEFAULT_GUIDELINES
self._eval_template: BasePromptTemplate
if isinstance(eval_template, str):
self._eval_template = PromptTemplate(eval_template)
else:
self._eval_template = eval_template or DEFAULT_EVAL_TEMPLATE
self._output_parser = output_parser or PydanticOutputParser(
output_cls=EvaluationData
)
self._eval_template.output_parser = self._output_parser
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {
"eval_template": self._eval_template,
}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "eval_template" in prompts:
self._eval_template = prompts["eval_template"]
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
sleep_time_in_seconds: int = 0,
**kwargs: Any,
) -> EvaluationResult:
"""Evaluate whether the query and response pair passes the guidelines."""
del contexts # Unused
del kwargs # Unused
if query is None or response is None:
raise ValueError("query and response must be provided")
logger.debug("prompt: %s", self._eval_template)
logger.debug("query: %s", query)
logger.debug("response: %s", response)
logger.debug("guidelines: %s", self._guidelines)
await asyncio.sleep(sleep_time_in_seconds)
eval_response = await self._llm.apredict(
self._eval_template,
query=query,
response=response,
guidelines=self._guidelines,
)
eval_data = self._output_parser.parse(eval_response)
eval_data = cast(EvaluationData, eval_data)
return EvaluationResult(
query=query,
response=response,
passing=eval_data.passing,
score=1.0 if eval_data.passing else 0.0,
feedback=eval_data.feedback,
)
|
"""Guideline evaluation."""
import asyncio
import logging
from typing import Any, Optional, Sequence, Union, cast
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.llms.llm import LLM
from llama_index.core.output_parsers import PydanticOutputParser
from llama_index.core.prompts import BasePromptTemplate, PromptTemplate
from llama_index.core.prompts.mixin import PromptDictType
from llama_index.core.settings import Settings
logger = logging.getLogger(__name__)
DEFAULT_GUIDELINES = (
"The response should fully answer the query.\n"
"The response should avoid being vague or ambiguous.\n"
"The response should be specific and use statistics or numbers when possible.\n"
)
DEFAULT_EVAL_TEMPLATE = PromptTemplate(
"Here is the original query:\n"
"Query: {query}\n"
"Critique the following response based on the guidelines below:\n"
"Response: {response}\n"
"Guidelines: {guidelines}\n"
"Now please provide constructive criticism.\n"
)
class EvaluationData(BaseModel):
passing: bool = Field(description="Whether the response passes the guidelines.")
feedback: str = Field(
description="The feedback for the response based on the guidelines."
)
class GuidelineEvaluator(BaseEvaluator):
"""Guideline evaluator.
Evaluates whether a query and response pair passes the given guidelines.
This evaluator only considers the query string and the response string.
Args:
        guidelines (Optional[str]): User-added guidelines to use for evaluation.
            Defaults to None, which uses the default guidelines.
        eval_template (Optional[Union[str, BasePromptTemplate]]):
            The template to use for evaluation.
"""
def __init__(
self,
llm: Optional[LLM] = None,
guidelines: Optional[str] = None,
eval_template: Optional[Union[str, BasePromptTemplate]] = None,
output_parser: Optional[PydanticOutputParser] = None,
) -> None:
self._llm = llm or Settings.llm
self._guidelines = guidelines or DEFAULT_GUIDELINES
self._eval_template: BasePromptTemplate
if isinstance(eval_template, str):
self._eval_template = PromptTemplate(eval_template)
else:
self._eval_template = eval_template or DEFAULT_EVAL_TEMPLATE
self._output_parser = output_parser or PydanticOutputParser(
output_cls=EvaluationData
)
self._eval_template.output_parser = self._output_parser
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {
"eval_template": self._eval_template,
}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "eval_template" in prompts:
self._eval_template = prompts["eval_template"]
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
sleep_time_in_seconds: int = 0,
**kwargs: Any,
) -> EvaluationResult:
"""Evaluate whether the query and response pair passes the guidelines."""
del contexts # Unused
del kwargs # Unused
if query is None or response is None:
raise ValueError("query and response must be provided")
logger.debug("prompt: %s", self._eval_template)
logger.debug("query: %s", query)
logger.debug("response: %s", response)
logger.debug("guidelines: %s", self._guidelines)
await asyncio.sleep(sleep_time_in_seconds)
eval_response = await self._llm.apredict(
self._eval_template,
query=query,
response=response,
guidelines=self._guidelines,
)
eval_data = self._output_parser.parse(eval_response)
eval_data = cast(EvaluationData, eval_data)
return EvaluationResult(
query=query,
response=response,
passing=eval_data.passing,
score=1.0 if eval_data.passing else 0.0,
feedback=eval_data.feedback,
)
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# data
train_dataloader = dict(batch_size=8)
# model
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
# Note: If the learning rate is set to 0.0025, the mAP will be 32.4.
optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001)
# TODO: support auto scaling lr
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (1 GPUs) x (8 samples per GPU)
# auto_scale_lr = dict(base_batch_size=8)
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# data
data = dict(samples_per_gpu=8)
# model
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
# Note: If the learning rate is set to 0.0025, the mAP will be 32.4.
optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (1 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=8)
|
# Owner(s): ["oncall: distributed"]
import json
import logging
import re
import sys
from functools import partial, wraps
import torch
import torch.distributed as dist
from torch.distributed.c10d_logger import _c10d_logger, _exception_logger
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.testing._internal.common_distributed import DistributedTestBase, TEST_SKIPS
from torch.testing._internal.common_fsdp import get_devtype
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
device_type = str(get_devtype())
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
WORLD_SIZE = min(4, max(2, torch.get_device_module(device_type).device_count()))
def with_comms(func=None):
if func is None:
return partial(
with_comms,
)
@wraps(func)
def wrapper(self, *args, **kwargs):
if torch.get_device_module(device_type).device_count() < self.world_size:
sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code)
self.create_pg(device_type)
func(self)
self.destroy_comms()
return wrapper
class C10dErrorLoggerTest(DistributedTestBase):
@property
def world_size(self):
return WORLD_SIZE
@property
def process_group(self):
return dist.group.WORLD
def destroy_comms(self):
# Wait for all ranks to reach here before starting shutdown.
dist.barrier()
dist.destroy_process_group()
def test_get_or_create_logger(self):
self.assertIsNotNone(_c10d_logger)
self.assertEqual(1, len(_c10d_logger.handlers))
self.assertIsInstance(_c10d_logger.handlers[0], logging.NullHandler)
@_exception_logger
def _failed_broadcast_raise_exception(self):
tensor = torch.arange(2, dtype=torch.int64)
dist.broadcast(tensor, self.world_size + 1)
@_exception_logger
def _failed_broadcast_not_raise_exception(self):
try:
tensor = torch.arange(2, dtype=torch.int64)
dist.broadcast(tensor, self.world_size + 1)
except Exception:
pass
@with_comms
def test_exception_logger(self) -> None:
with self.assertRaises(Exception):
self._failed_broadcast_raise_exception()
with self.assertLogs(_c10d_logger, level="DEBUG") as captured:
self._failed_broadcast_not_raise_exception()
error_msg_dict = json.loads(
re.search("({.+})", captured.output[0]).group(0).replace("'", '"')
)
# NCCL adds additional nccl_version data to the error_msg_dict
if self.backend(device_type) == dist.Backend.NCCL:
self.assertEqual(len(error_msg_dict), 9)
else:
self.assertEqual(len(error_msg_dict), 8)
self.assertIn("pg_name", error_msg_dict.keys())
self.assertEqual("None", error_msg_dict["pg_name"])
self.assertIn("func_name", error_msg_dict.keys())
self.assertEqual("broadcast", error_msg_dict["func_name"])
self.assertIn("backend", error_msg_dict.keys())
self.assertEqual(self.backend(device_type), error_msg_dict["backend"])
if self.backend(device_type) == dist.Backend.NCCL:
self.assertIn("nccl_version", error_msg_dict.keys())
nccl_ver = torch.cuda.nccl.version()
self.assertEqual(
".".join(str(v) for v in nccl_ver), error_msg_dict["nccl_version"]
)
# In this test case, group_size = world_size, since we don't have multiple processes on one node.
self.assertIn("group_size", error_msg_dict.keys())
self.assertEqual(str(self.world_size), error_msg_dict["group_size"])
self.assertIn("world_size", error_msg_dict.keys())
self.assertEqual(str(self.world_size), error_msg_dict["world_size"])
self.assertIn("global_rank", error_msg_dict.keys())
self.assertIn(str(dist.get_rank()), error_msg_dict["global_rank"])
# In this test case, local_rank = global_rank, since we don't have multiple processes on one node.
self.assertIn("local_rank", error_msg_dict.keys())
self.assertIn(str(dist.get_rank()), error_msg_dict["local_rank"])
if __name__ == "__main__":
run_tests()
|
# Owner(s): ["oncall: distributed"]
import json
import logging
import os
import re
import sys
from functools import partial, wraps
import torch
import torch.distributed as dist
from torch.distributed.c10d_logger import _c10d_logger, _exception_logger
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.testing._internal.common_distributed import MultiProcessTestCase, TEST_SKIPS
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
BACKEND = dist.Backend.NCCL
WORLD_SIZE = min(4, max(2, torch.cuda.device_count()))
def with_comms(func=None):
if func is None:
return partial(
with_comms,
)
@wraps(func)
def wrapper(self, *args, **kwargs):
if BACKEND == dist.Backend.NCCL and torch.cuda.device_count() < self.world_size:
sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code)
self.dist_init()
func(self)
self.destroy_comms()
return wrapper
class C10dErrorLoggerTest(MultiProcessTestCase):
def setUp(self):
super().setUp()
os.environ["WORLD_SIZE"] = str(self.world_size)
os.environ["BACKEND"] = BACKEND
self._spawn_processes()
@property
def device(self):
return (
torch.device(self.rank)
if BACKEND == dist.Backend.NCCL
else torch.device("cpu")
)
@property
def world_size(self):
return WORLD_SIZE
@property
def process_group(self):
return dist.group.WORLD
def destroy_comms(self):
# Wait for all ranks to reach here before starting shutdown.
dist.barrier()
dist.destroy_process_group()
def dist_init(self):
dist.init_process_group(
backend=BACKEND,
world_size=self.world_size,
rank=self.rank,
init_method=f"file://{self.file_name}",
)
# set device for nccl pg for collectives
if BACKEND == "nccl":
torch.cuda.set_device(self.rank)
def test_get_or_create_logger(self):
self.assertIsNotNone(_c10d_logger)
self.assertEqual(1, len(_c10d_logger.handlers))
self.assertIsInstance(_c10d_logger.handlers[0], logging.NullHandler)
@_exception_logger
def _failed_broadcast_raise_exception(self):
tensor = torch.arange(2, dtype=torch.int64)
dist.broadcast(tensor, self.world_size + 1)
@_exception_logger
def _failed_broadcast_not_raise_exception(self):
try:
tensor = torch.arange(2, dtype=torch.int64)
dist.broadcast(tensor, self.world_size + 1)
except Exception:
pass
@with_comms
def test_exception_logger(self) -> None:
with self.assertRaises(Exception):
self._failed_broadcast_raise_exception()
with self.assertLogs(_c10d_logger, level="DEBUG") as captured:
self._failed_broadcast_not_raise_exception()
error_msg_dict = json.loads(
re.search("({.+})", captured.output[0]).group(0).replace("'", '"')
)
self.assertEqual(len(error_msg_dict), 9)
self.assertIn("pg_name", error_msg_dict.keys())
self.assertEqual("None", error_msg_dict["pg_name"])
self.assertIn("func_name", error_msg_dict.keys())
self.assertEqual("broadcast", error_msg_dict["func_name"])
self.assertIn("backend", error_msg_dict.keys())
self.assertEqual("nccl", error_msg_dict["backend"])
self.assertIn("nccl_version", error_msg_dict.keys())
nccl_ver = torch.cuda.nccl.version()
self.assertEqual(
".".join(str(v) for v in nccl_ver), error_msg_dict["nccl_version"]
)
# In this test case, group_size = world_size, since we don't have multiple processes on one node.
self.assertIn("group_size", error_msg_dict.keys())
self.assertEqual(str(self.world_size), error_msg_dict["group_size"])
self.assertIn("world_size", error_msg_dict.keys())
self.assertEqual(str(self.world_size), error_msg_dict["world_size"])
self.assertIn("global_rank", error_msg_dict.keys())
self.assertIn(str(dist.get_rank()), error_msg_dict["global_rank"])
# In this test case, local_rank = global_rank, since we don't have multiple processes on one node.
self.assertIn("local_rank", error_msg_dict.keys())
self.assertIn(str(dist.get_rank()), error_msg_dict["local_rank"])
if __name__ == "__main__":
run_tests()
|
__all__ = ['filter_docs']
import json
from typing import Dict, List, Union
from docarray.array.abstract_array import AnyDocArray
from docarray.array.array.array import DocArray
def filter_docs(
docs: AnyDocArray,
query: Union[str, Dict, List[Dict]],
) -> AnyDocArray:
"""
Filter the Documents in the index according to the given filter query.
---
```python
from docarray import DocArray, BaseDoc
from docarray.documents import TextDoc, ImageDoc
from docarray.utils.filter import filter_docs
class MyDocument(BaseDoc):
caption: TextDoc
ImageDoc: ImageDoc
price: int
docs = DocArray[MyDocument](
[
MyDocument(
caption='A tiger in the jungle',
ImageDoc=ImageDoc(url='tigerphoto.png'),
price=100,
),
MyDocument(
caption='A swimming turtle',
ImageDoc=ImageDoc(url='turtlepic.png'),
price=50,
),
MyDocument(
caption='A couple birdwatching with binoculars',
ImageDoc=ImageDoc(url='binocularsphoto.png'),
price=30,
),
]
)
query = {
'$and': {
'ImageDoc__url': {'$regex': 'photo'},
'price': {'$lte': 50},
}
}
results = filter_docs(docs, query)
assert len(results) == 1
assert results[0].price == 30
assert results[0].caption == 'A couple birdwatching with binoculars'
assert results[0].ImageDoc.url == 'binocularsphoto.png'
```
---
    :param docs: the DocArray to apply the filter to
:param query: the query to filter by
:return: A DocArray containing the Documents
in `docs` that fulfill the filter conditions in the `query`
"""
from docarray.utils._internal.query_language.query_parser import QueryParser
if query:
query = query if not isinstance(query, str) else json.loads(query)
parser = QueryParser(query)
return DocArray.__class_getitem__(docs.document_type)(
d for d in docs if parser.evaluate(d)
)
else:
return docs
|
import json
from typing import Dict, List, Union
from docarray.array.abstract_array import AnyDocArray
from docarray.array.array.array import DocArray
def filter_docs(
docs: AnyDocArray,
query: Union[str, Dict, List[Dict]],
) -> AnyDocArray:
"""
Filter the Documents in the index according to the given filter query.
EXAMPLE USAGE
.. code-block:: python
from docarray import DocArray, BaseDoc
from docarray.documents import Text, Image
from docarray.util.filter import filter_docs
class MyDocument(BaseDoc):
caption: Text
image: Image
price: int
docs = DocArray[MyDocument](
[
MyDocument(
caption='A tiger in the jungle',
image=Image(url='tigerphoto.png'),
price=100,
),
MyDocument(
caption='A swimming turtle', image=Image(url='turtlepic.png'), price=50
),
MyDocument(
caption='A couple birdwatching with binoculars',
image=Image(url='binocularsphoto.png'),
price=30,
),
]
)
query = {
'$and': {
'image__url': {'$regex': 'photo'},
'price': {'$lte': 50},
}
}
results = filter_docs(docs, query)
assert len(results) == 1
assert results[0].price == 30
assert results[0].caption == 'A couple birdwatching with binoculars'
assert results[0].image.url == 'binocularsphoto.png'
    :param docs: the DocArray to apply the filter to
:param query: the query to filter by
:return: A DocArray containing the Documents
in `docs` that fulfill the filter conditions in the `query`
"""
from docarray.utils._internal.query_language.query_parser import QueryParser
if query:
query = query if not isinstance(query, str) else json.loads(query)
parser = QueryParser(query)
return DocArray.__class_getitem__(docs.document_type)(
d for d in docs if parser.evaluate(d)
)
else:
return docs
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import ImageBytes
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image():
image = ImageDoc(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
def test_image_str():
image = parse_obj_as(ImageDoc, 'http://myurl.jpg')
assert image.url == 'http://myurl.jpg'
def test_image_np():
image = parse_obj_as(ImageDoc, np.zeros((10, 10, 3)))
assert (image.tensor == np.zeros((10, 10, 3))).all()
def test_image_torch():
image = parse_obj_as(ImageDoc, torch.zeros(10, 10, 3))
assert (image.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_image_tensorflow():
image = ImageDoc(tensor=tf.zeros((10, 10, 3)))
assert tnp.allclose(image.tensor.tensor, tf.zeros((10, 10, 3)))
def test_image_shortcut_doc():
class MyDoc(BaseDoc):
image: ImageDoc
image2: ImageDoc
image3: ImageDoc
doc = MyDoc(
image='http://myurl.jpg',
image2=np.zeros((10, 10, 3)),
image3=torch.zeros(10, 10, 3),
)
assert doc.image.url == 'http://myurl.jpg'
assert (doc.image2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.image3.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.slow
@pytest.mark.internet
def test_byte():
img = ImageDoc(url=REMOTE_JPG)
img.bytes_ = img.url.load_bytes()
assert isinstance(img.bytes_, ImageBytes)
@pytest.mark.slow
@pytest.mark.internet
def test_byte_from_tensor():
img = ImageDoc(url=REMOTE_JPG)
img.tensor = img.url.load()
img.bytes_ = img.tensor.to_bytes()
assert isinstance(img.bytes_, bytes)
assert len(img.bytes_) > 0
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image():
image = ImageDoc(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
def test_image_str():
image = parse_obj_as(ImageDoc, 'http://myurl.jpg')
assert image.url == 'http://myurl.jpg'
def test_image_np():
image = parse_obj_as(ImageDoc, np.zeros((10, 10, 3)))
assert (image.tensor == np.zeros((10, 10, 3))).all()
def test_image_torch():
image = parse_obj_as(ImageDoc, torch.zeros(10, 10, 3))
assert (image.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_image_tensorflow():
image = ImageDoc(tensor=tf.zeros((10, 10, 3)))
assert tnp.allclose(image.tensor.tensor, tf.zeros((10, 10, 3)))
def test_image_shortcut_doc():
class MyDoc(BaseDoc):
image: ImageDoc
image2: ImageDoc
image3: ImageDoc
doc = MyDoc(
image='http://myurl.jpg',
image2=np.zeros((10, 10, 3)),
image3=torch.zeros(10, 10, 3),
)
assert doc.image.url == 'http://myurl.jpg'
assert (doc.image2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.image3.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.slow
@pytest.mark.internet
def test_byte():
img = ImageDoc(url=REMOTE_JPG)
img.bytes_ = img.url.load_bytes()
@pytest.mark.slow
@pytest.mark.internet
def test_byte_from_tensor():
img = ImageDoc(url=REMOTE_JPG)
img.tensor = img.url.load()
img.bytes_ = img.tensor.to_bytes()
assert isinstance(img.bytes_, bytes)
assert len(img.bytes_) > 0
|
"""Math utils."""
import logging
from typing import List, Optional, Tuple, Union
import numpy as np
logger = logging.getLogger(__name__)
Matrix = Union[List[List[float]], List[np.ndarray], np.ndarray]
def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray:
"""Row-wise cosine similarity between two equal-width matrices."""
if len(X) == 0 or len(Y) == 0:
return np.array([])
X = np.array(X)
Y = np.array(Y)
if X.shape[1] != Y.shape[1]:
raise ValueError(
f"Number of columns in X and Y must be the same. X has shape {X.shape} "
f"and Y has shape {Y.shape}."
)
try:
import simsimd as simd
X = np.array(X, dtype=np.float32)
Y = np.array(Y, dtype=np.float32)
Z = 1 - np.array(simd.cdist(X, Y, metric="cosine"))
return Z
except ImportError:
logger.debug(
"Unable to import simsimd, defaulting to NumPy implementation. If you want "
"to use simsimd please install with `pip install simsimd`."
)
X_norm = np.linalg.norm(X, axis=1)
Y_norm = np.linalg.norm(Y, axis=1)
    # Ignore divide-by-zero and invalid-value runtime warnings, as those are handled below.
with np.errstate(divide="ignore", invalid="ignore"):
similarity = np.dot(X, Y.T) / np.outer(X_norm, Y_norm)
similarity[np.isnan(similarity) | np.isinf(similarity)] = 0.0
return similarity
def cosine_similarity_top_k(
X: Matrix,
Y: Matrix,
top_k: Optional[int] = 5,
score_threshold: Optional[float] = None,
) -> Tuple[List[Tuple[int, int]], List[float]]:
"""Row-wise cosine similarity with optional top-k and score threshold filtering.
Args:
X: Matrix.
Y: Matrix, same width as X.
top_k: Max number of results to return.
score_threshold: Minimum cosine similarity of results.
Returns:
Tuple of two lists. First contains two-tuples of indices (X_idx, Y_idx),
second contains corresponding cosine similarities.
"""
if len(X) == 0 or len(Y) == 0:
return [], []
score_array = cosine_similarity(X, Y)
score_threshold = score_threshold or -1.0
score_array[score_array < score_threshold] = 0
top_k = min(top_k or len(score_array), np.count_nonzero(score_array))
top_k_idxs = np.argpartition(score_array, -top_k, axis=None)[-top_k:]
top_k_idxs = top_k_idxs[np.argsort(score_array.ravel()[top_k_idxs])][::-1]
ret_idxs = np.unravel_index(top_k_idxs, score_array.shape)
scores = score_array.ravel()[top_k_idxs].tolist()
return list(zip(*ret_idxs)), scores # type: ignore[return-value,unused-ignore]
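# A small worked example of the function above (values illustrative):
#
#   X = [[1.0, 0.0], [0.0, 1.0]]
#   Y = [[1.0, 0.0], [1.0, 1.0]]
#   idxs, scores = cosine_similarity_top_k(X, Y, top_k=1)
#   # idxs   -> [(0, 0)]   (X row 0 best matches Y row 0)
#   # scores -> [1.0]      (cosine similarity of identical unit vectors)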
|
"""Math utils."""
import logging
from typing import List, Optional, Tuple, Union
import numpy as np
logger = logging.getLogger(__name__)
Matrix = Union[List[List[float]], List[np.ndarray], np.ndarray]
def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray:
"""Row-wise cosine similarity between two equal-width matrices."""
if len(X) == 0 or len(Y) == 0:
return np.array([])
X = np.array(X)
Y = np.array(Y)
if X.shape[1] != Y.shape[1]:
raise ValueError(
f"Number of columns in X and Y must be the same. X has shape {X.shape} "
f"and Y has shape {Y.shape}."
)
try:
import simsimd as simd
X = np.array(X, dtype=np.float32)
Y = np.array(Y, dtype=np.float32)
Z = 1 - np.array(simd.cdist(X, Y, metric="cosine"))
return Z
except ImportError:
logger.debug(
"Unable to import simsimd, defaulting to NumPy implementation. If you want "
"to use simsimd please install with `pip install simsimd`."
)
X_norm = np.linalg.norm(X, axis=1)
Y_norm = np.linalg.norm(Y, axis=1)
    # Ignore divide-by-zero and invalid-value runtime warnings, as those are handled below.
with np.errstate(divide="ignore", invalid="ignore"):
similarity = np.dot(X, Y.T) / np.outer(X_norm, Y_norm)
similarity[np.isnan(similarity) | np.isinf(similarity)] = 0.0
return similarity
def cosine_similarity_top_k(
X: Matrix,
Y: Matrix,
top_k: Optional[int] = 5,
score_threshold: Optional[float] = None,
) -> Tuple[List[Tuple[int, int]], List[float]]:
"""Row-wise cosine similarity with optional top-k and score threshold filtering.
Args:
X: Matrix.
Y: Matrix, same width as X.
top_k: Max number of results to return.
score_threshold: Minimum cosine similarity of results.
Returns:
Tuple of two lists. First contains two-tuples of indices (X_idx, Y_idx),
second contains corresponding cosine similarities.
"""
if len(X) == 0 or len(Y) == 0:
return [], []
score_array = cosine_similarity(X, Y)
score_threshold = score_threshold or -1.0
score_array[score_array < score_threshold] = 0
top_k = min(top_k or len(score_array), np.count_nonzero(score_array))
top_k_idxs = np.argpartition(score_array, -top_k, axis=None)[-top_k:]
top_k_idxs = top_k_idxs[np.argsort(score_array.ravel()[top_k_idxs])][::-1]
ret_idxs = np.unravel_index(top_k_idxs, score_array.shape)
scores = score_array.ravel()[top_k_idxs].tolist()
return list(zip(*ret_idxs)), scores # type: ignore[return-value]
|
import logging
import sentry_sdk
from sentry_sdk.integrations.anthropic import AnthropicIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from backend.util.settings import Settings
def sentry_init():
sentry_dsn = Settings().secrets.sentry_dsn
sentry_sdk.init(
dsn=sentry_dsn,
traces_sample_rate=1.0,
profiles_sample_rate=1.0,
environment=f"app:{Settings().config.app_env.value}-behave:{Settings().config.behave_as.value}",
_experiments={
"enable_logs": True,
},
integrations=[
LoggingIntegration(sentry_logs_level=logging.INFO),
AnthropicIntegration(
include_prompts=False,
),
],
)
|
import logging
import sentry_sdk
from backend.util.settings import Settings
from sentry_sdk.integrations.anthropic import AnthropicIntegration
from sentry_sdk.integrations.launchdarkly import LaunchDarklyIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
def sentry_init():
sentry_dsn = Settings().secrets.sentry_dsn
sentry_sdk.init(
dsn=sentry_dsn,
traces_sample_rate=1.0,
profiles_sample_rate=1.0,
environment=f"app:{Settings().config.app_env.value}-behave:{Settings().config.behave_as.value}",
_experiments={
"enable_logs": True,
},
integrations=[
LoggingIntegration(sentry_logs_level=logging.INFO),
AnthropicIntegration(
include_prompts=False,
),
],
)
|
import os
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor, Flow
from jina_commons.indexers.dump import import_metas, import_vectors
from ..mongo_storage import doc_without_embedding
NUM_DOCS = 10
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
def test_config():
config_path = Path(__file__).parents[1] / 'config.yml'
storage = Executor.load_config(str(config_path))
assert storage._traversal_paths == ['r']
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_mongo_add(docs_to_index, storage, docker_compose):
storage.add(docs=docs_to_index, parameters={})
assert storage.size == NUM_DOCS
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_mongo_update(docs_to_index, storage, docker_compose):
storage.add(docs=docs_to_index, parameters={})
doc_id_to_update = docs_to_index[0].id
storage.update(
docs=DocumentArray([Document(id=doc_id_to_update, text='hello test')])
)
docs_to_search = DocumentArray([Document(id=doc_id_to_update)])
storage.search(docs=docs_to_search)
assert docs_to_search[0].text == 'hello test'
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_mongo_search(docs_to_index, storage, docker_compose):
storage.add(docs=docs_to_index, parameters={})
docs_to_search = DocumentArray([Document(id=docs_to_index[0].id)])
storage.search(docs=docs_to_search)
assert docs_to_search[0].text == docs_to_index[0].text
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_mongo_search_without_embedding(
docs_to_index_no_embedding, storage, docker_compose
):
storage.add(docs=docs_to_index_no_embedding, parameters={})
docs_to_search = DocumentArray([Document(id=docs_to_index_no_embedding[0].id)])
storage.search(docs=docs_to_search)
assert docs_to_search[0].text == docs_to_index_no_embedding[0].text
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_mongo_delete(docs_to_index, storage, docker_compose):
doc_id_to_delete = docs_to_index[0].id
storage.delete(docs=DocumentArray([Document(id=doc_id_to_delete)]))
docs_to_search = DocumentArray([Document(id=doc_id_to_delete)])
assert len(docs_to_search) == 1
assert docs_to_search[0].text == ''
@pytest.mark.parametrize('num_shards', [1, 3, 5])
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_mongo_dump(docs_to_index, storage, tmpdir, num_shards, docker_compose):
metas = {'workspace': str(tmpdir), 'name': 'storage'}
dump_path = os.path.join(tmpdir, 'dump_dir')
with Flow().add(
uses={
'jtype': 'MongoDBStorage',
'with': {},
'metas': metas,
}
) as f:
f.index(inputs=docs_to_index)
f.post(on='/dump', parameters={'dump_path': dump_path, 'shards': num_shards})
assert os.path.exists(dump_path)
# recover dumped files and assert identical
all_ids = []
all_vecs = []
all_metas = []
for pea_id in range(num_shards):
ids, vecs = import_vectors(dump_path, pea_id=str(pea_id))
_, metas = import_metas(dump_path, pea_id=str(pea_id))
all_ids.extend(ids)
all_vecs.extend(vecs)
all_metas.extend(metas)
assert all_ids == docs_to_index.get_attributes('id')
assert (
np.asarray(all_vecs) == np.asarray(docs_to_index.get_attributes('embedding'))
).all()
for idx, meta in enumerate(all_metas):
assert meta == doc_without_embedding(docs_to_index[idx])
storage.delete(docs_to_index)
|
import os
from pathlib import Path
import pytest
import numpy as np
from jina import Document, DocumentArray, Flow, Executor
from jina_commons.indexers.dump import import_vectors, import_metas
from ..mongo_storage import doc_without_embedding
NUM_DOCS = 10
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
def test_config():
config_path = Path(__file__).parents[1] / 'config.yml'
storage = Executor.load_config(str(config_path))
assert storage._traversal_paths == ['r']
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_mongo_add(docs_to_index, storage, docker_compose):
storage.add(docs=docs_to_index, parameters={})
assert storage.size == NUM_DOCS
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_mongo_update(docs_to_index, storage, docker_compose):
storage.add(docs=docs_to_index, parameters={})
doc_id_to_update = docs_to_index[0].id
storage.update(
docs=DocumentArray([Document(id=doc_id_to_update, text='hello test')])
)
docs_to_search = DocumentArray([Document(id=doc_id_to_update)])
storage.search(docs=docs_to_search)
assert docs_to_search[0].text == 'hello test'
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_mongo_search(docs_to_index, storage, docker_compose):
storage.add(docs=docs_to_index, parameters={})
docs_to_search = DocumentArray([Document(id=docs_to_index[0].id)])
storage.search(docs=docs_to_search)
assert docs_to_search[0].text == docs_to_index[0].text
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_mongo_delete(docs_to_index, storage, docker_compose):
doc_id_to_delete = docs_to_index[0].id
storage.delete(docs=DocumentArray([Document(id=doc_id_to_delete)]))
docs_to_search = DocumentArray([Document(id=doc_id_to_delete)])
assert len(docs_to_search) == 1
assert docs_to_search[0].text == ''
@pytest.mark.parametrize('num_shards', [1, 3, 5])
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_mongo_dump(docs_to_index, storage, tmpdir, num_shards, docker_compose):
metas = {'workspace': str(tmpdir), 'name': 'storage'}
dump_path = os.path.join(tmpdir, 'dump_dir')
with Flow().add(
uses={
'jtype': 'MongoDBStorage',
'with': {},
'metas': metas,
}
) as f:
f.index(inputs=docs_to_index)
f.post(on='/dump', parameters={'dump_path': dump_path, 'shards': num_shards})
assert os.path.exists(dump_path)
# recover dumped files and assert identical
all_ids = []
all_vecs = []
all_metas = []
for pea_id in range(num_shards):
ids, vecs = import_vectors(dump_path, pea_id=str(pea_id))
_, metas = import_metas(dump_path, pea_id=str(pea_id))
all_ids.extend(ids)
all_vecs.extend(vecs)
all_metas.extend(metas)
assert all_ids == docs_to_index.get_attributes('id')
assert (
np.asarray(all_vecs) == np.asarray(docs_to_index.get_attributes('embedding'))
).all()
for idx, meta in enumerate(all_metas):
assert meta == doc_without_embedding(docs_to_index[idx])
storage.delete(docs_to_index)
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model; it is not multilingual, but we hope to see multilingual sparse models on the Hub soon
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using parallel texts in two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
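# Note (our reading of the evaluator, stated as an assumption): src2trg
# accuracy is the fraction of source sentences whose highest-similarity target
# sentence is the aligned translation, and trg2src is the reverse direction.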
results = translation_evaluator(model)
"""
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.60
Model Sparsity: Active Dimensions: 112.3, Sparsity Ratio: 0.9963
"""
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4450
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model; it is not multilingual, but we hope to see multilingual sparse models on the Hub soon
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using parallel texts in two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
"""
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
Model Sparsity: Active Dimensions: 113.6, Sparsity Ratio: 0.9963
"""
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
|
import os
import urllib
import numpy as np
import pytest
from PIL import Image
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'image-data')
IMAGE_PATHS = {
'png': os.path.join(PATH_TO_IMAGE_DATA, 'so_good.png'),
'jpg': os.path.join(PATH_TO_IMAGE_DATA, '05984.jpg'),
'jpeg': os.path.join(PATH_TO_IMAGE_DATA, '05984-2.jpeg'),
}
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
def test_proto_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
uri._to_node_protobuf()
def test_json_schema():
schema_json_of(ImageUrl)
def test_dump_json():
url = parse_obj_as(ImageUrl, 'http://jina.ai/img.png')
orjson_dumps(url)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
def test_load(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize('width,height', [(224, None), (None, 224), (224, 224)])
def test_load_width_height(image_format, path_to_img, width, height):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(width=width, height=height)
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
if width:
assert shape[1] == width
if height:
assert shape[0] == height
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize(
'axis_layout',
[
('H', 'W', 'C'),
('H', 'C', 'W'),
('C', 'H', 'W'),
('C', 'W', 'H'),
('W', 'C', 'H'),
('W', 'H', 'C'),
],
)
def test_load_channel_axis(image_format, path_to_img, axis_layout):
sizes = {'H': 100, 'W': 200, 'C': 3}
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(axis_layout=axis_layout, height=sizes['H'], width=sizes['W'])
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
for axis, axis_name in enumerate(axis_layout):
assert shape[axis] == sizes[axis_name]
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(ImageUrl, REMOTE_JPG)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
],
)
def test_load_to_bytes(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
_bytes = url.load_bytes()
assert isinstance(_bytes, bytes)
img = Image.frombytes(mode='1', size=(224, 224), data=_bytes)
assert isinstance(img, Image.Image)
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
],
)
def test_validation(image_format, path_to_img):
if image_format == 'illegal':
with pytest.raises(ValueError):
parse_obj_as(ImageUrl, path_to_img)
else:
url = parse_obj_as(ImageUrl, path_to_img)
assert isinstance(url, ImageUrl)
assert isinstance(url, str)
|
import os
import urllib
import numpy as np
import PIL
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'image-data')
IMAGE_PATHS = {
'png': os.path.join(PATH_TO_IMAGE_DATA, 'so_good.png'),
'jpg': os.path.join(PATH_TO_IMAGE_DATA, '05984.jpg'),
'jpeg': os.path.join(PATH_TO_IMAGE_DATA, '05984-2.jpeg'),
}
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
def test_proto_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
uri._to_node_protobuf()
def test_json_schema():
schema_json_of(ImageUrl)
def test_dump_json():
url = parse_obj_as(ImageUrl, 'http://jina.ai/img.png')
orjson_dumps(url)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
def test_load(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize('width,height', [(224, None), (None, 224), (224, 224)])
def test_load_width_height(image_format, path_to_img, width, height):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(width=width, height=height)
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
if width:
assert shape[1] == width
if height:
assert shape[0] == height
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize(
'axis_layout',
[
('H', 'W', 'C'),
('H', 'C', 'W'),
('C', 'H', 'W'),
('C', 'W', 'H'),
('W', 'C', 'H'),
('W', 'H', 'C'),
],
)
def test_load_channel_axis(image_format, path_to_img, axis_layout):
sizes = {'H': 100, 'W': 200, 'C': 3}
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(axis_layout=axis_layout, height=sizes['H'], width=sizes['W'])
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
for axis, axis_name in enumerate(axis_layout):
assert shape[axis] == sizes[axis_name]
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(ImageUrl, REMOTE_JPG)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
],
)
def test_load_to_bytes(image_format, path_to_img):
w, h = 224, 224
url = parse_obj_as(ImageUrl, path_to_img)
_bytes = url.load_to_bytes(width=w, height=h)
assert isinstance(_bytes, bytes)
img = PIL.Image.frombytes(mode='1', size=(w, h), data=_bytes)
assert isinstance(img, PIL.Image.Image)
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
],
)
def test_validation(image_format, path_to_img):
if image_format == 'illegal':
with pytest.raises(ValueError):
parse_obj_as(ImageUrl, path_to_img)
else:
url = parse_obj_as(ImageUrl, path_to_img)
assert isinstance(url, ImageUrl)
assert isinstance(url, str)
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc4'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.7.1'
mmengine_maximum_version = '1.0.0'
mmengine_version = digit_version(mmengine.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version < digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.'
assert (mmengine_version >= digit_version(mmengine_minimum_version)
and mmengine_version < digit_version(mmengine_maximum_version)), \
f'MMEngine=={mmengine.__version__} is used but incompatible. ' \
f'Please install mmengine>={mmengine_minimum_version}, ' \
f'<{mmengine_maximum_version}.'
__all__ = ['__version__', 'version_info', 'digit_version']
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc4'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.6.0'
mmengine_maximum_version = '1.0.0'
mmengine_version = digit_version(mmengine.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version < digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.'
assert (mmengine_version >= digit_version(mmengine_minimum_version)
and mmengine_version < digit_version(mmengine_maximum_version)), \
f'MMEngine=={mmengine.__version__} is used but incompatible. ' \
f'Please install mmengine>={mmengine_minimum_version}, ' \
f'<{mmengine_maximum_version}.'
__all__ = ['__version__', 'version_info', 'digit_version']
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union
from mmengine.config import ConfigDict
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RetinaNet(SingleStageDetector):
"""Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_"""
def __init__(self,
backbone: Union[ConfigDict, dict],
neck: Union[ConfigDict, dict],
bbox_head: Union[ConfigDict, dict],
train_cfg: Optional[Union[ConfigDict, dict]] = None,
test_cfg: Optional[Union[ConfigDict, dict]] = None,
preprocess_cfg: Optional[Union[ConfigDict, dict]] = None,
pretrained: Optional[str] = None,
init_cfg: Optional[Union[ConfigDict, dict]] = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg,
preprocess_cfg=preprocess_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RetinaNet(SingleStageDetector):
"""Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
preprocess_cfg=None,
pretrained=None,
init_cfg=None):
super(RetinaNet,
self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
preprocess_cfg, pretrained, init_cfg)
|
r"""
PyTorch Profiler is a tool that allows the collection of performance metrics during training and inference.
Profiler's context manager API can be used to better understand what model operators are the most expensive,
examine their input shapes and stack traces, study device kernel activity and visualize the execution trace.
.. note::
    An earlier version of the API in the :mod:`torch.autograd` module is considered legacy and will be deprecated.
"""
import os
from typing import Any
from typing_extensions import TypeVarTuple, Unpack
from torch._C._autograd import _supported_activities, DeviceType, kineto_available
from torch._C._profiler import _ExperimentalConfig, ProfilerActivity, RecordScope
from torch._environment import is_fbcode
from torch.autograd.profiler import KinetoStepTracker, record_function
from torch.optim.optimizer import Optimizer, register_optimizer_step_post_hook
from .profiler import (
_KinetoProfile,
ExecutionTraceObserver,
profile,
ProfilerAction,
schedule,
supported_activities,
tensorboard_trace_handler,
)
__all__ = [
"profile",
"schedule",
"supported_activities",
"tensorboard_trace_handler",
"ProfilerAction",
"ProfilerActivity",
"kineto_available",
"DeviceType",
"record_function",
"ExecutionTraceObserver",
]
from . import itt
_Ts = TypeVarTuple("_Ts")
def _optimizer_post_hook(
optimizer: Optimizer, args: tuple[Unpack[_Ts]], kwargs: dict[str, Any]
) -> None:
KinetoStepTracker.increment_step("Optimizer")
if os.environ.get("KINETO_USE_DAEMON", "") or (
is_fbcode() and os.environ.get("KINETO_FORCE_OPTIMIZER_HOOK", "")
):
_ = register_optimizer_step_post_hook(_optimizer_post_hook)
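# A minimal usage sketch of the context manager API described in the module
# docstring (``model`` and ``inputs`` are hypothetical placeholders):
#
#     from torch.profiler import profile, ProfilerActivity
#
#     with profile(activities=[ProfilerActivity.CPU], record_shapes=True) as prof:
#         model(inputs)
#     print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10))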
|
# mypy: allow-untyped-defs
r"""
PyTorch Profiler is a tool that allows the collection of performance metrics during training and inference.
Profiler's context manager API can be used to better understand what model operators are the most expensive,
examine their input shapes and stack traces, study device kernel activity and visualize the execution trace.
.. note::
    An earlier version of the API in the :mod:`torch.autograd` module is considered legacy and will be deprecated.
"""
import os
from torch._C._autograd import _supported_activities, DeviceType, kineto_available
from torch._C._profiler import _ExperimentalConfig, ProfilerActivity, RecordScope
from torch._environment import is_fbcode
from torch.autograd.profiler import KinetoStepTracker, record_function
from torch.optim.optimizer import register_optimizer_step_post_hook
from .profiler import (
_KinetoProfile,
ExecutionTraceObserver,
profile,
ProfilerAction,
schedule,
supported_activities,
tensorboard_trace_handler,
)
__all__ = [
"profile",
"schedule",
"supported_activities",
"tensorboard_trace_handler",
"ProfilerAction",
"ProfilerActivity",
"kineto_available",
"DeviceType",
"record_function",
"ExecutionTraceObserver",
]
from . import itt
def _optimizer_post_hook(optimizer, args, kwargs):
KinetoStepTracker.increment_step("Optimizer")
if os.environ.get("KINETO_USE_DAEMON", "") or (
is_fbcode() and os.environ.get("KINETO_FORCE_OPTIMIZER_HOOK", "")
):
_ = register_optimizer_step_post_hook(_optimizer_post_hook)
|
"""
This example trains BERT (or any other transformer model such as RoBERTa, DistilBERT, etc.) on the STSbenchmark dataset from scratch. It generates sentence embeddings
that can be compared using cosine similarity to measure semantic similarity.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_name
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Hugging Face pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
train_batch_size = 16
num_epochs = 4
output_dir = (
"output/training_stsbenchmark_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and one
# similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# train_loss = losses.CoSENTLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
evaluation_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts')`."
)
|
"""
This example trains BERT (or any other transformer model such as RoBERTa, DistilBERT, etc.) on the STSbenchmark dataset from scratch. It generates sentence embeddings
that can be compared using cosine similarity to measure semantic similarity.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_name
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Hugging Face pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
train_batch_size = 16
num_epochs = 4
output_dir = (
"output/training_stsbenchmark_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and one
# similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# train_loss = losses.CoSENTLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts')`."
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
from .version import __version__, short_version
def digit_version(version_str):
digit_version = []
for x in version_str.split('.'):
if x.isdigit():
digit_version.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
digit_version.append(int(patch_version[0]) - 1)
digit_version.append(int(patch_version[1]))
return digit_version
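# For example (a worked sketch of the parsing above): digit_version('1.3.17')
# returns [1, 3, 17], and digit_version('2.0.0rc4') returns [2, 0, -1, 4], so
# release candidates compare below the corresponding final release.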
mmcv_minimum_version = '1.3.17'
mmcv_maximum_version = '1.5.0'
mmcv_version = digit_version(mmcv.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version <= digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'
__all__ = ['__version__', 'short_version']
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
from .version import __version__, short_version
def digit_version(version_str):
digit_version = []
for x in version_str.split('.'):
if x.isdigit():
digit_version.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
digit_version.append(int(patch_version[0]) - 1)
digit_version.append(int(patch_version[1]))
return digit_version
mmcv_minimum_version = '1.3.8'
mmcv_maximum_version = '1.5.0'
mmcv_version = digit_version(mmcv.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version <= digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'
__all__ = ['__version__', 'short_version']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .make_divisible import make_divisible
from .misc import (center_of_mass, empty_instances, filter_scores_and_topk,
flip_tensor, generate_coordinate, images_to_levels,
interpolate_as, levels_to_images, mask2ndarray, multi_apply,
select_single_mlvl, sigmoid_geometric_mean, unmap,
unpack_gt_instances)
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
__all__ = [
'gaussian_radius', 'gen_gaussian_target', 'make_divisible',
'get_local_maximum', 'get_topk_from_heatmap', 'transpose_and_gather_feat',
'interpolate_as', 'sigmoid_geometric_mean', 'gather_feat',
'preprocess_panoptic_gt', 'get_uncertain_point_coords_with_randomness',
'get_uncertainty', 'unpack_gt_instances', 'empty_instances',
'center_of_mass', 'filter_scores_and_topk', 'flip_tensor',
'generate_coordinate', 'levels_to_images', 'mask2ndarray', 'multi_apply',
'select_single_mlvl', 'unmap', 'images_to_levels'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .ema import ExpMomentumEMA
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import (empty_instances, interpolate_as, sigmoid_geometric_mean,
unpack_gt_instances)
from .normed_predictor import NormedConv2d, NormedLinear
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import DyReLU, SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
nlc_to_nchw)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean',
'preprocess_panoptic_gt', 'DyReLU',
'get_uncertain_point_coords_with_randomness', 'get_uncertainty',
'ExpMomentumEMA', 'unpack_gt_instances', 'empty_instances'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.core import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
register_all_modules()
class TestSingleStageInstanceSegmentor(TestCase):
@parameterized.expand([
'solo/solo_r50_fpn_1x_coco.py',
'solo/decoupled_solo_r50_fpn_1x_coco.py',
'solo/decoupled_solo_light_r50_fpn_3x_coco.py',
'solov2/solov2_r50_fpn_1x_coco.py',
'solov2/solov2_light_r18_fpn_3x_coco.py'
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.mask_head)
@parameterized.expand([
('solo/solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_light_r50_fpn_3x_coco.py', ('cpu', 'cuda')),
('solov2/solov2_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solov2/solov2_light_r18_fpn_3x_coco.py', ('cpu', 'cuda')),
])
def test_single_stage_forward_loss_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
detector.init_weights()
if device == 'cuda':
if not torch.cuda.is_available():
                    raise unittest.SkipTest('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]], with_mask=True)
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, True)
losses = detector.forward(batch_inputs, data_samples, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([
('solo/solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_light_r50_fpn_3x_coco.py', ('cpu', 'cuda')),
('solov2/solov2_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solov2/solov2_light_r18_fpn_3x_coco.py', ('cpu', 'cuda')),
])
def test_single_stage_forward_predict_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
                    raise unittest.SkipTest('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]], with_mask=True)
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
batch_inputs, data_samples, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
class TestSingleStageInstanceSegmentor(TestCase):
@parameterized.expand([
'solo/solo_r50_fpn_1x_coco.py',
'solo/decoupled_solo_r50_fpn_1x_coco.py',
'solo/decoupled_solo_light_r50_fpn_3x_coco.py'
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.mask_head)
self.assertEqual(detector.device.type, 'cpu')
@parameterized.expand([
('solo/solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_light_r50_fpn_3x_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
detector.init_weights()
if device == 'cuda':
if not torch.cuda.is_available():
                    raise unittest.SkipTest('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]], with_mask=True)
# Test forward train
losses = detector.forward(packed_inputs, return_loss=True)
assert isinstance(losses, dict)
# Test forward_dummy
batch = torch.ones((1, 3, 64, 64)).to(device=device)
out = detector.forward_dummy(batch)
assert isinstance(out, tuple)
@parameterized.expand([
('solo/solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled_solo_light_r50_fpn_3x_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
                    raise unittest.SkipTest('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]], with_mask=True)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
packed_inputs, return_loss=False)
assert len(batch_results) == 2
assert isinstance(batch_results[0], DetDataSample)
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.jaxarray import JaxArray, metaJax
MAX_INT_16 = 2**15
@_register_proto(proto_type_name='image_jaxarray')
class ImageJaxArray(JaxArray, AbstractImageTensor, metaclass=metaJax):
...
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.jaxarray import JaxArray, metaJax
MAX_INT_16 = 2**15
@_register_proto(proto_type_name='image_jaxarray')
class ImageJaxArray(JaxArray, AbstractImageTensor, metaclass=metaJax):
...
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.runner.hooks import Hook
from mmdet.registry import HOOKS
@HOOKS.register_module()
class CheckInvalidLossHook(Hook):
"""Check invalid loss hook.
This hook will regularly check whether the loss is valid
during training.
Args:
interval (int): Checking interval (every k iterations).
Default: 50.
"""
def __init__(self, interval=50):
self.interval = interval
def after_train_iter(self, runner):
if self.every_n_iters(runner, self.interval):
            assert torch.isfinite(runner.outputs['loss']), \
                'loss becomes infinite or NaN!'
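# A minimal config sketch, assuming the standard MMCV `custom_hooks` mechanism
# (the type string is the class name registered above; interval is the
# docstring's default):
#
#     custom_hooks = [dict(type='CheckInvalidLossHook', interval=50)]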
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.runner.hooks import HOOKS, Hook
@HOOKS.register_module()
class CheckInvalidLossHook(Hook):
"""Check invalid loss hook.
This hook will regularly check whether the loss is valid
during training.
Args:
interval (int): Checking interval (every k iterations).
Default: 50.
"""
def __init__(self, interval=50):
self.interval = interval
def after_train_iter(self, runner):
if self.every_n_iters(runner, self.interval):
            assert torch.isfinite(runner.outputs['loss']), \
                'loss becomes infinite or NaN!'
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.20.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# isort: split
# Deprecated modules
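# Re-assigning these names onto the legacy modules below keeps old import paths
# (e.g. `datasets.utils.DownloadManager`) working after the code moved.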
from . import arrow_dataset as _arrow_dataset
from . import utils as _utils
from .exceptions import ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
from .utils import download_manager as _deprecated_download_manager
from .utils import info_utils as _deprecated_info_utils
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
_deprecated_info_utils.ExpectedMoreDownloadedFiles = ExpectedMoreDownloadedFiles
_deprecated_info_utils.ExpectedMoreSplits = ExpectedMoreSplits
_deprecated_info_utils.UnexpectedDownloadedFile = UnexpectedDownloadedFile
_deprecated_info_utils.UnexpectedSplits = UnexpectedSplits
del _arrow_dataset, _utils, _deprecated_download_manager
del _deprecated_info_utils, ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.20.0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# isort: split
# Deprecated modules
from . import arrow_dataset as _arrow_dataset
from . import utils as _utils
from .exceptions import ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
from .utils import download_manager as _deprecated_download_manager
from .utils import info_utils as _deprecated_info_utils
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
_deprecated_info_utils.ExpectedMoreDownloadedFiles = ExpectedMoreDownloadedFiles
_deprecated_info_utils.ExpectedMoreSplits = ExpectedMoreSplits
_deprecated_info_utils.UnexpectedDownloadedFile = UnexpectedDownloadedFile
_deprecated_info_utils.UnexpectedSplits = UnexpectedSplits
del _arrow_dataset, _utils, _deprecated_download_manager
del _deprecated_info_utils, ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class GFL(SingleStageDetector):
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(GFL, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class GFL(SingleStageDetector):
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(GFL, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
"""Xgboost training summary integration submodule."""
from dataclasses import dataclass, field
from typing import Dict, List
@dataclass
class XGBoostTrainingSummary:
"""
A class that holds the training and validation objective history
of an XGBoost model during its training process.
"""
train_objective_history: Dict[str, List[float]] = field(default_factory=dict)
validation_objective_history: Dict[str, List[float]] = field(default_factory=dict)
@staticmethod
def from_metrics(
metrics: Dict[str, Dict[str, List[float]]],
) -> "XGBoostTrainingSummary":
"""
Create an XGBoostTrainingSummary instance from a nested dictionary of metrics.
Parameters
----------
metrics : dict of str to dict of str to list of float
A dictionary containing training and validation metrics.
Example format:
{
"training": {"logloss": [0.1, 0.08]},
"validation": {"logloss": [0.12, 0.1]}
}
Returns
-------
A new instance of XGBoostTrainingSummary.
"""
train_objective_history = metrics.get("training", {})
validation_objective_history = metrics.get("validation", {})
return XGBoostTrainingSummary(
train_objective_history, validation_objective_history
)
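# A minimal usage sketch (the metrics shape follows the docstring example):
#
#     metrics = {
#         "training": {"logloss": [0.1, 0.08]},
#         "validation": {"logloss": [0.12, 0.1]},
#     }
#     summary = XGBoostTrainingSummary.from_metrics(metrics)
#     assert summary.train_objective_history["logloss"][-1] == 0.08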
|
"""Xgboost training summary integration submodule."""
from dataclasses import dataclass, field
from typing import Dict, List
@dataclass
class XGBoostTrainingSummary:
"""
A class that holds the training and validation objective history
of an XGBoost model during its training process.
"""
train_objective_history: Dict[str, List[float]] = field(default_factory=dict)
validation_objective_history: Dict[str, List[float]] = field(default_factory=dict)
@staticmethod
def from_metrics(
metrics: Dict[str, Dict[str, List[float]]]
) -> "XGBoostTrainingSummary":
"""
Create an XGBoostTrainingSummary instance from a nested dictionary of metrics.
Parameters
----------
metrics : dict of str to dict of str to list of float
A dictionary containing training and validation metrics.
Example format:
{
"training": {"logloss": [0.1, 0.08]},
"validation": {"logloss": [0.12, 0.1]}
}
Returns
-------
A new instance of XGBoostTrainingSummary.
"""
train_objective_history = metrics.get("training", {})
validation_objective_history = metrics.get("validation", {})
return XGBoostTrainingSummary(
train_objective_history, validation_objective_history
)
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.2.dev1"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.2.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
_base_ = '../grounding_dino_swin-t_pretrain_obj365.py'
data_root = 'data/coco/'
model = dict(test_cfg=dict(
max_per_img=300,
chunked_size=40,
))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[
[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
                    # The aspect ratio of every image in the train dataset is < 7;
                    # follow the original implementation
scales=[(400, 4200), (500, 4200), (600, 4200)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
]
]),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(
type='RandomSamplingNegPos',
tokenizer_name=_base_.lang_model_name,
num_sample_negative=85,
        # change this path to match your data_root
label_map_file='data/coco/annotations/lvis_v1_label_map_norare.json',
max_tokens=256),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction', 'text',
'custom_entities', 'tokens_positive', 'dataset_mode'))
]
train_dataloader = dict(
dataset=dict(
_delete_=True,
type='ClassBalancedDataset',
oversample_thr=1e-3,
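        # Sketch of the intent (repeat-factor sampling as in the LVIS paper, an
        # assumption about this dataset wrapper): images whose rarest category
        # frequency falls below oversample_thr are repeated during training.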
dataset=dict(
type='ODVGDataset',
data_root=data_root,
need_text=False,
label_map_file='annotations/lvis_v1_label_map_norare.json',
ann_file='annotations/lvis_v1_train_od_norare.json',
data_prefix=dict(img=''),
filter_cfg=dict(filter_empty_gt=False, min_size=32),
return_classes=True,
pipeline=train_pipeline)))
val_dataloader = dict(
dataset=dict(
data_root=data_root,
type='LVISV1Dataset',
ann_file='annotations/lvis_v1_minival_inserted_image_name.json',
data_prefix=dict(img='')))
test_dataloader = val_dataloader
val_evaluator = dict(
_delete_=True,
type='LVISFixedAPMetric',
ann_file=data_root +
'annotations/lvis_v1_minival_inserted_image_name.json')
test_evaluator = val_evaluator
optim_wrapper = dict(
_delete_=True,
type='OptimWrapper',
optimizer=dict(type='AdamW', lr=0.00005, weight_decay=0.0001),
clip_grad=dict(max_norm=0.1, norm_type=2),
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'backbone': dict(lr_mult=0.1),
# 'language_model': dict(lr_mult=0),
}))
# learning policy
max_epochs = 12
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs, val_interval=3)
default_hooks = dict(
checkpoint=dict(
max_keep_ckpts=3, save_best='lvis_fixed_ap/AP', rule='greater'))
load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth' # noqa
|
_base_ = '../grounding_dino_swin-t_pretrain_obj365.py'
data_root = 'data/lvis/'
model = dict(test_cfg=dict(
max_per_img=300,
chunked_size=40,
))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[
[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
                    # The aspect ratio of every image in the train dataset is < 7;
                    # follow the original implementation
scales=[(400, 4200), (500, 4200), (600, 4200)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
]
]),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(
type='RandomSamplingNegPos',
tokenizer_name=_base_.lang_model_name,
num_sample_negative=85,
# change this
label_map_file='data/lvis/annotations/lvis_v1_label_map_norare.json',
max_tokens=256),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction', 'text',
'custom_entities', 'tokens_positive', 'dataset_mode'))
]
train_dataloader = dict(
dataset=dict(
_delete_=True,
type='ClassBalancedDataset',
oversample_thr=1e-3,
dataset=dict(
type='ODVGDataset',
data_root=data_root,
need_text=False,
label_map_file='annotations/lvis_v1_label_map_norare.json',
ann_file='annotations/lvis_v1_train_od_norare.json',
data_prefix=dict(img=''),
filter_cfg=dict(filter_empty_gt=False, min_size=32),
return_classes=True,
pipeline=train_pipeline)))
val_dataloader = dict(
dataset=dict(
data_root=data_root,
type='LVISV1Dataset',
ann_file='annotations/lvis_v1_minival_inserted_image_name.json',
data_prefix=dict(img='')))
test_dataloader = val_dataloader
val_evaluator = dict(
_delete_=True,
type='LVISFixedAPMetric',
ann_file=data_root +
'annotations/lvis_v1_minival_inserted_image_name.json')
test_evaluator = val_evaluator
optim_wrapper = dict(
_delete_=True,
type='OptimWrapper',
optimizer=dict(type='AdamW', lr=0.00005, weight_decay=0.0001),
clip_grad=dict(max_norm=0.1, norm_type=2),
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'backbone': dict(lr_mult=0.1),
# 'language_model': dict(lr_mult=0),
}))
# learning policy
max_epochs = 12
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs, val_interval=3)
default_hooks = dict(
checkpoint=dict(
max_keep_ckpts=3, save_best='lvis_fixed_ap/AP', rule='greater'))
load_from = 'epoch_30.pth'
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .ema_hook import EMAHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualizationHook
from .param_scheduler_hook import ParamSchedulerHook
from .runtime_info_hook import RuntimeInfoHook
from .sampler_seed_hook import DistSamplerSeedHook
from .sync_buffer_hook import SyncBuffersHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'SyncBuffersHook', 'EmptyCacheHook', 'CheckpointHook', 'LoggerHook',
'NaiveVisualizationHook', 'EMAHook', 'RuntimeInfoHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .ema_hook import EMAHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualizationHook
from .optimizer_hook import OptimizerHook
from .param_scheduler_hook import ParamSchedulerHook
from .runtime_info_hook import RuntimeInfoHook
from .sampler_seed_hook import DistSamplerSeedHook
from .sync_buffer_hook import SyncBuffersHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'OptimizerHook', 'SyncBuffersHook', 'EmptyCacheHook', 'CheckpointHook',
'LoggerHook', 'NaiveVisualizationHook', 'EMAHook', 'RuntimeInfoHook'
]
|
import os
from pathlib import Path
from typing import List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
extract_archive,
)
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "waves_yesno",
"url": "http://www.openslr.org/resources/1/waves_yesno.tar.gz",
"checksum": "c3f49e0cca421f96b75b41640749167b52118f232498667ca7a5f9416aef8e73",
}
}
class YESNO(Dataset):
"""Create a Dataset for *YesNo* [:footcite:`YesNo`].
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"http://www.openslr.org/resources/1/waves_yesno.tar.gz"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"waves_yesno"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
archive = os.path.basename(url)
archive = root / archive
self._path = root / folder_in_archive
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
extract_archive(archive)
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found. Please use `download=True` to download it.")
self._walker = sorted(str(p.stem) for p in Path(self._path).glob("*.wav"))
def _load_item(self, fileid: str, path: str):
labels = [int(c) for c in fileid.split("_")]
file_audio = os.path.join(path, fileid + ".wav")
waveform, sample_rate = torchaudio.load(file_audio)
return waveform, sample_rate, labels
def __getitem__(self, n: int) -> Tuple[Tensor, int, List[int]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, List[int]): ``(waveform, sample_rate, labels)``
"""
fileid = self._walker[n]
item = self._load_item(fileid, self._path)
return item
def __len__(self) -> int:
return len(self._walker)
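if __name__ == "__main__":
    # A minimal usage sketch (the root path is illustrative): the first run
    # downloads and extracts the small waves_yesno archive, after which each
    # item is a (waveform, sample_rate, labels) triple.
    dataset = YESNO(root="./data", download=True)
    waveform, sample_rate, labels = dataset[0]
    print(waveform.shape, sample_rate, labels)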
|
import os
from pathlib import Path
from typing import List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
extract_archive,
)
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "waves_yesno",
"url": "http://www.openslr.org/resources/1/waves_yesno.tar.gz",
"checksum": "c3f49e0cca421f96b75b41640749167b52118f232498667ca7a5f9416aef8e73",
}
}
class YESNO(Dataset):
"""Create a Dataset for YesNo.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"http://www.openslr.org/resources/1/waves_yesno.tar.gz"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"waves_yesno"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
archive = os.path.basename(url)
archive = root / archive
self._path = root / folder_in_archive
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
extract_archive(archive)
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found. Please use `download=True` to download it.")
self._walker = sorted(str(p.stem) for p in Path(self._path).glob("*.wav"))
def _load_item(self, fileid: str, path: str):
labels = [int(c) for c in fileid.split("_")]
file_audio = os.path.join(path, fileid + ".wav")
waveform, sample_rate = torchaudio.load(file_audio)
return waveform, sample_rate, labels
def __getitem__(self, n: int) -> Tuple[Tensor, int, List[int]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, List[int]): ``(waveform, sample_rate, labels)``
"""
fileid = self._walker[n]
item = self._load_item(fileid, self._path)
return item
def __len__(self) -> int:
return len(self._walker)
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Doc utilities: Utilities related to documentation
"""
import re
def replace_example_docstring(example_docstring):
def docstring_decorator(fn):
func_doc = fn.__doc__
lines = func_doc.split("\n")
i = 0
while i < len(lines) and re.search(r"^\s*Examples?:\s*$", lines[i]) is None:
i += 1
if i < len(lines):
lines[i] = example_docstring
func_doc = "\n".join(lines)
else:
raise ValueError(
f"The function {fn} should have an empty 'Examples:' in its docstring as placeholder, "
f"current docstring is:\n{func_doc}"
)
fn.__doc__ = func_doc
return fn
return docstring_decorator
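# A minimal usage sketch (the function and example text below are hypothetical):
# the decorator replaces the empty "Examples:" placeholder line in the target
# docstring with the supplied example block.
EXAMPLE_DOC_STRING = """
    Examples:
        >>> add_one(41)
        42
"""

@replace_example_docstring(EXAMPLE_DOC_STRING)
def add_one(x):
    """Add one to ``x``.

    Examples:
    """
    return x + 1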
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Doc utilities: Utilities related to documentation
"""
import re
def replace_example_docstring(example_docstring):
def docstring_decorator(fn):
func_doc = fn.__doc__
lines = func_doc.split("\n")
i = 0
while i < len(lines) and re.search(r"^\s*Examples?:\s*$", lines[i]) is None:
i += 1
if i < len(lines):
lines[i] = example_docstring
func_doc = "\n".join(lines)
else:
raise ValueError(
f"The function {fn} should have an empty 'Examples:' in its docstring as placeholder, "
f"current docstring is:\n{func_doc}"
)
fn.__doc__ = func_doc
return fn
return docstring_decorator
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init__(
self, model: SparseEncoder, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
        between anchor and negative. It computes the following loss function:
        ``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
        The margin is an important hyperparameter and needs to be tuned for the task.
Args:
model: SparseEncoder
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
                common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
            1. Needs to be used in SpladeLoss or CSRLoss as a loss function.
2. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseTripletLoss(model), document_regularizer_weight=3e-5, query_regularizer_weight=5e-5
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
super().__init__(model, distance_metric=distance_metric, triplet_margin=triplet_margin)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseTripletLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init__(
self, model: SparseEncoder, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
        between anchor and negative. It computes the following loss function:
        ``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
        The margin is an important hyperparameter and needs to be tuned for the task.
Args:
model: SparseEncoder
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
                common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
            1. Needs to be used in SpladeLoss or CSRLoss as a loss function.
2. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseTripletLoss(model), corpus_regularizer_weight=3e-5, query_regularizer_weight=5e-5
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
super().__init__(model, distance_metric=distance_metric, triplet_margin=triplet_margin)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseTripletLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.ndarray import NdArray
MAX_INT_16 = 2**15
@_register_proto(proto_type_name='image_ndarray')
class ImageNdArray(AbstractImageTensor, NdArray):
"""
Subclass of [`NdArray`][docarray.typing.NdArray], to represent an image tensor.
Adds image-specific features to the tensor.
    For instance, the ability to convert the tensor back to image bytes,
    which are optimized to be sent over the wire.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageBytes, ImageNdArray, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageNdArray] = None
url: Optional[ImageUrl] = None
bytes: Optional[ImageBytes] = None
# from url
doc = MyImageDoc(
title='my_second_audio_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
```
---
"""
...
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.ndarray import NdArray
MAX_INT_16 = 2**15
@_register_proto(proto_type_name='image_ndarray')
class ImageNdArray(AbstractImageTensor, NdArray):
"""
Subclass of [`NdArray`][docarray.typing.NdArray], to represent an image tensor.
Adds image-specific features to the tensor.
    For instance, the ability to convert the tensor back to image bytes,
    which are optimized to be sent over the wire.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageBytes, ImageNdArray, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageNdArray]
url: Optional[ImageUrl]
bytes: Optional[ImageBytes]
# from url
doc = MyImageDoc(
title='my_second_audio_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
```
---
"""
...
|
"""**Callback handlers** allow listening to events in LangChain.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> <name>CallbackHandler # Example: AimCallbackHandler
"""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.callbacks.base import (
AsyncCallbackHandler,
BaseCallbackHandler,
BaseCallbackManager,
CallbackManagerMixin,
Callbacks,
ChainManagerMixin,
LLMManagerMixin,
RetrieverManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain_core.callbacks.file import FileCallbackHandler
from langchain_core.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainGroup,
AsyncCallbackManagerForChainRun,
AsyncCallbackManagerForLLMRun,
AsyncCallbackManagerForRetrieverRun,
AsyncCallbackManagerForToolRun,
AsyncParentRunManager,
AsyncRunManager,
BaseRunManager,
CallbackManager,
CallbackManagerForChainGroup,
CallbackManagerForChainRun,
CallbackManagerForLLMRun,
CallbackManagerForRetrieverRun,
CallbackManagerForToolRun,
ParentRunManager,
RunManager,
adispatch_custom_event,
dispatch_custom_event,
)
from langchain_core.callbacks.stdout import StdOutCallbackHandler
from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_core.callbacks.usage import (
UsageMetadataCallbackHandler,
get_usage_metadata_callback,
)
__all__ = (
"dispatch_custom_event",
"adispatch_custom_event",
"RetrieverManagerMixin",
"LLMManagerMixin",
"ChainManagerMixin",
"ToolManagerMixin",
"Callbacks",
"CallbackManagerMixin",
"RunManagerMixin",
"BaseCallbackHandler",
"AsyncCallbackHandler",
"BaseCallbackManager",
"BaseRunManager",
"RunManager",
"ParentRunManager",
"AsyncRunManager",
"AsyncParentRunManager",
"CallbackManagerForLLMRun",
"AsyncCallbackManagerForLLMRun",
"CallbackManagerForChainRun",
"AsyncCallbackManagerForChainRun",
"CallbackManagerForToolRun",
"AsyncCallbackManagerForToolRun",
"CallbackManagerForRetrieverRun",
"AsyncCallbackManagerForRetrieverRun",
"CallbackManager",
"CallbackManagerForChainGroup",
"AsyncCallbackManager",
"AsyncCallbackManagerForChainGroup",
"StdOutCallbackHandler",
"StreamingStdOutCallbackHandler",
"FileCallbackHandler",
"UsageMetadataCallbackHandler",
"get_usage_metadata_callback",
)
_dynamic_imports = {
"AsyncCallbackHandler": "base",
"BaseCallbackHandler": "base",
"BaseCallbackManager": "base",
"CallbackManagerMixin": "base",
"Callbacks": "base",
"ChainManagerMixin": "base",
"LLMManagerMixin": "base",
"RetrieverManagerMixin": "base",
"RunManagerMixin": "base",
"ToolManagerMixin": "base",
"FileCallbackHandler": "file",
"AsyncCallbackManager": "manager",
"AsyncCallbackManagerForChainGroup": "manager",
"AsyncCallbackManagerForChainRun": "manager",
"AsyncCallbackManagerForLLMRun": "manager",
"AsyncCallbackManagerForRetrieverRun": "manager",
"AsyncCallbackManagerForToolRun": "manager",
"AsyncParentRunManager": "manager",
"AsyncRunManager": "manager",
"BaseRunManager": "manager",
"CallbackManager": "manager",
"CallbackManagerForChainGroup": "manager",
"CallbackManagerForChainRun": "manager",
"CallbackManagerForLLMRun": "manager",
"CallbackManagerForRetrieverRun": "manager",
"CallbackManagerForToolRun": "manager",
"ParentRunManager": "manager",
"RunManager": "manager",
"adispatch_custom_event": "manager",
"dispatch_custom_event": "manager",
"StdOutCallbackHandler": "stdout",
"StreamingStdOutCallbackHandler": "streaming_stdout",
"UsageMetadataCallbackHandler": "usage",
"get_usage_metadata_callback": "usage",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
"""**Callback handlers** allow listening to events in LangChain.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> <name>CallbackHandler # Example: AimCallbackHandler
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.callbacks.base import (
AsyncCallbackHandler,
BaseCallbackHandler,
BaseCallbackManager,
CallbackManagerMixin,
Callbacks,
ChainManagerMixin,
LLMManagerMixin,
RetrieverManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain_core.callbacks.file import FileCallbackHandler
from langchain_core.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainGroup,
AsyncCallbackManagerForChainRun,
AsyncCallbackManagerForLLMRun,
AsyncCallbackManagerForRetrieverRun,
AsyncCallbackManagerForToolRun,
AsyncParentRunManager,
AsyncRunManager,
BaseRunManager,
CallbackManager,
CallbackManagerForChainGroup,
CallbackManagerForChainRun,
CallbackManagerForLLMRun,
CallbackManagerForRetrieverRun,
CallbackManagerForToolRun,
ParentRunManager,
RunManager,
adispatch_custom_event,
dispatch_custom_event,
)
from langchain_core.callbacks.stdout import StdOutCallbackHandler
from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_core.callbacks.usage import (
UsageMetadataCallbackHandler,
get_usage_metadata_callback,
)
__all__ = [
"dispatch_custom_event",
"adispatch_custom_event",
"RetrieverManagerMixin",
"LLMManagerMixin",
"ChainManagerMixin",
"ToolManagerMixin",
"Callbacks",
"CallbackManagerMixin",
"RunManagerMixin",
"BaseCallbackHandler",
"AsyncCallbackHandler",
"BaseCallbackManager",
"BaseRunManager",
"RunManager",
"ParentRunManager",
"AsyncRunManager",
"AsyncParentRunManager",
"CallbackManagerForLLMRun",
"AsyncCallbackManagerForLLMRun",
"CallbackManagerForChainRun",
"AsyncCallbackManagerForChainRun",
"CallbackManagerForToolRun",
"AsyncCallbackManagerForToolRun",
"CallbackManagerForRetrieverRun",
"AsyncCallbackManagerForRetrieverRun",
"CallbackManager",
"CallbackManagerForChainGroup",
"AsyncCallbackManager",
"AsyncCallbackManagerForChainGroup",
"StdOutCallbackHandler",
"StreamingStdOutCallbackHandler",
"FileCallbackHandler",
"UsageMetadataCallbackHandler",
"get_usage_metadata_callback",
]
_dynamic_imports = {
"AsyncCallbackHandler": "base",
"BaseCallbackHandler": "base",
"BaseCallbackManager": "base",
"CallbackManagerMixin": "base",
"Callbacks": "base",
"ChainManagerMixin": "base",
"LLMManagerMixin": "base",
"RetrieverManagerMixin": "base",
"RunManagerMixin": "base",
"ToolManagerMixin": "base",
"FileCallbackHandler": "file",
"AsyncCallbackManager": "manager",
"AsyncCallbackManagerForChainGroup": "manager",
"AsyncCallbackManagerForChainRun": "manager",
"AsyncCallbackManagerForLLMRun": "manager",
"AsyncCallbackManagerForRetrieverRun": "manager",
"AsyncCallbackManagerForToolRun": "manager",
"AsyncParentRunManager": "manager",
"AsyncRunManager": "manager",
"BaseRunManager": "manager",
"CallbackManager": "manager",
"CallbackManagerForChainGroup": "manager",
"CallbackManagerForChainRun": "manager",
"CallbackManagerForLLMRun": "manager",
"CallbackManagerForRetrieverRun": "manager",
"CallbackManagerForToolRun": "manager",
"ParentRunManager": "manager",
"RunManager": "manager",
"adispatch_custom_event": "manager",
"dispatch_custom_event": "manager",
"StdOutCallbackHandler": "stdout",
"StreamingStdOutCallbackHandler": "streaming_stdout",
"UsageMetadataCallbackHandler": "usage",
"get_usage_metadata_callback": "usage",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
def __init__(self, config, proj_size=None):
super().__init__(config)
self.proj_size = proj_size or getattr(config, "projection_dim", 768)
self.model = CLIPVisionModel(config)
self.mapper = PaintByExampleMapper(config)
self.final_layer_norm = nn.LayerNorm(config.hidden_size)
self.proj_out = nn.Linear(config.hidden_size, self.proj_size)
        # unconditional embedding used for guidance scaling
self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))
def forward(self, pixel_values, return_uncond_vector=False):
clip_output = self.model(pixel_values=pixel_values)
latent_states = clip_output.pooler_output
latent_states = self.mapper(latent_states[:, None])
latent_states = self.final_layer_norm(latent_states)
latent_states = self.proj_out(latent_states)
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class PaintByExampleMapper(nn.Module):
def __init__(self, config):
super().__init__()
num_layers = (config.num_hidden_layers + 1) // 5
hid_size = config.hidden_size
num_heads = 1
self.blocks = nn.ModuleList(
[
BasicTransformerBlock(hid_size, num_heads, hid_size, activation_fn="gelu", attention_bias=True)
for _ in range(num_layers)
]
)
def forward(self, hidden_states):
for block in self.blocks:
hidden_states = block(hidden_states)
return hidden_states
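# A shape sketch (dimension names are assumptions based on CLIP conventions):
# pixel_values (B, 3, H, W) -> CLIP pooler_output (B, hidden_size)
# -> unsqueeze to (B, 1, hidden_size) -> mapper/LayerNorm keep that shape
# -> proj_out -> latent_states (B, 1, proj_size).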
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
def __init__(self, config, proj_size=None):
super().__init__(config)
self.proj_size = proj_size or getattr(config, "projection_dim", 768)
self.model = CLIPVisionModel(config)
self.mapper = PaintByExampleMapper(config)
self.final_layer_norm = nn.LayerNorm(config.hidden_size)
self.proj_out = nn.Linear(config.hidden_size, self.proj_size)
        # unconditional embedding used for guidance scaling
self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))
def forward(self, pixel_values, return_uncond_vector=False):
clip_output = self.model(pixel_values=pixel_values)
latent_states = clip_output.pooler_output
latent_states = self.mapper(latent_states[:, None])
latent_states = self.final_layer_norm(latent_states)
latent_states = self.proj_out(latent_states)
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class PaintByExampleMapper(nn.Module):
def __init__(self, config):
super().__init__()
num_layers = (config.num_hidden_layers + 1) // 5
hid_size = config.hidden_size
num_heads = 1
self.blocks = nn.ModuleList(
[
BasicTransformerBlock(hid_size, num_heads, hid_size, activation_fn="gelu", attention_bias=True)
for _ in range(num_layers)
]
)
def forward(self, hidden_states):
for block in self.blocks:
hidden_states = block(hidden_states)
return hidden_states
|
"""Fake LLM wrapper for testing purposes."""
from collections.abc import Mapping
from typing import Any, Optional, cast
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from pydantic import model_validator
class FakeLLM(LLM):
"""Fake LLM wrapper for testing purposes."""
queries: Optional[Mapping] = None
sequential_responses: Optional[bool] = False
response_index: int = 0
@model_validator(mode="before")
@classmethod
def check_queries_required(cls, values: dict) -> dict:
if values.get("sequential_response") and not values.get("queries"):
msg = "queries is required when sequential_response is set to True"
raise ValueError(msg)
return values
def get_num_tokens(self, text: str) -> int:
"""Return number of tokens."""
return len(text.split())
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake"
def _call(
self,
prompt: str,
stop: Optional[list[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
if self.sequential_responses:
return self._get_next_response_in_sequence
if self.queries is not None:
return self.queries[prompt]
if stop is None:
return "foo"
else:
return "bar"
@property
def _identifying_params(self) -> dict[str, Any]:
return {}
@property
def _get_next_response_in_sequence(self) -> str:
queries = cast(Mapping, self.queries)
response = queries[list(queries.keys())[self.response_index]]
self.response_index = self.response_index + 1
return response
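if __name__ == "__main__":
    # A minimal usage sketch: with sequential_responses=True, the values of
    # ``queries`` are returned in insertion order, regardless of the prompt.
    llm = FakeLLM(queries={"a": "first", "b": "second"}, sequential_responses=True)
    assert llm.invoke("anything") == "first"
    assert llm.invoke("anything") == "second"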
|
"""Fake LLM wrapper for testing purposes."""
from collections.abc import Mapping
from typing import Any, Optional, cast
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from pydantic import model_validator
class FakeLLM(LLM):
"""Fake LLM wrapper for testing purposes."""
queries: Optional[Mapping] = None
sequential_responses: Optional[bool] = False
response_index: int = 0
@model_validator(mode="before")
@classmethod
def check_queries_required(cls, values: dict) -> dict:
if values.get("sequential_response") and not values.get("queries"):
raise ValueError(
"queries is required when sequential_response is set to True"
)
return values
def get_num_tokens(self, text: str) -> int:
"""Return number of tokens."""
return len(text.split())
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake"
def _call(
self,
prompt: str,
stop: Optional[list[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
if self.sequential_responses:
return self._get_next_response_in_sequence
if self.queries is not None:
return self.queries[prompt]
if stop is None:
return "foo"
else:
return "bar"
@property
def _identifying_params(self) -> dict[str, Any]:
return {}
@property
def _get_next_response_in_sequence(self) -> str:
queries = cast(Mapping, self.queries)
response = queries[list(queries.keys())[self.response_index]]
self.response_index = self.response_index + 1
return response
|
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_caffe')))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_caffe')))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
resize_cfg=dict(type='Resize', keep_ratio=True)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
train_cfg = dict(max_epochs=24)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
"""Question-answering with sources over a vector database."""
import warnings
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.vectorstores import VectorStore
from pydantic import Field, model_validator
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain):
"""Question-answering with sources over a vector database."""
vectorstore: VectorStore = Field(exclude=True)
"""Vector Database to connect to."""
k: int = 4
"""Number of results to return from store"""
reduce_k_below_max_tokens: bool = False
"""Reduce the number of results to return from store based on tokens limit"""
max_tokens_limit: int = 3375
"""Restrict the docs to return from store based on tokens,
enforced only for StuffDocumentChain and if reduce_k_below_max_tokens is to true"""
search_kwargs: dict[str, Any] = Field(default_factory=dict)
"""Extra search args."""
def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
num_docs = len(docs)
if self.reduce_k_below_max_tokens and isinstance(
self.combine_documents_chain, StuffDocumentsChain
):
tokens = [
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(
self, inputs: dict[str, Any], *, run_manager: CallbackManagerForChainRun
) -> list[Document]:
question = inputs[self.question_key]
docs = self.vectorstore.similarity_search(
question, k=self.k, **self.search_kwargs
)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(
self, inputs: dict[str, Any], *, run_manager: AsyncCallbackManagerForChainRun
) -> list[Document]:
raise NotImplementedError("VectorDBQAWithSourcesChain does not support async")
@model_validator(mode="before")
@classmethod
def raise_deprecation(cls, values: dict) -> Any:
warnings.warn(
"`VectorDBQAWithSourcesChain` is deprecated - "
"please use `from langchain.chains import RetrievalQAWithSourcesChain`"
)
return values
@property
def _chain_type(self) -> str:
return "vector_db_qa_with_sources_chain"
|
"""Question-answering with sources over a vector database."""
import warnings
from typing import Any, Dict, List
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.vectorstores import VectorStore
from pydantic import Field, model_validator
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain):
"""Question-answering with sources over a vector database."""
vectorstore: VectorStore = Field(exclude=True)
"""Vector Database to connect to."""
k: int = 4
"""Number of results to return from store"""
reduce_k_below_max_tokens: bool = False
"""Reduce the number of results to return from store based on tokens limit"""
max_tokens_limit: int = 3375
"""Restrict the docs to return from store based on tokens,
enforced only for StuffDocumentChain and if reduce_k_below_max_tokens is to true"""
search_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Extra search args."""
def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]:
num_docs = len(docs)
if self.reduce_k_below_max_tokens and isinstance(
self.combine_documents_chain, StuffDocumentsChain
):
tokens = [
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(
self, inputs: Dict[str, Any], *, run_manager: CallbackManagerForChainRun
) -> List[Document]:
question = inputs[self.question_key]
docs = self.vectorstore.similarity_search(
question, k=self.k, **self.search_kwargs
)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(
self, inputs: Dict[str, Any], *, run_manager: AsyncCallbackManagerForChainRun
) -> List[Document]:
raise NotImplementedError("VectorDBQAWithSourcesChain does not support async")
@model_validator(mode="before")
@classmethod
def raise_deprecation(cls, values: Dict) -> Any:
warnings.warn(
"`VectorDBQAWithSourcesChain` is deprecated - "
"please use `from langchain.chains import RetrievalQAWithSourcesChain`"
)
return values
@property
def _chain_type(self) -> str:
return "vector_db_qa_with_sources_chain"
|
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device, requires_grad=True)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device, requires_grad=True)
self.assertTrue(gradcheck(fn, (x, y, mode)))
self.assertTrue(gradgradcheck(fn, (x, y, mode)))
def test_add_noise(self):
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self.assertTrue(gradcheck(F.add_noise, (waveform, noise, lengths, snr)))
self.assertTrue(gradgradcheck(F.add_noise, (waveform, noise, lengths, snr)))
@parameterized.expand(
[
(8000, (2, 3, 5, 7)),
(8000, (8000, 1)),
]
)
def test_oscillator_bank(self, sample_rate, shape):
# can be replaced with math.prod when we drop 3.7 support
def prod(iterable):
ret = 1
for item in iterable:
ret *= item
return ret
numel = prod(shape)
        # use 1.9 instead of 2 so as to include values above the Nyquist frequency
fmax = sample_rate / 1.9
freq = torch.linspace(-fmax, fmax, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(
shape
)
amps = torch.linspace(-5, 5, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(shape)
assert gradcheck(F.oscillator_bank, (freq, amps, sample_rate))
def test_extend_pitch(self):
num_frames, num_pitches = 5, 7
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype, requires_grad=True)
pattern = torch.linspace(1, num_pitches, num_pitches, device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.extend_pitch, (input, num_pitches))
assert gradcheck(F.extend_pitch, (input, pattern))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, False))
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, True))
|
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device, requires_grad=True)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device, requires_grad=True)
self.assertTrue(gradcheck(fn, (x, y, mode)))
self.assertTrue(gradgradcheck(fn, (x, y, mode)))
def test_add_noise(self):
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self.assertTrue(gradcheck(F.add_noise, (waveform, noise, lengths, snr)))
self.assertTrue(gradgradcheck(F.add_noise, (waveform, noise, lengths, snr)))
@parameterized.expand(
[
(8000, (2, 3, 5, 7)),
(8000, (8000, 1)),
]
)
def test_oscillator_bank(self, sample_rate, shape):
# can be replaced with math.prod when we drop 3.7 support
def prod(iterable):
ret = 1
for item in iterable:
ret *= item
return ret
numel = prod(shape)
        # use 1.9 instead of 2 so as to include values above the Nyquist frequency
fmax = sample_rate / 1.9
freq = torch.linspace(-fmax, fmax, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(
shape
)
amps = torch.linspace(-5, 5, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(shape)
assert gradcheck(F.oscillator_bank, (freq, amps, sample_rate))
def test_extend_pitch(self):
num_frames, num_pitches = 5, 7
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype, requires_grad=True)
pattern = torch.linspace(1, num_pitches, num_pitches, device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.extend_pitch, (input, num_pitches))
assert gradcheck(F.extend_pitch, (input, pattern))
|
from __future__ import annotations
import csv
import gzip
import os
from collections.abc import Generator
import pytest
from torch.utils.data import DataLoader
from sentence_transformers import CrossEncoder, util
from sentence_transformers.cross_encoder.evaluation import CrossEncoderCorrelationEvaluator
from sentence_transformers.readers import InputExample
from sentence_transformers.util import is_training_available
if not is_training_available():
pytest.skip(
reason='Sentence Transformers was not installed with the `["train"]` extra.',
allow_module_level=True,
)
@pytest.fixture()
def sts_resource() -> Generator[tuple[list[InputExample], list[InputExample]], None, None]:
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
stsb_train_samples = []
stsb_test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "test":
stsb_test_samples.append(inp_example)
elif row["split"] == "train":
stsb_train_samples.append(inp_example)
yield stsb_train_samples, stsb_test_samples
def evaluate_stsb_test(
distilroberta_base_ce_model: CrossEncoder,
expected_score: float,
test_samples: list[InputExample],
num_test_samples: int = -1,
) -> None:
model = distilroberta_base_ce_model
evaluator = CrossEncoderCorrelationEvaluator.from_input_examples(test_samples[:num_test_samples], name="sts-test")
scores = evaluator(model)
score = scores[evaluator.primary_metric] * 100
print(f"STS-Test Performance: {score:.2f} vs. exp: {expected_score:.2f}")
assert score > expected_score or abs(score - expected_score) < 0.1
def test_pretrained_stsb(sts_resource: tuple[list[InputExample], list[InputExample]]):
_, sts_test_samples = sts_resource
model = CrossEncoder("cross-encoder/stsb-distilroberta-base")
evaluate_stsb_test(model, 87.92, sts_test_samples)
@pytest.mark.slow
def test_train_stsb_slow(
distilroberta_base_ce_model: CrossEncoder, sts_resource: tuple[list[InputExample], list[InputExample]]
) -> None:
model = distilroberta_base_ce_model
sts_train_samples, sts_test_samples = sts_resource
train_dataloader = DataLoader(sts_train_samples, shuffle=True, batch_size=16)
model.fit(
train_dataloader=train_dataloader,
epochs=1,
warmup_steps=int(len(train_dataloader) * 0.1),
)
evaluate_stsb_test(model, 75, sts_test_samples)
def test_train_stsb(
distilroberta_base_ce_model: CrossEncoder, sts_resource: tuple[list[InputExample], list[InputExample]]
) -> None:
model = distilroberta_base_ce_model
sts_train_samples, sts_test_samples = sts_resource
train_dataloader = DataLoader(sts_train_samples[:500], shuffle=True, batch_size=16)
model.fit(
train_dataloader=train_dataloader,
epochs=1,
warmup_steps=int(len(train_dataloader) * 0.1),
)
evaluate_stsb_test(model, 50, sts_test_samples, num_test_samples=100)
|
from __future__ import annotations
import csv
import gzip
import os
from collections.abc import Generator
import pytest
from torch.utils.data import DataLoader
from sentence_transformers import CrossEncoder, util
from sentence_transformers.cross_encoder.evaluation import CrossEncoderCorrelationEvaluator
from sentence_transformers.readers import InputExample
@pytest.fixture()
def sts_resource() -> Generator[tuple[list[InputExample], list[InputExample]], None, None]:
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
stsb_train_samples = []
stsb_test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "test":
stsb_test_samples.append(inp_example)
elif row["split"] == "train":
stsb_train_samples.append(inp_example)
yield stsb_train_samples, stsb_test_samples
def evaluate_stsb_test(
distilroberta_base_ce_model: CrossEncoder,
expected_score: float,
test_samples: list[InputExample],
num_test_samples: int = -1,
) -> None:
model = distilroberta_base_ce_model
evaluator = CrossEncoderCorrelationEvaluator.from_input_examples(test_samples[:num_test_samples], name="sts-test")
scores = evaluator(model)
score = scores[evaluator.primary_metric] * 100
print(f"STS-Test Performance: {score:.2f} vs. exp: {expected_score:.2f}")
assert score > expected_score or abs(score - expected_score) < 0.1
def test_pretrained_stsb(sts_resource: tuple[list[InputExample], list[InputExample]]):
_, sts_test_samples = sts_resource
model = CrossEncoder("cross-encoder/stsb-distilroberta-base")
evaluate_stsb_test(model, 87.92, sts_test_samples)
@pytest.mark.slow
def test_train_stsb_slow(
distilroberta_base_ce_model: CrossEncoder, sts_resource: tuple[list[InputExample], list[InputExample]]
) -> None:
model = distilroberta_base_ce_model
sts_train_samples, sts_test_samples = sts_resource
train_dataloader = DataLoader(sts_train_samples, shuffle=True, batch_size=16)
model.fit(
train_dataloader=train_dataloader,
epochs=1,
warmup_steps=int(len(train_dataloader) * 0.1),
)
evaluate_stsb_test(model, 75, sts_test_samples)
def test_train_stsb(
distilroberta_base_ce_model: CrossEncoder, sts_resource: tuple[list[InputExample], list[InputExample]]
) -> None:
model = distilroberta_base_ce_model
sts_train_samples, sts_test_samples = sts_resource
train_dataloader = DataLoader(sts_train_samples[:500], shuffle=True, batch_size=16)
model.fit(
train_dataloader=train_dataloader,
epochs=1,
warmup_steps=int(len(train_dataloader) * 0.1),
)
evaluate_stsb_test(model, 50, sts_test_samples, num_test_samples=100)
|
"""Standard LangChain interface tests for Responses API"""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_openai import ChatOpenAI
from tests.integration_tests.chat_models.test_base_standard import TestOpenAIStandard
class TestOpenAIResponses(TestOpenAIStandard):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatOpenAI
@property
def chat_model_params(self) -> dict:
return {"model": "gpt-4o-mini", "use_responses_api": True}
@pytest.mark.xfail(reason="Unsupported.")
def test_stop_sequence(self, model: BaseChatModel) -> None:
super().test_stop_sequence(model)
|
"""Standard LangChain interface tests for Responses API"""
from typing import Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_openai import ChatOpenAI
from tests.integration_tests.chat_models.test_base_standard import TestOpenAIStandard
class TestOpenAIResponses(TestOpenAIStandard):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatOpenAI
@property
def chat_model_params(self) -> dict:
return {"model": "gpt-4o-mini", "use_responses_api": True}
@pytest.mark.xfail(reason="Unsupported.")
def test_stop_sequence(self, model: BaseChatModel) -> None:
super().test_stop_sequence(model)
|
"""Various utilities to help with development."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import platform
import warnings
from collections.abc import Sequence
import numpy as np
from ..exceptions import DataConversionWarning
from . import _joblib, metadata_routing
from ._bunch import Bunch
from ._chunking import gen_batches, gen_even_slices
from ._estimator_html_repr import estimator_html_repr
# Make _safe_indexing importable from here for backward compat as this particular
# helper is considered semi-private and typically very useful for third-party
# libraries that want to comply with scikit-learn's estimator API. In particular,
# _safe_indexing was included in our public API documentation despite the leading
# `_` in its name.
from ._indexing import (
_safe_indexing, # noqa
resample,
shuffle,
)
from ._mask import safe_mask
from ._tags import (
ClassifierTags,
InputTags,
RegressorTags,
Tags,
TargetTags,
TransformerTags,
get_tags,
)
from .class_weight import compute_class_weight, compute_sample_weight
from .deprecation import deprecated
from .discovery import all_estimators
from .extmath import safe_sqr
from .murmurhash import murmurhash3_32
from .validation import (
as_float_array,
assert_all_finite,
check_array,
check_consistent_length,
check_random_state,
check_scalar,
check_symmetric,
check_X_y,
column_or_1d,
indexable,
)
# TODO(1.7): remove parallel_backend and register_parallel_backend
msg = "deprecated in 1.5 to be removed in 1.7. Use joblib.{} instead."
register_parallel_backend = deprecated(msg)(_joblib.register_parallel_backend)
# if applied to the class directly, deprecated would modify the object in the
# _joblib module, so we subclass it instead
@deprecated(msg)
class parallel_backend(_joblib.parallel_backend):
pass
__all__ = [
"Bunch",
"ClassifierTags",
"DataConversionWarning",
"InputTags",
"RegressorTags",
"Tags",
"TargetTags",
"TransformerTags",
"all_estimators",
"as_float_array",
"assert_all_finite",
"check_X_y",
"check_array",
"check_consistent_length",
"check_random_state",
"check_scalar",
"check_symmetric",
"column_or_1d",
"compute_class_weight",
"compute_sample_weight",
"deprecated",
"estimator_html_repr",
"gen_batches",
"gen_even_slices",
"get_tags",
"indexable",
"metadata_routing",
"murmurhash3_32",
"parallel_backend",
"register_parallel_backend",
"resample",
"safe_mask",
"safe_sqr",
"shuffle",
]
# TODO(1.7): remove
def __getattr__(name):
if name == "IS_PYPY":
warnings.warn(
"IS_PYPY is deprecated and will be removed in 1.7.",
FutureWarning,
)
return platform.python_implementation() == "PyPy"
raise AttributeError(f"module {__name__} has no attribute {name}")
# TODO(1.7): remove tosequence
@deprecated("tosequence was deprecated in 1.5 and will be removed in 1.7")
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible.
Parameters
----------
x : iterable
The iterable to be converted.
Returns
-------
x : Sequence
        If `x` is a NumPy array, it is returned as an `ndarray`. If `x`
        is a `Sequence`, `x` is returned as-is. If `x` is of any other
        type, `x` is returned cast as a list.
"""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
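if __name__ == "__main__":
    # A minimal usage sketch of the re-exported helpers (arrays are
    # illustrative): ``shuffle`` permutes several indexables consistently.
    X = np.arange(10).reshape(5, 2)
    y = np.arange(5)
    X_s, y_s = shuffle(X, y, random_state=0)
    print(X_s, y_s)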
|
"""Various utilities to help with development."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import platform
import warnings
from collections.abc import Sequence
import numpy as np
from ..exceptions import DataConversionWarning
from . import _joblib, metadata_routing
from ._bunch import Bunch
from ._chunking import gen_batches, gen_even_slices
from ._estimator_html_repr import estimator_html_repr
# Make _safe_indexing importable from here for backward compat as this particular
# helper is considered semi-private and typically very useful for third-party
# libraries that want to comply with scikit-learn's estimator API. In particular,
# _safe_indexing was included in our public API documentation despite the leading
# `_` in its name.
from ._indexing import (
_safe_indexing, # noqa
resample,
shuffle,
)
from ._mask import safe_mask
from ._tags import (
ClassifierTags,
InputTags,
RegressorTags,
Tags,
TargetTags,
TransformerTags,
get_tags,
)
from .class_weight import compute_class_weight, compute_sample_weight
from .deprecation import deprecated
from .discovery import all_estimators
from .extmath import safe_sqr
from .murmurhash import murmurhash3_32
from .validation import (
as_float_array,
assert_all_finite,
check_array,
check_consistent_length,
check_random_state,
check_scalar,
check_symmetric,
check_X_y,
column_or_1d,
indexable,
)
# TODO(1.7): remove parallel_backend and register_parallel_backend
msg = "deprecated in 1.5 to be removed in 1.7. Use joblib.{} instead."
register_parallel_backend = deprecated(msg)(_joblib.register_parallel_backend)
# if applied to the class directly, deprecated would modify the object in the
# _joblib module, so we subclass it instead
@deprecated(msg)
class parallel_backend(_joblib.parallel_backend):
pass
__all__ = [
"murmurhash3_32",
"as_float_array",
"assert_all_finite",
"check_array",
"check_random_state",
"compute_class_weight",
"compute_sample_weight",
"column_or_1d",
"check_consistent_length",
"check_X_y",
"check_scalar",
"indexable",
"check_symmetric",
"deprecated",
"parallel_backend",
"register_parallel_backend",
"resample",
"shuffle",
"all_estimators",
"DataConversionWarning",
"estimator_html_repr",
"Bunch",
"metadata_routing",
"safe_sqr",
"safe_mask",
"gen_batches",
"gen_even_slices",
"Tags",
"InputTags",
"TargetTags",
"ClassifierTags",
"RegressorTags",
"TransformerTags",
"get_tags",
]
# TODO(1.7): remove
def __getattr__(name):
if name == "IS_PYPY":
warnings.warn(
"IS_PYPY is deprecated and will be removed in 1.7.",
FutureWarning,
)
return platform.python_implementation() == "PyPy"
raise AttributeError(f"module {__name__} has no attribute {name}")
# TODO(1.7): remove tosequence
@deprecated("tosequence was deprecated in 1.5 and will be removed in 1.7")
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible.
Parameters
----------
x : iterable
The iterable to be converted.
Returns
-------
x : Sequence
        If `x` is a NumPy array, it is returned as an `ndarray`. If `x`
        is a `Sequence`, `x` is returned as-is. If `x` is of any other
        type, `x` is returned cast as a list.
"""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
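# Editor's sketch (added): the PEP 562 module-level __getattr__ deprecation hook
# used above, exercised with a throwaway module standing in for sklearn.utils;
# the module name `demo_utils` and the returned value are hypothetical.
import types
import warnings

demo = types.ModuleType("demo_utils")

def _module_getattr(name):
    if name == "IS_PYPY":
        warnings.warn("IS_PYPY is deprecated and will be removed in 1.7.",
                      FutureWarning)
        return False  # stand-in for platform.python_implementation() == "PyPy"
    raise AttributeError(f"module {demo.__name__} has no attribute {name}")

demo.__getattr__ = _module_getattr  # consulted when normal attribute lookup fails
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    _ = demo.IS_PYPY  # triggers the FutureWarning above
assert caught and issubclass(caught[0].category, FutureWarning)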
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from ..builder import BBOX_CODERS
from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class YOLOBBoxCoder(BaseBBoxCoder):
"""YOLO BBox coder.
    Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divides
    the image into grids, and encodes a bbox (x1, y1, x2, y2) into
    (cx, cy, dw, dh). cx, cy in [0., 1.] denote the relative center position
    w.r.t. the center of bboxes. dw, dh are the same as
    :obj:`DeltaXYWHBBoxCoder`.
Args:
eps (float): Min value of cx, cy when encoding.
"""
def __init__(self, eps=1e-6):
        super().__init__()
self.eps = eps
@mmcv.jit(coderize=True)
def encode(self, bboxes, gt_bboxes, stride):
"""Get box regression transformation deltas that can be used to
transform the ``bboxes`` into the ``gt_bboxes``.
Args:
bboxes (torch.Tensor): Source boxes, e.g., anchors.
gt_bboxes (torch.Tensor): Target of the transformation, e.g.,
ground-truth boxes.
stride (torch.Tensor | int): Stride of bboxes.
Returns:
torch.Tensor: Box transformation deltas
"""
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]
h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]
x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
w = bboxes[..., 2] - bboxes[..., 0]
h = bboxes[..., 3] - bboxes[..., 1]
w_target = torch.log((w_gt / w).clamp(min=self.eps))
h_target = torch.log((h_gt / h).clamp(min=self.eps))
x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
encoded_bboxes = torch.stack(
[x_center_target, y_center_target, w_target, h_target], dim=-1)
return encoded_bboxes
@mmcv.jit(coderize=True)
def decode(self, bboxes, pred_bboxes, stride):
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
boxes (torch.Tensor): Basic boxes, e.g. anchors.
pred_bboxes (torch.Tensor): Encoded boxes with shape
stride (torch.Tensor | int): Strides of bboxes.
Returns:
torch.Tensor: Decoded boxes.
"""
assert pred_bboxes.size(0) == bboxes.size(0)
assert pred_bboxes.size(-1) == bboxes.size(-1) == 4
x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
w = bboxes[..., 2] - bboxes[..., 0]
h = bboxes[..., 3] - bboxes[..., 1]
# Get outputs x, y
x_center_pred = (pred_bboxes[..., 0] - 0.5) * stride + x_center
y_center_pred = (pred_bboxes[..., 1] - 0.5) * stride + y_center
w_pred = torch.exp(pred_bboxes[..., 2]) * w
h_pred = torch.exp(pred_bboxes[..., 3]) * h
decoded_bboxes = torch.stack(
(x_center_pred - w_pred / 2, y_center_pred - h_pred / 2,
x_center_pred + w_pred / 2, y_center_pred + h_pred / 2),
dim=-1)
return decoded_bboxes
|
import mmcv
import torch
from ..builder import BBOX_CODERS
from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class YOLOBBoxCoder(BaseBBoxCoder):
"""YOLO BBox coder.
    Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divides
    the image into grids, and encodes a bbox (x1, y1, x2, y2) into
    (cx, cy, dw, dh). cx, cy in [0., 1.] denote the relative center position
    w.r.t. the center of bboxes. dw, dh are the same as
    :obj:`DeltaXYWHBBoxCoder`.
Args:
eps (float): Min value of cx, cy when encoding.
"""
def __init__(self, eps=1e-6):
        super().__init__()
self.eps = eps
@mmcv.jit(coderize=True)
def encode(self, bboxes, gt_bboxes, stride):
"""Get box regression transformation deltas that can be used to
transform the ``bboxes`` into the ``gt_bboxes``.
Args:
bboxes (torch.Tensor): Source boxes, e.g., anchors.
gt_bboxes (torch.Tensor): Target of the transformation, e.g.,
ground-truth boxes.
stride (torch.Tensor | int): Stride of bboxes.
Returns:
torch.Tensor: Box transformation deltas
"""
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]
h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]
x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
w = bboxes[..., 2] - bboxes[..., 0]
h = bboxes[..., 3] - bboxes[..., 1]
w_target = torch.log((w_gt / w).clamp(min=self.eps))
h_target = torch.log((h_gt / h).clamp(min=self.eps))
x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
encoded_bboxes = torch.stack(
[x_center_target, y_center_target, w_target, h_target], dim=-1)
return encoded_bboxes
@mmcv.jit(coderize=True)
def decode(self, bboxes, pred_bboxes, stride):
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
boxes (torch.Tensor): Basic boxes, e.g. anchors.
pred_bboxes (torch.Tensor): Encoded boxes with shape
stride (torch.Tensor | int): Strides of bboxes.
Returns:
torch.Tensor: Decoded boxes.
"""
assert pred_bboxes.size(0) == bboxes.size(0)
assert pred_bboxes.size(-1) == bboxes.size(-1) == 4
x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
w = bboxes[..., 2] - bboxes[..., 0]
h = bboxes[..., 3] - bboxes[..., 1]
# Get outputs x, y
x_center_pred = (pred_bboxes[..., 0] - 0.5) * stride + x_center
y_center_pred = (pred_bboxes[..., 1] - 0.5) * stride + y_center
w_pred = torch.exp(pred_bboxes[..., 2]) * w
h_pred = torch.exp(pred_bboxes[..., 3]) * h
decoded_bboxes = torch.stack(
(x_center_pred - w_pred / 2, y_center_pred - h_pred / 2,
x_center_pred + w_pred / 2, y_center_pred + h_pred / 2),
dim=-1)
return decoded_bboxes
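# Editor's sketch (added): a numeric round trip through the coder above, assuming
# mmdet and its dependencies are installed; the import path below follows the
# mmdet 2.x layout and may differ in other versions.
import torch
from mmdet.core.bbox.coder import YOLOBBoxCoder  # assumed mmdet 2.x path

coder = YOLOBBoxCoder(eps=1e-6)
anchors = torch.tensor([[10., 10., 30., 30.]])  # 20x20 anchor centred at (20, 20)
gts = torch.tensor([[12., 14., 32., 30.]])      # ground-truth box
deltas = coder.encode(anchors, gts, stride=8)   # -> [[0.75, 0.75, 0.0, ~-0.223]]
restored = coder.decode(anchors, deltas, stride=8)
assert torch.allclose(restored, gts, atol=1e-4)  # decode inverts encode here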
|
import logging
import os
from typing import Optional
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
class WebSocketGateway(BaseGateway):
"""WebSocket Gateway implementation"""
def __init__(
self,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: Optional[bool] = None,
**kwargs
):
"""Initialize the gateway
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
:param proxy: If set, respect the http_proxy and https_proxy environment variables, otherwise, it will unset
these proxy variables before start. gRPC seems to prefer no proxy
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self._set_single_port_protocol()
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.uvicorn_kwargs = uvicorn_kwargs
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
async def setup_server(self):
"""
Setup WebSocket Server
"""
from jina.helper import extend_rest_interface
self.app = extend_rest_interface(
get_fastapi_app(
streamer=self.streamer,
logger=self.logger,
tracing=self.tracing,
tracer_provider=self.tracer_provider,
)
)
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
uvicorn_kwargs = self.uvicorn_kwargs or {}
if self.ssl_keyfile and 'ssl_keyfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_keyfile'] = self.ssl_keyfile
if self.ssl_certfile and 'ssl_certfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_certfile'] = self.ssl_certfile
self.server = UviServer(
config=Config(
app=self.app,
host=__default_host__,
port=self.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs,
)
)
await self.server.setup()
async def teardown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
await super().teardown()
await self.server.shutdown()
async def stop_server(self):
"""
Stop WebSocket server
"""
self.server.should_exit = True
async def run_server(self):
"""Run WebSocket server forever"""
await self.server.serve()
|
import logging
import os
from typing import Optional
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
class WebSocketGateway(BaseGateway):
"""WebSocket Gateway implementation"""
def __init__(
self,
port: Optional[int] = None,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: Optional[bool] = None,
**kwargs
):
"""Initialize the gateway
:param port: The port of the Gateway, which the client should connect to.
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
:param proxy: If set, respect the http_proxy and https_proxy environment variables, otherwise, it will unset
these proxy variables before start. gRPC seems to prefer no proxy
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self.port = port
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.uvicorn_kwargs = uvicorn_kwargs
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
async def setup_server(self):
"""
Setup WebSocket Server
"""
from jina.helper import extend_rest_interface
self.app = extend_rest_interface(
get_fastapi_app(
streamer=self.streamer,
logger=self.logger,
tracing=self.tracing,
tracer_provider=self.tracer_provider,
)
)
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
uvicorn_kwargs = self.uvicorn_kwargs or {}
if self.ssl_keyfile and 'ssl_keyfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_keyfile'] = self.ssl_keyfile
if self.ssl_certfile and 'ssl_certfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_certfile'] = self.ssl_certfile
self.server = UviServer(
config=Config(
app=self.app,
host=__default_host__,
port=self.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs,
)
)
await self.server.setup()
async def teardown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
await super().teardown()
await self.server.shutdown()
async def stop_server(self):
"""
Stop WebSocket server
"""
self.server.should_exit = True
async def run_server(self):
"""Run WebSocket server forever"""
await self.server.serve()
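# Editor's sketch (added): the ssl/uvicorn kwargs merge rule from setup_server()
# above, isolated as plain Python so it runs without jina or uvicorn; the file
# names are hypothetical.
def merge_ssl_kwargs(uvicorn_kwargs, ssl_keyfile=None, ssl_certfile=None):
    merged = dict(uvicorn_kwargs or {})
    if ssl_keyfile and 'ssl_keyfile' not in merged:
        merged['ssl_keyfile'] = ssl_keyfile
    if ssl_certfile and 'ssl_certfile' not in merged:
        merged['ssl_certfile'] = ssl_certfile
    return merged

# A key already present in uvicorn_kwargs wins over the explicit argument.
assert merge_ssl_kwargs({'ssl_certfile': 'user.pem'}, 'gw.key', 'gw.pem') == {
    'ssl_certfile': 'user.pem',
    'ssl_keyfile': 'gw.key',
}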
|
import os
import docker
import pytest
from jina.logging.logger import JinaLogger
client = docker.from_env()
cur_dir = os.path.dirname(__file__)
@pytest.fixture()
def test_dir() -> str:
return cur_dir
@pytest.fixture
def logger():
return JinaLogger('docker-compose-testing')
@pytest.fixture
def image_name_tag_map():
return {
'reload-executor': '0.13.1',
'test-executor': '0.13.1',
'executor-merger': '0.1.1',
'custom-gateway': '0.1.1',
'jinaai/jina': 'test-pip',
}
def build_docker_image(image_name, image_name_tag_map):
logger = JinaLogger('docker-compose-testing')
image_tag = image_name + ':' + image_name_tag_map[image_name]
image, build_logs = client.images.build(
path=os.path.join(cur_dir, image_name), tag=image_tag
)
for chunk in build_logs:
if 'stream' in chunk:
for line in chunk['stream'].splitlines():
logger.debug(line)
return image.tags[-1]
@pytest.fixture(autouse=True)
def set_test_pip_version():
os.environ['JINA_GATEWAY_IMAGE'] = 'jinaai/jina:test-pip'
yield
del os.environ['JINA_GATEWAY_IMAGE']
@pytest.fixture(autouse=True)
def build_images(image_name_tag_map):
for image in image_name_tag_map.keys():
if image != 'jinaai/jina':
build_docker_image(image, image_name_tag_map)
@pytest.fixture
def docker_images(request, image_name_tag_map):
image_names = request.param
images = [
image_name + ':' + image_name_tag_map[image_name] for image_name in image_names
]
return images
|
import os
import docker
import pytest
from jina.logging.logger import JinaLogger
client = docker.from_env()
cur_dir = os.path.dirname(__file__)
@pytest.fixture()
def test_dir() -> str:
return cur_dir
@pytest.fixture
def logger():
return JinaLogger('docker-compose-testing')
@pytest.fixture
def image_name_tag_map():
return {
'reload-executor': '0.13.1',
'test-executor': '0.13.1',
'executor-merger': '0.1.1',
'jinaai/jina': 'test-pip',
}
def build_docker_image(image_name, image_name_tag_map):
logger = JinaLogger('docker-compose-testing')
image_tag = image_name + ':' + image_name_tag_map[image_name]
image, build_logs = client.images.build(
path=os.path.join(cur_dir, image_name), tag=image_tag
)
for chunk in build_logs:
if 'stream' in chunk:
for line in chunk['stream'].splitlines():
logger.debug(line)
return image.tags[-1]
@pytest.fixture(autouse=True)
def set_test_pip_version():
os.environ['JINA_GATEWAY_IMAGE'] = 'jinaai/jina:test-pip'
yield
del os.environ['JINA_GATEWAY_IMAGE']
@pytest.fixture(autouse=True)
def build_images(image_name_tag_map):
for image in image_name_tag_map.keys():
if image != 'jinaai/jina':
build_docker_image(image, image_name_tag_map)
@pytest.fixture
def docker_images(request, image_name_tag_map):
image_names = request.param
images = [
image_name + ':' + image_name_tag_map[image_name] for image_name in image_names
]
return images
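# Editor's sketch (added, hypothetical test): how the indirect `docker_images`
# fixture above is typically consumed from a test module next to this conftest;
# the executor names are taken from image_name_tag_map.
import pytest

@pytest.mark.parametrize(
    'docker_images', [['test-executor', 'executor-merger']], indirect=True
)
def test_docker_images_resolved(docker_images):
    assert docker_images == ['test-executor:0.13.1', 'executor-merger:0.1.1']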
|