input | output
---|---
import numpy as np
import orjson
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import NdArray
def test_proto_tensor():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
tensor._to_node_protobuf()
def test_from_list():
tensor = parse_obj_as(NdArray, [[0.0, 0.0], [0.0, 0.0]])
assert (tensor == np.zeros((2, 2))).all()
def test_json_schema():
schema_json_of(NdArray)
def test_dump_json():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
orjson_dumps(tensor)
def test_load_json():
tensor = parse_obj_as(NdArray, np.zeros((2, 2)))
json = orjson_dumps(tensor)
print(json)
print(type(json))
new_tensor = orjson.loads(json)
assert (new_tensor == tensor).all()
def test_unwrap():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
ndarray = tensor.unwrap()
assert not isinstance(ndarray, NdArray)
assert isinstance(ndarray, np.ndarray)
assert isinstance(tensor, NdArray)
assert (ndarray == np.zeros((3, 224, 224))).all()
def test_parametrized():
# correct shape, single axis
tensor = parse_obj_as(NdArray[128], np.zeros(128))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (128,)
    # correct shape, multiple axes
tensor = parse_obj_as(NdArray[3, 224, 224], np.zeros((3, 224, 224)))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (3, 224, 224)
    # wrong but reshapable shape (same element count, different layout)
    tensor = parse_obj_as(NdArray[3, 224, 224], np.zeros((224, 3, 224)))
assert isinstance(tensor, NdArray)
assert isinstance(tensor, np.ndarray)
assert tensor.shape == (3, 224, 224)
# wrong and not reshapable shape
with pytest.raises(ValueError):
parse_obj_as(NdArray[3, 224, 224], np.zeros((224, 224)))
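# Hedged addition (not part of the original suite): a minimal sketch checking
# that parametrized validation reshapes inputs whose element count matches the
# declared shape, per the "wrong but reshapable" case above.
def test_parametrized_reshape_sketch():
    tensor = parse_obj_as(NdArray[3, 224, 224], np.zeros((224, 3, 224)))
    assert isinstance(tensor, NdArray)
    assert tensor.shape == (3, 224, 224)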
|
import numpy as np
import orjson
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import NdArray
def test_proto_tensor():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
tensor._to_node_protobuf()
def test_from_list():
tensor = parse_obj_as(NdArray, [[0.0, 0.0], [0.0, 0.0]])
assert (tensor == np.zeros((2, 2))).all()
def test_json_schema():
schema_json_of(NdArray)
def test_dump_json():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
orjson_dumps(tensor)
def test_load_json():
tensor = parse_obj_as(NdArray, np.zeros((2, 2)))
json = orjson_dumps(tensor)
print(json)
print(type(json))
new_tensor = orjson.loads(json)
assert (new_tensor == tensor).all()
def test_unwrap():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
ndarray = tensor.unwrap()
assert not isinstance(ndarray, NdArray)
assert isinstance(ndarray, np.ndarray)
assert isinstance(tensor, NdArray)
assert (ndarray == np.zeros((3, 224, 224))).all()
|
"""
Make.com API wrapper.
Currently cannot load documents.
"""
from typing import Any, List, Optional
import requests
from llama_index.core.base.response.schema import Response
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document, NodeWithScore, TextNode
class MakeWrapper(BaseReader):
"""Make reader."""
def load_data(self, *args: Any, **load_kwargs: Any) -> List[Document]:
"""
Load data from the input directory.
NOTE: This is not implemented.
"""
raise NotImplementedError("Cannot load documents from Make.com API.")
def pass_response_to_webhook(
self, webhook_url: str, response: Response, query: Optional[str] = None
) -> None:
"""
Pass response object to webhook.
Args:
webhook_url (str): Webhook URL.
response (Response): Response object.
query (Optional[str]): Query. Defaults to None.
"""
response_text = response.response
source_nodes = [n.dict() for n in response.source_nodes]
json_dict = {
"response": response_text,
"source_nodes": source_nodes,
"query": query,
}
r = requests.post(webhook_url, json=json_dict)
r.raise_for_status()
if __name__ == "__main__":
wrapper = MakeWrapper()
test_response = Response(
response="test response",
source_nodes=[NodeWithScore(node=TextNode(text="test source", id_="test id"))],
)
wrapper.pass_response_to_webhook(
"https://hook.us1.make.com/asdfadsfasdfasdfd",
test_response,
"Test query",
)
|
"""Make.com API wrapper.
Currently cannot load documents.
"""
from typing import Any, List, Optional
import requests
from llama_index.core.base.response.schema import Response
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document, NodeWithScore, TextNode
class MakeWrapper(BaseReader):
"""Make reader."""
def load_data(self, *args: Any, **load_kwargs: Any) -> List[Document]:
"""Load data from the input directory.
NOTE: This is not implemented.
"""
raise NotImplementedError("Cannot load documents from Make.com API.")
def pass_response_to_webhook(
self, webhook_url: str, response: Response, query: Optional[str] = None
) -> None:
"""Pass response object to webhook.
Args:
webhook_url (str): Webhook URL.
response (Response): Response object.
query (Optional[str]): Query. Defaults to None.
"""
response_text = response.response
source_nodes = [n.dict() for n in response.source_nodes]
json_dict = {
"response": response_text,
"source_nodes": source_nodes,
"query": query,
}
r = requests.post(webhook_url, json=json_dict)
r.raise_for_status()
if __name__ == "__main__":
wrapper = MakeWrapper()
test_response = Response(
response="test response",
source_nodes=[NodeWithScore(node=TextNode(text="test source", id_="test id"))],
)
wrapper.pass_response_to_webhook(
"https://hook.us1.make.com/asdfadsfasdfasdfd",
test_response,
"Test query",
)
|
_base_ = '../fast_rcnn/fast-rcnn_r50-caffe_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_head=dict(
bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
assigner=dict(
pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65),
sampler=dict(num=256))),
test_cfg=dict(rcnn=dict(score_thr=1e-3)))
# MMEngine supports the following two ways; users can choose
# whichever is more convenient
# train_dataloader = dict(dataset=dict(proposal_file='proposals/crpn_r50_caffe_fpn_1x_train2017.pkl')) # noqa
_base_.train_dataloader.dataset.proposal_file = 'proposals/crpn_r50_caffe_fpn_1x_train2017.pkl' # noqa
# val_dataloader = dict(dataset=dict(proposal_file='proposals/crpn_r50_caffe_fpn_1x_val2017.pkl')) # noqa
# test_dataloader = val_dataloader
_base_.val_dataloader.dataset.proposal_file = 'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl' # noqa
test_dataloader = _base_.val_dataloader
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
|
_base_ = '../fast_rcnn/fast-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
roi_head=dict(
bbox_head=dict(
bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
assigner=dict(
pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65),
sampler=dict(num=256))),
test_cfg=dict(rcnn=dict(score_thr=1e-3)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=300),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=300),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['proposals']),
dict(
type='ToDataContainer',
fields=[dict(key='proposals', stack=False)]),
dict(type='Collect', keys=['img', 'proposals']),
])
]
# TODO support proposals input
data = dict(
train=dict(
proposal_file=data_root +
'proposals/crpn_r50_caffe_fpn_1x_train2017.pkl',
pipeline=train_pipeline),
val=dict(
proposal_file=data_root +
'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl',
pipeline=test_pipeline),
test=dict(
proposal_file=data_root +
'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl',
pipeline=test_pipeline))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
|
import pytest
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import ToolSelection
from llama_index.core.bridge.pydantic import BaseModel, ValidationError
from llama_index.core.agent.workflow.workflow_events import (
AgentWorkflowStartEvent,
AgentOutput,
PydanticConversionWarning,
)
from llama_index.core.memory import Memory
@pytest.fixture()
def example_agent_output() -> dict:
return {
"response": ChatMessage(role="user", content="30 times 2 is 60."),
"tool_calls": [
ToolSelection(
tool_id="1", tool_name="multiply", tool_kwargs={"i": 30, "j": 2}
)
],
"raw": '{"role": "user", "content": "30 times 2 is 60."}',
"structured_response": {"operation": "30 times 2", "result": 60},
"current_agent_name": "CalculatorAgent",
}
class MathResult(BaseModel):
operation: str
result: int
class WrongMathResult(BaseModel):
operation: str
result: str
def test_agent_workflow_start_event():
event = AgentWorkflowStartEvent(
user_msg="Hello, world!",
chat_history=[ChatMessage(role="user", content="Hello, world!")],
max_iterations=10,
)
assert event.user_msg == "Hello, world!"
assert event.chat_history[0].role.value == "user"
assert event.chat_history[0].content == "Hello, world!"
assert event.max_iterations == 10
def test_agent_workflow_start_event_with_dict():
event = AgentWorkflowStartEvent(
user_msg="Hello, world!",
chat_history=[{"role": "user", "content": "Hello, world!"}],
max_iterations=10,
)
assert event.user_msg == "Hello, world!"
assert event.chat_history[0].role.value == "user"
assert event.chat_history[0].content == "Hello, world!"
assert event.max_iterations == 10
def test_agent_workflow_start_event_to_dict():
event = AgentWorkflowStartEvent(
user_msg="Hello, world!",
chat_history=[ChatMessage(role="user", content="Hello, world!")],
max_iterations=10,
memory=Memory.from_defaults(),
)
# Memory is not included in the dump
dump = event.model_dump()
assert len(dump) == 3
assert dump["user_msg"] == "Hello, world!"
assert dump["chat_history"][0]["role"] == "user"
assert dump["chat_history"][0]["blocks"][0]["text"] == "Hello, world!"
assert dump["max_iterations"] == 10
def test_agent_output_with_structured_response(example_agent_output: dict) -> None:
try:
agent_output = AgentOutput.model_validate(example_agent_output)
success = True
except ValidationError:
success = False
assert success
assert agent_output.get_pydantic_model(MathResult) == MathResult.model_validate(
example_agent_output["structured_response"]
)
with pytest.warns(PydanticConversionWarning):
a = agent_output.get_pydantic_model(WrongMathResult)
assert a is None
|
from llama_index.core.llms import ChatMessage
from llama_index.core.agent.workflow.workflow_events import AgentWorkflowStartEvent
from llama_index.core.memory import Memory
def test_agent_workflow_start_event():
event = AgentWorkflowStartEvent(
user_msg="Hello, world!",
chat_history=[ChatMessage(role="user", content="Hello, world!")],
max_iterations=10,
)
assert event.user_msg == "Hello, world!"
assert event.chat_history[0].role.value == "user"
assert event.chat_history[0].content == "Hello, world!"
assert event.max_iterations == 10
def test_agent_workflow_start_event_with_dict():
event = AgentWorkflowStartEvent(
user_msg="Hello, world!",
chat_history=[{"role": "user", "content": "Hello, world!"}],
max_iterations=10,
)
assert event.user_msg == "Hello, world!"
assert event.chat_history[0].role.value == "user"
assert event.chat_history[0].content == "Hello, world!"
assert event.max_iterations == 10
def test_agent_workflow_start_event_to_dict():
event = AgentWorkflowStartEvent(
user_msg="Hello, world!",
chat_history=[ChatMessage(role="user", content="Hello, world!")],
max_iterations=10,
memory=Memory.from_defaults(),
)
# Memory is not included in the dump
dump = event.model_dump()
assert len(dump) == 3
assert dump["user_msg"] == "Hello, world!"
assert dump["chat_history"][0]["role"] == "user"
assert dump["chat_history"][0]["blocks"][0]["text"] == "Hello, world!"
assert dump["max_iterations"] == 10
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .atss_vlfusion_head import ATSSVLFusionHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .conditional_detr_head import ConditionalDETRHead
from .corner_head import CornerHead
from .dab_detr_head import DABDETRHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .dino_head import DINOHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead',
'BoxInstBboxHead', 'BoxInstMaskHead', 'ConditionalDETRHead', 'DINOHead',
'ATSSVLFusionHead', 'DABDETRHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .conditional_detr_head import ConditionalDETRHead
from .corner_head import CornerHead
from .dab_detr_head import DABDETRHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .dino_head import DINOHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead',
'BoxInstBboxHead', 'BoxInstMaskHead', 'ConditionalDETRHead', 'DINOHead',
'DABDETRHead'
]
|
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import SanaTransformer2DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class SanaTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = SanaTransformer2DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
model_split_percents = [0.7, 0.7, 0.9]
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
height = 32
width = 32
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (4, 32, 32)
@property
def output_shape(self):
return (4, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"patch_size": 1,
"in_channels": 4,
"out_channels": 4,
"num_layers": 1,
"attention_head_dim": 4,
"num_attention_heads": 2,
"num_cross_attention_heads": 2,
"cross_attention_head_dim": 4,
"cross_attention_dim": 8,
"caption_channels": 8,
"sample_size": 32,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"SanaTransformer2DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import SanaTransformer2DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class SanaTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = SanaTransformer2DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
model_split_percents = [0.7, 0.7, 0.9]
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
height = 32
width = 32
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (4, 32, 32)
@property
def output_shape(self):
return (4, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"patch_size": 1,
"in_channels": 4,
"out_channels": 4,
"num_layers": 1,
"attention_head_dim": 4,
"num_attention_heads": 2,
"num_cross_attention_heads": 2,
"cross_attention_head_dim": 4,
"cross_attention_dim": 8,
"caption_channels": 8,
"sample_size": 32,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"SanaTransformer2DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
import torch.nn.functional as F
from sentence_transformers.sparse_encoder import SparseEncoder
def normalized_mean_squared_error(reconstruction: torch.Tensor, original_input: torch.Tensor) -> torch.Tensor:
"""
:param reconstruction: output of Autoencoder.decode (shape: [batch, n_inputs])
:param original_input: input of Autoencoder.encode (shape: [batch, n_inputs])
:return: normalized mean squared error (shape: [1])
"""
return (((reconstruction - original_input) ** 2).mean(dim=1) / (original_input**2).mean(dim=1)).mean()
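# Worked example (illustrative comment, not part of the original module):
#   >>> normalized_mean_squared_error(torch.tensor([[1.0, 1.0]]), torch.tensor([[2.0, 2.0]]))
#   tensor(0.2500)
# Per row, MSE = ((2 - 1)^2 + (2 - 1)^2) / 2 = 1.0 and mean(original^2) = 4.0,
# so the normalized error is 1.0 / 4.0 = 0.25, averaged over the batch.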
class CSRReconstructionLoss(nn.Module):
def __init__(self, model: SparseEncoder, beta: float = 1.0) -> None:
"""
CSRReconstructionLoss implements the reconstruction loss component for Contrastive Sparse Representation (CSR) models.
This loss ensures that the sparse encoding can accurately reconstruct the original model embeddings through
three components:
1. A primary reconstruction loss (L_k) that measures the error between the original embedding and its
reconstruction using the top-k sparse components.
2. A secondary reconstruction loss (L_4k) that measures the error using the top-4k sparse components.
3. An auxiliary loss (L_aux) that helps to learn residual information.
Args:
model: SparseEncoder model with autoencoder components
beta: Weight for the auxiliary loss component (L_aux)
References:
- For more details, see the paper "Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation"
https://arxiv.org/abs/2503.01776
Requirements:
1. The model must be configured to output the necessary reconstruction components
2. Used with SparseEncoder models that implement compositional sparse autoencoding
Relations:
- Used as a component within :class:`CSRLoss` combined with a contrastive loss
Example:
::
- This loss is never used standalone, but instead used within the :class:`CSRLoss` class. See that loss for more details.
"""
super().__init__()
self.model = model
self.beta = beta
    def forward(self, sentence_features: Iterable[dict[str, torch.Tensor]]) -> torch.Tensor:
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(outputs)
    def compute_loss_from_embeddings(self, outputs: list[dict[str, torch.Tensor]]) -> torch.Tensor:
"""
Compute the CSRReconstruction loss from embeddings.
Args:
outputs: List of dictionaries containing sentence embeddings and their sparse representations
Returns:
total_loss: The total reconstruction loss value
"""
# Initialize loss components
total_L_k = 0.0
total_L_4k = 0.0
total_L_aux = 0.0
# Process each sentence feature
for features in outputs:
x = features["sentence_embedding_backbone"]
recons_k = features["decoded_embedding_k"]
recons_4k = features["decoded_embedding_4k"]
recons_aux = features["decoded_embedding_aux"]
reconsk_pre_bias = features["decoded_embedding_k_pre_bias"]
# L(k) = ||f(x) - f(dx)_k||₂²
L_k = F.mse_loss(x, recons_k)
# L(4k) = ||f(x) - f(dx)_4k||₂²
L_4k = F.mse_loss(x, recons_4k)
# L_aux = ||e - ê||₂²
L_aux = normalized_mean_squared_error(recons_aux, x - reconsk_pre_bias)
# Accumulate losses
total_L_k += L_k
total_L_4k += L_4k
total_L_aux += L_aux
# Average losses over batch
batch_size = len(outputs)
if batch_size > 0:
total_L_k /= batch_size
total_L_4k /= batch_size
total_L_aux /= batch_size
# Total loss: L_recon = L(k) + L(4k)/8 + β*L_aux
total_loss = total_L_k + total_L_4k / 8 + self.beta * total_L_aux
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {"beta": self.beta}
|
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
import torch.nn.functional as F
from sentence_transformers.sparse_encoder import SparseEncoder
def normalized_mean_squared_error(reconstruction: torch.Tensor, original_input: torch.Tensor) -> torch.Tensor:
"""
:param reconstruction: output of Autoencoder.decode (shape: [batch, n_inputs])
:param original_input: input of Autoencoder.encode (shape: [batch, n_inputs])
:return: normalized mean squared error (shape: [1])
"""
return (((reconstruction - original_input) ** 2).mean(dim=1) / (original_input**2).mean(dim=1)).mean()
class CSRReconstructionLoss(nn.Module):
def __init__(self, model: SparseEncoder, beta: float = 1.0) -> None:
"""
CSRReconstructionLoss implements the reconstruction loss component for Contrastive Sparse Representation (CSR) models.
This loss ensures that the sparse encoding can accurately reconstruct the original model embeddings through
three components:
1. A primary reconstruction loss (L_k) that measures the error between the original embedding and its
reconstruction using the top-k sparse components.
2. A secondary reconstruction loss (L_4k) that measures the error using the top-4k sparse components.
3. An auxiliary loss (L_aux) that helps to learn residual information.
Args:
model: SparseEncoder model with autoencoder components
beta: Weight for the auxiliary loss component (L_aux)
References:
- For more details, see the paper "Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation"
https://arxiv.org/abs/2503.01776
Requirements:
1. The model must be configured to output the necessary reconstruction components
2. Used with SparseEncoder models that implement compositional sparse autoencoding
Relations:
- Used as a component within :class:`CSRLoss` combined with a contrastive loss
Example:
::
This loss is typically used within the :class:`CSRLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
self.beta = beta
    def forward(self, sentence_features: Iterable[dict[str, torch.Tensor]]) -> torch.Tensor:
"""
Forward pass of the CSRReconstruction Loss module.
This method is used when the loss is computed as part of the model's forward pass.
Args:
sentence_features: Iterable of dictionaries containing sentence embeddings and their sparse representations
Returns:
            The total reconstruction loss value
"""
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(outputs)
    def compute_loss_from_embeddings(self, outputs: list[dict[str, torch.Tensor]]) -> torch.Tensor:
"""
Compute the CSRReconstruction loss from embeddings.
Args:
outputs: List of dictionaries containing sentence embeddings and their sparse representations
Returns:
            The total reconstruction loss value
"""
# Initialize loss components
total_L_k = 0.0
total_L_4k = 0.0
total_L_aux = 0.0
# Process each sentence feature
for features in outputs:
x = features["sentence_embedding_backbone"]
recons_k = features["decoded_embedding_k"]
recons_4k = features["decoded_embedding_4k"]
recons_aux = features["decoded_embedding_aux"]
reconsk_pre_bias = features["decoded_embedding_k_pre_bias"]
# L(k) = ||f(x) - f(dx)_k||₂²
L_k = F.mse_loss(x, recons_k)
# L(4k) = ||f(x) - f(dx)_4k||₂²
L_4k = F.mse_loss(x, recons_4k)
# L_aux = ||e - ê||₂²
L_aux = normalized_mean_squared_error(recons_aux, x - reconsk_pre_bias)
# Accumulate losses
total_L_k += L_k
total_L_4k += L_4k
total_L_aux += L_aux
# Average losses over batch
batch_size = len(outputs)
if batch_size > 0:
total_L_k /= batch_size
total_L_4k /= batch_size
total_L_aux /= batch_size
# Total loss: L_recon = L(k) + L(4k)/8 + β*L_aux
total_loss = total_L_k + total_L_4k / 8 + self.beta * total_L_aux
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {"beta": self.beta}
|
from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.answer_consistency_metric import (
AnswerConsistencyMetric,
)
from tonic_validate.services.openai_service import OpenAIService
class AnswerConsistencyEvaluator(BaseEvaluator):
"""
Tonic Validate's answer consistency metric.
The output score is a float between 0.0 and 1.0.
See https://docs.tonic.ai/validate/ for more details.
Args:
openai_service(OpenAIService): The OpenAI service to use. Specifies the chat
completion model to use as the LLM evaluator. Defaults to "gpt-4".
"""
def __init__(self, openai_service: Optional[Any] = None):
if openai_service is None:
openai_service = OpenAIService("gpt-4")
self.openai_service = openai_service
self.metric = AnswerConsistencyMetric()
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any
) -> EvaluationResult:
from tonic_validate.classes.benchmark import BenchmarkItem
from tonic_validate.classes.llm_response import LLMResponse
benchmark_item = BenchmarkItem(question=query)
llm_response = LLMResponse(
llm_answer=response,
llm_context_list=contexts,
benchmark_item=benchmark_item,
)
score = self.metric.score(llm_response, self.openai_service)
return EvaluationResult(
query=query, contexts=contexts, response=response, score=score
)
def _get_prompts(self) -> PromptDictType:
return {}
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
return
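# Hedged usage sketch (illustrative values; tonic_validate's OpenAIService
# requires OpenAI credentials to be configured):
#
#     evaluator = AnswerConsistencyEvaluator()
#     result = await evaluator.aevaluate(
#         query="What color is the sky?",
#         response="The sky is blue.",
#         contexts=["The sky appears blue due to Rayleigh scattering."],
#     )
#     print(result.score)  # float between 0.0 and 1.0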
|
from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.answer_consistency_metric import (
AnswerConsistencyMetric,
)
from tonic_validate.services.openai_service import OpenAIService
class AnswerConsistencyEvaluator(BaseEvaluator):
"""Tonic Validate's answer consistency metric.
The output score is a float between 0.0 and 1.0.
See https://docs.tonic.ai/validate/ for more details.
Args:
openai_service(OpenAIService): The OpenAI service to use. Specifies the chat
completion model to use as the LLM evaluator. Defaults to "gpt-4".
"""
def __init__(self, openai_service: Optional[Any] = None):
if openai_service is None:
openai_service = OpenAIService("gpt-4")
self.openai_service = openai_service
self.metric = AnswerConsistencyMetric()
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any
) -> EvaluationResult:
from tonic_validate.classes.benchmark import BenchmarkItem
from tonic_validate.classes.llm_response import LLMResponse
benchmark_item = BenchmarkItem(question=query)
llm_response = LLMResponse(
llm_answer=response,
llm_context_list=contexts,
benchmark_item=benchmark_item,
)
score = self.metric.score(llm_response, self.openai_service)
return EvaluationResult(
query=query, contexts=contexts, response=response, score=score
)
def _get_prompts(self) -> PromptDictType:
return {}
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
return
|
# Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import SSDHead
class TestSSDHead(TestCase):
def test_ssd_head_loss(self):
"""Tests ssd head loss when truth is empty and non-empty."""
s = 300
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}]
cfg = Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
sampler=dict(type='PseudoSampler'),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False))
ssd_head = SSDHead(
num_classes=4,
in_channels=(1, 1, 1, 1, 1, 1),
stacked_convs=1,
feat_channels=1,
use_depthwise=True,
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=s,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
train_cfg=cfg)
        # The SSD head expects multiple levels of features per image
feats = (
torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))
for stride in ssd_head.prior_generator.strides)
cls_scores, bbox_preds = ssd_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = ssd_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
# When there is no truth, cls_loss and box_loss should all be zero.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
self.assertEqual(
empty_cls_loss.item(), 0,
'there should be no cls loss when there are no true boxes')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = ssd_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import SSDHead
class TestSSDHead(TestCase):
def test_ssd_head_loss(self):
"""Tests ssd head loss when truth is empty and non-empty."""
s = 300
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
cfg = Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
sampler=dict(type='PseudoSampler'),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False))
ssd_head = SSDHead(
num_classes=4,
in_channels=(1, 1, 1, 1, 1, 1),
stacked_convs=1,
feat_channels=1,
use_depthwise=True,
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=s,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
train_cfg=cfg)
        # The SSD head expects multiple levels of features per image
feats = (
torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))
for stride in ssd_head.prior_generator.strides)
cls_scores, bbox_preds = ssd_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = ssd_head.loss(cls_scores, bbox_preds, [gt_instances],
img_metas)
# When there is no truth, cls_loss and box_loss should all be zero.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
self.assertEqual(
empty_cls_loss.item(), 0,
'there should be no cls loss when there are no true boxes')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = ssd_head.loss(cls_scores, bbox_preds, [gt_instances],
img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.random.random import beta as beta
from keras.src.random.random import binomial as binomial
from keras.src.random.random import categorical as categorical
from keras.src.random.random import dropout as dropout
from keras.src.random.random import gamma as gamma
from keras.src.random.random import normal as normal
from keras.src.random.random import randint as randint
from keras.src.random.random import shuffle as shuffle
from keras.src.random.random import truncated_normal as truncated_normal
from keras.src.random.random import uniform as uniform
from keras.src.random.seed_generator import SeedGenerator as SeedGenerator
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.random.random import beta
from keras.src.random.random import binomial
from keras.src.random.random import categorical
from keras.src.random.random import dropout
from keras.src.random.random import gamma
from keras.src.random.random import normal
from keras.src.random.random import randint
from keras.src.random.random import shuffle
from keras.src.random.random import truncated_normal
from keras.src.random.random import uniform
from keras.src.random.seed_generator import SeedGenerator
|
import asyncio
import os
import random
import string
import tempfile
import time
import pytest
from jina import helper
@pytest.fixture(scope='function')
def random_workspace_name():
"""Generate a random workspace name with digits and letters."""
rand = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
return f'JINA_TEST_WORKSPACE_{rand}'
@pytest.fixture(scope='function')
def test_metas(tmpdir, random_workspace_name):
from jina.serve.executors.metas import get_default_metas
os.environ[random_workspace_name] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ[random_workspace_name]
yield metas
del os.environ[random_workspace_name]
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans"
)
time.sleep(10)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
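# Usage note (assumption inferred from `request.param`): this fixture is meant
# to be parametrized indirectly from the test, e.g.
#     @pytest.mark.parametrize(
#         'docker_compose', ['tests/docker-compose.yml'], indirect=True)
#     def test_with_services(docker_compose):
#         ...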
@pytest.fixture(scope='function')
def port_generator():
generated_ports = set()
def random_port():
port = helper.random_port()
while port in generated_ports:
port = helper.random_port()
generated_ports.add(port)
return port
return random_port
@pytest.fixture(autouse=True)
def test_log_level(monkeypatch):
monkeypatch.setenv('JINA_LOG_LEVEL', 'DEBUG')
@pytest.fixture(autouse=True)
def test_grpc_fork_support_false(monkeypatch):
monkeypatch.setenv('GRPC_ENABLE_FORK_SUPPORT', 'false')
@pytest.fixture(autouse=True)
def test_timeout_ctrl_time(monkeypatch):
monkeypatch.setenv('JINA_DEFAULT_TIMEOUT_CTRL', '500')
@pytest.fixture(autouse=True)
def test_disable_telemetry(monkeypatch):
monkeypatch.setenv('JINA_OPTOUT_TELEMETRY', 'True')
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'jina_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='session')
def event_loop(request):
"""
Valid only for `pytest.mark.asyncio` tests
"""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
|
import asyncio
import os
import random
import string
import tempfile
import time
import pytest
from jina import helper
@pytest.fixture(scope='function')
def random_workspace_name():
"""Generate a random workspace name with digits and letters."""
rand = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
return f'JINA_TEST_WORKSPACE_{rand}'
@pytest.fixture(scope='function')
def test_metas(tmpdir, random_workspace_name):
from jina.serve.executors.metas import get_default_metas
os.environ[random_workspace_name] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ[random_workspace_name]
yield metas
del os.environ[random_workspace_name]
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans"
)
time.sleep(10)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
@pytest.fixture(scope='function')
def port_generator():
generated_ports = set()
def random_port():
port = helper.random_port()
while port in generated_ports:
port = helper.random_port()
generated_ports.add(port)
return port
return random_port
@pytest.fixture(scope='function')
def test_envs(tmpdir):
os.environ['JINA_HUB_ROOT'] = str(tmpdir)
os.environ['JINA_HUB_CACHE_DIR'] = str(tmpdir)
yield None
del os.environ['JINA_HUB_ROOT']
del os.environ['JINA_HUB_CACHE_DIR']
@pytest.fixture(autouse=True)
def test_log_level(monkeypatch):
monkeypatch.setenv('JINA_LOG_LEVEL', 'DEBUG')
@pytest.fixture(autouse=True)
def test_grpc_fork_support_false(monkeypatch):
monkeypatch.setenv('GRPC_ENABLE_FORK_SUPPORT', 'false')
@pytest.fixture(autouse=True)
def test_timeout_ctrl_time(monkeypatch):
monkeypatch.setenv('JINA_DEFAULT_TIMEOUT_CTRL', '500')
@pytest.fixture(autouse=True)
def test_disable_telemetry(monkeypatch):
monkeypatch.setenv('JINA_OPTOUT_TELEMETRY', 'True')
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'jina_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='session')
def event_loop(request):
"""
Valid only for `pytest.mark.asyncio` tests
"""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
|
from typing import Literal
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ToolsIntegrationTests
from langchain_tests.unit_tests import ToolsUnitTests
class ParrotMultiplyTool(BaseTool):
name: str = "ParrotMultiplyTool"
description: str = (
"Multiply two numbers like a parrot. Parrots always add eighty for their matey."
)
def _run(self, a: int, b: int) -> int:
return a * b + 80
class ParrotMultiplyArtifactTool(BaseTool):
name: str = "ParrotMultiplyArtifactTool"
description: str = (
"Multiply two numbers like a parrot. Parrots always add eighty for their matey."
)
response_format: Literal["content_and_artifact"] = "content_and_artifact"
def _run(self, a: int, b: int) -> tuple[int, str]:
return a * b + 80, "parrot artifact"
class TestParrotMultiplyToolUnit(ToolsUnitTests):
@property
def tool_constructor(self) -> type[ParrotMultiplyTool]:
return ParrotMultiplyTool
@property
def tool_constructor_params(self) -> dict:
# if your tool constructor instead required initialization arguments like
# `def __init__(self, some_arg: int):`, you would return those here
# as a dictionary, e.g.: `return {'some_arg': 42}`
return {}
@property
def tool_invoke_params_example(self) -> dict:
"""Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - i.e. it should not
have {"name", "id", "args"} keys.
"""
return {"a": 2, "b": 3}
class TestParrotMultiplyToolIntegration(ToolsIntegrationTests):
@property
def tool_constructor(self) -> type[ParrotMultiplyTool]:
return ParrotMultiplyTool
@property
def tool_constructor_params(self) -> dict:
# if your tool constructor instead required initialization arguments like
# `def __init__(self, some_arg: int):`, you would return those here
# as a dictionary, e.g.: `return {'some_arg': 42}`
return {}
@property
def tool_invoke_params_example(self) -> dict:
"""Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - i.e. it should not
have {"name", "id", "args"} keys.
"""
return {"a": 2, "b": 3}
class TestParrotMultiplyArtifactToolIntegration(ToolsIntegrationTests):
@property
def tool_constructor(self) -> type[ParrotMultiplyArtifactTool]:
return ParrotMultiplyArtifactTool
@property
def tool_constructor_params(self) -> dict:
# if your tool constructor instead required initialization arguments like
# `def __init__(self, some_arg: int):`, you would return those here
# as a dictionary, e.g.: `return {'some_arg': 42}`
return {}
@property
def tool_invoke_params_example(self) -> dict:
"""Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - i.e. it should not
have {"name", "id", "args"} keys.
"""
return {"a": 2, "b": 3}
|
from typing import Literal
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ToolsIntegrationTests
from langchain_tests.unit_tests import ToolsUnitTests
class ParrotMultiplyTool(BaseTool):
name: str = "ParrotMultiplyTool"
description: str = (
"Multiply two numbers like a parrot. Parrots always add eighty for their matey."
)
def _run(self, a: int, b: int) -> int:
return a * b + 80
class ParrotMultiplyArtifactTool(BaseTool):
name: str = "ParrotMultiplyArtifactTool"
description: str = (
"Multiply two numbers like a parrot. Parrots always add eighty for their matey."
)
response_format: Literal["content_and_artifact"] = "content_and_artifact"
def _run(self, a: int, b: int) -> tuple[int, str]:
return a * b + 80, "parrot artifact"
class TestParrotMultiplyToolUnit(ToolsUnitTests):
@property
def tool_constructor(self) -> type[ParrotMultiplyTool]:
return ParrotMultiplyTool
@property
def tool_constructor_params(self) -> dict:
# if your tool constructor instead required initialization arguments like
# `def __init__(self, some_arg: int):`, you would return those here
# as a dictionary, e.g.: `return {'some_arg': 42}`
return {}
@property
def tool_invoke_params_example(self) -> dict:
"""
Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - i.e. it should not
have {"name", "id", "args"} keys.
"""
return {"a": 2, "b": 3}
class TestParrotMultiplyToolIntegration(ToolsIntegrationTests):
@property
def tool_constructor(self) -> type[ParrotMultiplyTool]:
return ParrotMultiplyTool
@property
def tool_constructor_params(self) -> dict:
# if your tool constructor instead required initialization arguments like
# `def __init__(self, some_arg: int):`, you would return those here
# as a dictionary, e.g.: `return {'some_arg': 42}`
return {}
@property
def tool_invoke_params_example(self) -> dict:
"""
Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - i.e. it should not
have {"name", "id", "args"} keys.
"""
return {"a": 2, "b": 3}
class TestParrotMultiplyArtifactToolIntegration(ToolsIntegrationTests):
@property
def tool_constructor(self) -> type[ParrotMultiplyArtifactTool]:
return ParrotMultiplyArtifactTool
@property
def tool_constructor_params(self) -> dict:
# if your tool constructor instead required initialization arguments like
# `def __init__(self, some_arg: int):`, you would return those here
# as a dictionary, e.g.: `return {'some_arg': 42}`
return {}
@property
def tool_invoke_params_example(self) -> dict:
"""
Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - i.e. it should not
have {"name", "id", "args"} keys.
"""
return {"a": 2, "b": 3}
|
import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
class NetmindLLM(OpenAILike):
"""
Netmind LLM.
Examples:
`pip install llama-index-llms-netmind`
```python
from llama_index.llms.netmind import NetmindLLM
# set api key in env or in llm
# import os
# os.environ["NETMIND_API_KEY"] = "your api key"
llm = NetmindLLM(
model="meta-llama/Llama-3.3-70B-Instruct", api_key="your_api_key"
)
resp = llm.complete("Who is Paul Graham?")
print(resp)
```
"""
def __init__(
self,
model: str,
api_key: Optional[str] = None,
api_base: str = "https://api.netmind.ai/inference-api/openai/v1",
is_chat_model: bool = True,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("NETMIND_API_KEY", None)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=is_chat_model,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "NetmindLLM"
|
import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
class NetmindLLM(OpenAILike):
"""Netmind LLM.
Examples:
`pip install llama-index-llms-netmind`
```python
from llama_index.llms.netmind import NetmindLLM
# set api key in env or in llm
# import os
# os.environ["NETMIND_API_KEY"] = "your api key"
llm = NetmindLLM(
model="meta-llama/Llama-3.3-70B-Instruct", api_key="your_api_key"
)
resp = llm.complete("Who is Paul Graham?")
print(resp)
```
"""
def __init__(
self,
model: str,
api_key: Optional[str] = None,
api_base: str = "https://api.netmind.ai/inference-api/openai/v1",
is_chat_model: bool = True,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("NETMIND_API_KEY", None)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=is_chat_model,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "NetmindLLM"
|
"""Oxylabs Web Reader."""
import asyncio
from typing import Any, Dict, List, Optional, TYPE_CHECKING
from platform import architecture, python_version
from importlib.metadata import version
from llama_index.core.bridge.pydantic import Field
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from markdownify import markdownify
from llama_index.readers.web.oxylabs_web.utils import strip_html, json_to_markdown
if TYPE_CHECKING:
from oxylabs.internal.api import AsyncAPI, RealtimeAPI
def get_default_config() -> dict[str, Any]:
from oxylabs.utils.utils import prepare_config
return prepare_config(async_integration=True)
class OxylabsWebReader(BasePydanticReader):
"""
Scrape any website with Oxylabs Scraper.
Oxylabs API documentation:
https://developers.oxylabs.io/scraper-apis/web-scraper-api/other-websites
Args:
username: Oxylabs username.
password: Oxylabs password.
Example:
.. code-block:: python
from llama_index.readers.web.oxylabs_web.base import OxylabsWebReader
reader = OxylabsWebReader(
username=os.environ["OXYLABS_USERNAME"], password=os.environ["OXYLABS_PASSWORD"]
)
docs = reader.load_data(
[
"https://sandbox.oxylabs.io/products/1",
"https://sandbox.oxylabs.io/products/2"
],
{
"parse": True,
}
)
print(docs[0].text)
"""
timeout_s: int = 100
oxylabs_scraper_url: str = "https://realtime.oxylabs.io/v1/queries"
api: "RealtimeAPI"
async_api: "AsyncAPI"
default_config: dict[str, Any] = Field(default_factory=get_default_config)
def __init__(self, username: str, password: str, **kwargs) -> None:
from oxylabs.internal.api import AsyncAPI, APICredentials, RealtimeAPI
credentials = APICredentials(username=username, password=password)
bits, _ = architecture()
sdk_type = (
f"oxylabs-llama-index-web-sdk-python/"
f"{version('llama-index-readers-web')} "
f"({python_version()}; {bits})"
)
api = RealtimeAPI(credentials, sdk_type=sdk_type)
async_api = AsyncAPI(credentials, sdk_type=sdk_type)
super().__init__(api=api, async_api=async_api, **kwargs)
@classmethod
def class_name(cls) -> str:
return "OxylabsWebReader"
def _get_document_from_response(self, response: dict[str, Any]) -> Document:
content = response["results"][0]["content"]
if isinstance(content, (dict, list)):
text = json_to_markdown(content)
else:
            stripped_html = strip_html(str(content))
            text = markdownify(stripped_html)
return Document(
metadata={"oxylabs_job": response["job"]},
text=text,
)
async def aload_data(
self,
urls: list[str],
additional_params: Optional[Dict[str, Any]] = None,
) -> List[Document]:
"""
Asynchronously load data from urls.
Args:
urls: List of URLs to load.
additional_params: Dictionary with the scraper parameters. Accepts the values from
the additional parameters described here:
https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/generic-target#additional
"""
if additional_params is None:
additional_params = {}
responses = await asyncio.gather(
*[
self.async_api.get_response(
{**additional_params, "url": url},
self.default_config,
)
for url in urls
]
)
return [
self._get_document_from_response(response)
for response in responses
if response
]
def load_data(
self,
urls: list[str],
additional_params: Optional[Dict[str, Any]] = None,
) -> List[Document]:
"""
Load data from urls.
Args:
urls: List of URLs to load.
additional_params: Dictionary with the scraper parameters. Accepts the values from
the additional parameters described here:
https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/generic-target#additional
"""
if additional_params is None:
additional_params = {}
responses = [
self.api.get_response(
{**additional_params, "url": url},
self.default_config,
)
for url in urls
]
return [
self._get_document_from_response(response)
for response in responses
if response
]
|
"""Oxylabs Web Reader."""
import asyncio
from typing import Any, List
from platform import architecture, python_version
from importlib.metadata import version
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from markdownify import markdownify
from llama_index.readers.web.oxylabs_web.utils import strip_html, json_to_markdown
from oxylabs.utils.utils import prepare_config
from oxylabs.internal.api import AsyncAPI, APICredentials, RealtimeAPI
class OxylabsWebReader(BasePydanticReader):
"""
Scrape any website with Oxylabs Scraper.
Oxylabs API documentation:
https://developers.oxylabs.io/scraper-apis/web-scraper-api/other-websites
Args:
username: Oxylabs username.
password: Oxylabs password.
Example:
.. code-block:: python
from llama_index.readers.web.oxylabs_web.base import OxylabsWebReader
reader = OxylabsWebReader(
username=os.environ["OXYLABS_USERNAME"], password=os.environ["OXYLABS_PASSWORD"]
)
docs = reader.load_data(
[
"https://sandbox.oxylabs.io/products/1",
"https://sandbox.oxylabs.io/products/2"
],
{
"parse": True,
}
)
print(docs[0].text)
"""
timeout_s: int = 100
oxylabs_scraper_url: str = "https://realtime.oxylabs.io/v1/queries"
api: RealtimeAPI
async_api: AsyncAPI
default_config: dict[str, Any] = prepare_config(async_integration=True)
def __init__(self, username: str, password: str, **kwargs) -> None:
credentials = APICredentials(username=username, password=password)
bits, _ = architecture()
sdk_type = (
f"oxylabs-llama-index-web-sdk-python/"
f"{version('llama-index-readers-web')} "
f"({python_version()}; {bits})"
)
api = RealtimeAPI(credentials, sdk_type=sdk_type)
async_api = AsyncAPI(credentials, sdk_type=sdk_type)
super().__init__(api=api, async_api=async_api, **kwargs)
@classmethod
def class_name(cls) -> str:
return "OxylabsWebReader"
def _get_document_from_response(self, response: dict[str, Any]) -> Document:
content = response["results"][0]["content"]
if isinstance(content, (dict, list)):
text = json_to_markdown(content)
else:
            stripped_html = strip_html(str(content))
            text = markdownify(stripped_html)
return Document(
metadata={"oxylabs_job": response["job"]},
text=text,
)
async def aload_data(
self,
urls: list[str],
additional_params: dict[str, Any] | None = None,
) -> List[Document]:
"""
Asynchronously load data from urls.
Args:
urls: List of URLs to load.
additional_params: Dictionary with the scraper parameters. Accepts the values from
the additional parameters described here:
https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/generic-target#additional
"""
if additional_params is None:
additional_params = {}
responses = await asyncio.gather(
*[
self.async_api.get_response(
{**additional_params, "url": url},
self.default_config,
)
for url in urls
]
)
return [
self._get_document_from_response(response)
for response in responses
if response
]
def load_data(
self,
urls: list[str],
additional_params: dict[str, Any] | None = None,
) -> List[Document]:
"""
Load data from urls.
Args:
urls: List of URLs to load.
additional_params: Dictionary with the scraper parameters. Accepts the values from
the additional parameters described here:
https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/generic-target#additional
"""
if additional_params is None:
additional_params = {}
responses = [
self.api.get_response(
{**additional_params, "url": url},
self.default_config,
)
for url in urls
]
return [
self._get_document_from_response(response)
for response in responses
if response
]
|
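Both variants of aload_data fan requests out with asyncio.gather and then drop falsy responses. A self-contained sketch of that pattern, with fetch as a placeholder for the real API round-trip:

import asyncio

async def fetch(url: str) -> dict:
    await asyncio.sleep(0)   # placeholder for an API call
    return {"url": url}

async def fetch_all(urls: list[str]) -> list[dict]:
    responses = await asyncio.gather(*(fetch(u) for u in urls))
    return [r for r in responses if r]   # gather preserves order; drop falsy results

print(asyncio.run(fetch_all(["https://sandbox.oxylabs.io/products/1"])))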
import re
from typing import Any, Dict, Optional, Union
from sentence_transformers import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
self.greater_is_better = True
# TODO: Add better `primary_metrics` support
    def __call__(
        self, model: SentenceTransformer, output_path: Optional[str] = None, epoch: int = -1, steps: int = -1
    ) -> Union[float, Dict[str, float]]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
:param model:
the model to evaluate
:param output_path:
path where predictions and metrics are written to
        :param epoch:
the epoch where the evaluation takes place.
This is used for the file prefixes.
If this is -1, then we assume evaluation on test data.
        :param steps:
the steps in the current epoch at time of the evaluation.
This is used for the file prefixes.
If this is -1, then we assume evaluation at the end of the epoch.
:return: Either a score for the evaluation with a higher score indicating a better result,
or a dictionary with scores. If the latter is chosen, then `evaluator.primary_metric`
must be defined
"""
pass
def prefix_name_to_metrics(self, metrics: Dict[str, float], name: str):
if not name:
return metrics
metrics = {name + "_" + key: value for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(self, model: "SentenceTransformer", metrics: Dict[str, Any]) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Remove "Evaluator" from the class name
2. Add a space before every capital letter
"""
class_name = self.__class__.__name__
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
        except ValueError:
            pass
        return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
|
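The two helpers above are easiest to see on concrete values. A standalone sketch (describe mirrors the description property; Python 3.9+ for removesuffix):

import re

def describe(class_name: str) -> str:
    class_name = class_name.removesuffix("Evaluator")
    return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)

metrics = {"accuracy": 0.91, "f1": 0.88}
print({f"sts-dev_{key}": value for key, value in metrics.items()})
# {'sts-dev_accuracy': 0.91, 'sts-dev_f1': 0.88}
print(describe("BinaryClassificationEvaluator"))  # Binary Classification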
from typing import Optional

from sentence_transformers import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators
Extend this class and implement __call__ for custom evaluators.
"""
    def __call__(self, model: SentenceTransformer, output_path: Optional[str] = None, epoch: int = -1, steps: int = -1) -> float:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
:param model:
the model to evaluate
:param output_path:
path where predictions and metrics are written to
        :param epoch:
the epoch where the evaluation takes place.
This is used for the file prefixes.
If this is -1, then we assume evaluation on test data.
        :param steps:
the steps in the current epoch at time of the evaluation.
This is used for the file prefixes.
If this is -1, then we assume evaluation at the end of the epoch.
:return: a score for the evaluation with a higher score indicating a better result
"""
pass
|
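A minimal custom evaluator following the __call__ contract above, returning a single float where higher is better; the scoring logic is a deliberate placeholder:

class MeanNormEvaluator(SentenceEvaluator):
    def __init__(self, sentences):
        self.sentences = sentences

    def __call__(self, model, output_path: Optional[str] = None, epoch: int = -1, steps: int = -1) -> float:
        embeddings = model.encode(self.sentences)
        return float(abs(embeddings).mean())   # placeholder score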
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from .folder import default_loader
from .utils import download_and_extract_archive
from .vision import VisionDataset
class SUN397(VisionDataset):
"""`The SUN397 Data Set <https://vision.princeton.edu/projects/2010/SUN/>`_.
The SUN397 or Scene UNderstanding (SUN) is a dataset for scene recognition consisting of
397 categories with 108'754 images.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
transform (callable, optional): A function/transform that takes in a PIL image or torch.Tensor, depends on the given loader,
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
"""
_DATASET_URL = "http://vision.princeton.edu/projects/2010/SUN/SUN397.tar.gz"
_DATASET_MD5 = "8ca2778205c41d23104230ba66911c7a"
def __init__(
self,
root: Union[str, Path],
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
loader: Callable[[Union[str, Path]], Any] = default_loader,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._data_dir = Path(self.root) / "SUN397"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
with open(self._data_dir / "ClassName.txt") as f:
self.classes = [c[3:].strip() for c in f]
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
self._image_files = list(self._data_dir.rglob("sun_*.jpg"))
self._labels = [
self.class_to_idx["/".join(path.relative_to(self._data_dir).parts[1:-1])] for path in self._image_files
]
self.loader = loader
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = self.loader(image_file)
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def _check_exists(self) -> bool:
return self._data_dir.is_dir()
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._DATASET_URL, download_root=self.root, md5=self._DATASET_MD5)
|
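A usage sketch of the loader hook documented above; the root path is a placeholder, and recent torchvision versions accept a file path in decode_image directly:

from torchvision.io import decode_image

ds = SUN397(root="data", download=False, loader=decode_image)
img, label = ds[0]   # img is a uint8 tensor rather than a PIL image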
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
import PIL.Image
from .utils import download_and_extract_archive
from .vision import VisionDataset
class SUN397(VisionDataset):
"""`The SUN397 Data Set <https://vision.princeton.edu/projects/2010/SUN/>`_.
The SUN397 or Scene UNderstanding (SUN) is a dataset for scene recognition consisting of
397 categories with 108'754 images.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
_DATASET_URL = "http://vision.princeton.edu/projects/2010/SUN/SUN397.tar.gz"
_DATASET_MD5 = "8ca2778205c41d23104230ba66911c7a"
def __init__(
self,
root: Union[str, Path],
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._data_dir = Path(self.root) / "SUN397"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
with open(self._data_dir / "ClassName.txt") as f:
self.classes = [c[3:].strip() for c in f]
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
self._image_files = list(self._data_dir.rglob("sun_*.jpg"))
self._labels = [
self.class_to_idx["/".join(path.relative_to(self._data_dir).parts[1:-1])] for path in self._image_files
]
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def _check_exists(self) -> bool:
return self._data_dir.is_dir()
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._DATASET_URL, download_root=self.root, md5=self._DATASET_MD5)
|
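Worked example of the label bookkeeping shared by both SUN397 variants: ClassName.txt entries such as "/a/abbey" lose their "/x/" shard prefix via c[3:], and a file's class is the directory path between the shard letter and the file name:

from pathlib import Path

line = "/a/abbey"
print(line[3:].strip())   # abbey

data_dir = Path("SUN397")
p = data_dir / "a" / "abbey" / "sun_0001.jpg"
print("/".join(p.relative_to(data_dir).parts[1:-1]))   # abbey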
from PIL import Image
from sentence_transformers import SentenceTransformer, models, util
###########
image = Image.open("two_dogs_in_snow.jpg")
from transformers import CLIPModel, CLIPProcessor
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
inputs = processor(text=["a cat", "a dog"], images=[image], return_tensors="pt", padding=True)
output = model(**inputs)
# vision_outputs = model.vision_model(pixel_values=inputs['pixel_values'])
# image_embeds = model.visual_projection(vision_outputs[1])
# print(image_embeds.shape)
# exit()
# Load CLIP model
clip = models.CLIPModel()
model = SentenceTransformer(modules=[clip])
model.save("tmp-clip-model")
model = SentenceTransformer("tmp-clip-model")
# Encode an image:
img_emb = model.encode(Image.open("two_dogs_in_snow.jpg"))
# Encode text descriptions
text_emb = model.encode(["Two dogs in the snow", "A cat on a table", "A picture of London at night"])
# Compute cosine similarities
cos_scores = util.cos_sim(img_emb, text_emb)
print(cos_scores)
|
from PIL import Image
from sentence_transformers import SentenceTransformer, models, util
###########
image = Image.open("two_dogs_in_snow.jpg")
from transformers import CLIPModel, CLIPProcessor
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
inputs = processor(text=["a cat", "a dog"], images=[image], return_tensors="pt", padding=True)
output = model(**inputs)
# vision_outputs = model.vision_model(pixel_values=inputs['pixel_values'])
# image_embeds = model.visual_projection(vision_outputs[1])
# print(image_embeds.shape)
# exit()
# Load CLIP model
clip = models.CLIPModel()
model = SentenceTransformer(modules=[clip])
model.save("tmp-clip-model")
model = SentenceTransformer("tmp-clip-model")
# Encode an image:
img_emb = model.encode(Image.open("two_dogs_in_snow.jpg"))
# Encode text descriptions
text_emb = model.encode(["Two dogs in the snow", "A cat on a table", "A picture of London at night"])
# Compute cosine similarities
cos_scores = util.cos_sim(img_emb, text_emb)
print(cos_scores)
|
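The shape of cos_scores above is (n_images, n_texts), so the best caption per image is an argmax over columns. A sketch with random stand-in embeddings (the 512 dimension is illustrative):

import torch
from sentence_transformers import util

img_emb = torch.randn(1, 512)
text_emb = torch.randn(3, 512)
scores = util.cos_sim(img_emb, text_emb)   # shape (1, 3)
best_caption = int(scores.argmax(dim=1))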
from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`, :class:`~sentence_transformers.sampler.DefaultBatchSampler`, Callable[[...], :class:`~sentence_transformers.sampler.DefaultBatchSampler`]], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`, :class:`~sentence_transformers.sampler.MultiDatasetDefaultBatchSampler`, Callable[[...], :class:`~sentence_transformers.sampler.MultiDatasetDefaultBatchSampler`]], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
learning_rate_mapping (`Optional[Dict[str, float]]`, *optional*):
A mapping of parameter name regular expressions to learning rates. This allows you to set different
learning rates for different parts of the model, e.g., `{'IDF\.*': 1e-3}` for the IDF module. This is
useful when you want to fine-tune specific parts of the model with different learning rates.
"""
|
from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
learning_rate_mapping (`Optional[Dict[str, float]]`, *optional*):
A mapping of parameter name regular expressions to learning rates. This allows you to set different
learning rates for different parts of the model, e.g., `{'IDF\.*': 1e-3}` for the IDF module. This is
useful when you want to fine-tune specific parts of the model with different learning rates.
"""
|
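Concrete instances of the four accepted prompt formats described above; all column and dataset names are hypothetical:

prompts_single = "query: "
prompts_per_column = {"question": "query: ", "answer": "document: "}
prompts_per_dataset = {"quora": "query: ", "natural_questions": "question: "}
prompts_nested = {
    "quora": {"question": "query: ", "answer": "document: "},
    "natural_questions": {"question": "question: "},
}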
# Copyright (c) OpenMMLab. All rights reserved.
from .base_roi_head import BaseRoIHead
from .bbox_heads import (BBoxHead, ConvFCBBoxHead, DIIHead,
DoubleConvFCBBoxHead, SABLHead, SCNetBBoxHead,
Shared2FCBBoxHead, Shared4Conv1FCBBoxHead)
from .cascade_roi_head import CascadeRoIHead
from .double_roi_head import DoubleHeadRoIHead
from .dynamic_roi_head import DynamicRoIHead
from .grid_roi_head import GridRoIHead
from .htc_roi_head import HybridTaskCascadeRoIHead
from .mask_heads import (CoarseMaskHead, FCNMaskHead, FeatureRelayHead,
FusedSemanticHead, GlobalContextHead, GridHead,
HTCMaskHead, MaskIoUHead, MaskPointHead,
SCNetMaskHead, SCNetSemanticHead)
from .mask_scoring_roi_head import MaskScoringRoIHead
from .pisa_roi_head import PISARoIHead
from .point_rend_roi_head import PointRendRoIHead
from .roi_extractors import (BaseRoIExtractor, GenericRoIExtractor,
SingleRoIExtractor)
from .scnet_roi_head import SCNetRoIHead
from .shared_heads import ResLayer
from .sparse_roi_head import SparseRoIHead
from .standard_roi_head import StandardRoIHead
from .trident_roi_head import TridentRoIHead
__all__ = [
'BaseRoIHead', 'CascadeRoIHead', 'DoubleHeadRoIHead', 'MaskScoringRoIHead',
'HybridTaskCascadeRoIHead', 'GridRoIHead', 'ResLayer', 'BBoxHead',
'ConvFCBBoxHead', 'DIIHead', 'SABLHead', 'Shared2FCBBoxHead',
'StandardRoIHead', 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead',
'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',
'MaskIoUHead', 'BaseRoIExtractor', 'GenericRoIExtractor',
'SingleRoIExtractor', 'PISARoIHead', 'PointRendRoIHead', 'MaskPointHead',
'CoarseMaskHead', 'DynamicRoIHead', 'SparseRoIHead', 'TridentRoIHead',
'SCNetRoIHead', 'SCNetMaskHead', 'SCNetSemanticHead', 'SCNetBBoxHead',
'FeatureRelayHead', 'GlobalContextHead'
]
|
from .base_roi_head import BaseRoIHead
from .bbox_heads import (BBoxHead, ConvFCBBoxHead, DIIHead,
DoubleConvFCBBoxHead, SABLHead, SCNetBBoxHead,
Shared2FCBBoxHead, Shared4Conv1FCBBoxHead)
from .cascade_roi_head import CascadeRoIHead
from .double_roi_head import DoubleHeadRoIHead
from .dynamic_roi_head import DynamicRoIHead
from .grid_roi_head import GridRoIHead
from .htc_roi_head import HybridTaskCascadeRoIHead
from .mask_heads import (CoarseMaskHead, FCNMaskHead, FeatureRelayHead,
FusedSemanticHead, GlobalContextHead, GridHead,
HTCMaskHead, MaskIoUHead, MaskPointHead,
SCNetMaskHead, SCNetSemanticHead)
from .mask_scoring_roi_head import MaskScoringRoIHead
from .pisa_roi_head import PISARoIHead
from .point_rend_roi_head import PointRendRoIHead
from .roi_extractors import (BaseRoIExtractor, GenericRoIExtractor,
SingleRoIExtractor)
from .scnet_roi_head import SCNetRoIHead
from .shared_heads import ResLayer
from .sparse_roi_head import SparseRoIHead
from .standard_roi_head import StandardRoIHead
from .trident_roi_head import TridentRoIHead
__all__ = [
'BaseRoIHead', 'CascadeRoIHead', 'DoubleHeadRoIHead', 'MaskScoringRoIHead',
'HybridTaskCascadeRoIHead', 'GridRoIHead', 'ResLayer', 'BBoxHead',
'ConvFCBBoxHead', 'DIIHead', 'SABLHead', 'Shared2FCBBoxHead',
'StandardRoIHead', 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead',
'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',
'MaskIoUHead', 'BaseRoIExtractor', 'GenericRoIExtractor',
'SingleRoIExtractor', 'PISARoIHead', 'PointRendRoIHead', 'MaskPointHead',
'CoarseMaskHead', 'DynamicRoIHead', 'SparseRoIHead', 'TridentRoIHead',
'SCNetRoIHead', 'SCNetMaskHead', 'SCNetSemanticHead', 'SCNetBBoxHead',
'FeatureRelayHead', 'GlobalContextHead'
]
|
from collections.abc import Sequence
from typing import Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain.agents.format_scratchpad.openai_tools import (
format_to_openai_tool_messages,
)
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
def create_openai_tools_agent(
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: ChatPromptTemplate,
strict: Optional[bool] = None,
) -> Runnable:
"""Create an agent that uses OpenAI tools.
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use. See Prompt section below for more on the expected
input variables.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Raises:
ValueError: If the prompt is missing required variables.
Example:
.. code-block:: python
from langchain import hub
from langchain_community.chat_models import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_tools_agent
prompt = hub.pull("hwchase17/openai-tools-agent")
model = ChatOpenAI()
tools = ...
agent = create_openai_tools_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Prompt:
The agent prompt must have an `agent_scratchpad` key that is a
``MessagesPlaceholder``. Intermediate agent actions and tool output
messages will be passed in here.
Here's an example:
.. code-block:: python
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant"),
MessagesPlaceholder("chat_history", optional=True),
("human", "{input}"),
MessagesPlaceholder("agent_scratchpad"),
]
)
"""
missing_vars = {"agent_scratchpad"}.difference(
prompt.input_variables + list(prompt.partial_variables)
)
if missing_vars:
raise ValueError(f"Prompt missing required variables: {missing_vars}")
llm_with_tools = llm.bind(
tools=[convert_to_openai_tool(tool, strict=strict) for tool in tools]
)
agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_to_openai_tool_messages(
x["intermediate_steps"]
)
)
| prompt
| llm_with_tools
| OpenAIToolsAgentOutputParser()
)
return agent
|
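The fail-fast validation at the top of create_openai_tools_agent can be exercised directly; a sketch showing that a prompt without agent_scratchpad is reported as missing:

from langchain_core.prompts import ChatPromptTemplate

bad_prompt = ChatPromptTemplate.from_messages([("human", "{input}")])
missing_vars = {"agent_scratchpad"}.difference(
    bad_prompt.input_variables + list(bad_prompt.partial_variables)
)
assert missing_vars == {"agent_scratchpad"}   # the factory would raise ValueError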
from typing import Optional, Sequence
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain.agents.format_scratchpad.openai_tools import (
format_to_openai_tool_messages,
)
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
def create_openai_tools_agent(
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: ChatPromptTemplate,
strict: Optional[bool] = None,
) -> Runnable:
"""Create an agent that uses OpenAI tools.
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use. See Prompt section below for more on the expected
input variables.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Raises:
ValueError: If the prompt is missing required variables.
Example:
.. code-block:: python
from langchain import hub
from langchain_community.chat_models import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_tools_agent
prompt = hub.pull("hwchase17/openai-tools-agent")
model = ChatOpenAI()
tools = ...
agent = create_openai_tools_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Prompt:
The agent prompt must have an `agent_scratchpad` key that is a
``MessagesPlaceholder``. Intermediate agent actions and tool output
messages will be passed in here.
Here's an example:
.. code-block:: python
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant"),
MessagesPlaceholder("chat_history", optional=True),
("human", "{input}"),
MessagesPlaceholder("agent_scratchpad"),
]
)
"""
missing_vars = {"agent_scratchpad"}.difference(
prompt.input_variables + list(prompt.partial_variables)
)
if missing_vars:
raise ValueError(f"Prompt missing required variables: {missing_vars}")
llm_with_tools = llm.bind(
tools=[convert_to_openai_tool(tool, strict=strict) for tool in tools]
)
agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_to_openai_tool_messages(
x["intermediate_steps"]
)
)
| prompt
| llm_with_tools
| OpenAIToolsAgentOutputParser()
)
return agent
|
"""Test Ollama Chat API wrapper."""
from typing import Any
from unittest.mock import patch
from langchain_ollama import OllamaLLM
MODEL_NAME = "llama3.1"
def test_initialization() -> None:
"""Test integration initialization."""
OllamaLLM(model=MODEL_NAME)
def test_model_params() -> None:
# Test standard tracing params
llm = OllamaLLM(model=MODEL_NAME)
ls_params = llm._get_ls_params()
assert ls_params == {
"ls_provider": "ollama",
"ls_model_type": "llm",
"ls_model_name": MODEL_NAME,
}
llm = OllamaLLM(model=MODEL_NAME, num_predict=3)
ls_params = llm._get_ls_params()
assert ls_params == {
"ls_provider": "ollama",
"ls_model_type": "llm",
"ls_model_name": MODEL_NAME,
"ls_max_tokens": 3,
}
@patch("langchain_ollama.llms.validate_model")
def test_validate_model_on_init(mock_validate_model: Any) -> None:
"""Test that the model is validated on initialization when requested."""
# Test that validate_model is called when validate_model_on_init=True
OllamaLLM(model=MODEL_NAME, validate_model_on_init=True)
mock_validate_model.assert_called_once()
mock_validate_model.reset_mock()
# Test that validate_model is NOT called when validate_model_on_init=False
OllamaLLM(model=MODEL_NAME, validate_model_on_init=False)
mock_validate_model.assert_not_called()
# Test that validate_model is NOT called by default
OllamaLLM(model=MODEL_NAME)
mock_validate_model.assert_not_called()
|
"""Test Ollama Chat API wrapper."""
from typing import Any
from unittest.mock import patch
from langchain_ollama import OllamaLLM
MODEL_NAME = "llama3.1"
def test_initialization() -> None:
"""Test integration initialization."""
OllamaLLM(model="llama3")
def test_model_params() -> None:
# Test standard tracing params
llm = OllamaLLM(model="llama3")
ls_params = llm._get_ls_params()
assert ls_params == {
"ls_provider": "ollama",
"ls_model_type": "llm",
"ls_model_name": "llama3",
}
llm = OllamaLLM(model="llama3", num_predict=3)
ls_params = llm._get_ls_params()
assert ls_params == {
"ls_provider": "ollama",
"ls_model_type": "llm",
"ls_model_name": "llama3",
"ls_max_tokens": 3,
}
@patch("langchain_ollama.llms.validate_model")
def test_validate_model_on_init(mock_validate_model: Any) -> None:
"""Test that the model is validated on initialization when requested."""
# Test that validate_model is called when validate_model_on_init=True
OllamaLLM(model=MODEL_NAME, validate_model_on_init=True)
mock_validate_model.assert_called_once()
mock_validate_model.reset_mock()
# Test that validate_model is NOT called when validate_model_on_init=False
OllamaLLM(model=MODEL_NAME, validate_model_on_init=False)
mock_validate_model.assert_not_called()
# Test that validate_model is NOT called by default
OllamaLLM(model=MODEL_NAME)
mock_validate_model.assert_not_called()
|
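The two _get_ls_params cases above differ only in num_predict; a parametrized sketch that collapses them (illustrative, not part of the test file):

import pytest

@pytest.mark.parametrize("num_predict", [None, 3])
def test_ls_params_parametrized(num_predict) -> None:
    kwargs = {"model": MODEL_NAME}
    if num_predict is not None:
        kwargs["num_predict"] = num_predict
    ls_params = OllamaLLM(**kwargs)._get_ls_params()
    assert ls_params["ls_model_name"] == MODEL_NAME
    if num_predict is not None:
        assert ls_params["ls_max_tokens"] == num_predict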
from typing import TypeVar
from fastapi import Depends, FastAPI
from fastapi.testclient import TestClient
from typing_extensions import Annotated
app = FastAPI()
T = TypeVar("T")
Dep = Annotated[T, Depends()]
class A:
pass
class B:
pass
@app.get("/a")
async def a(dep: Dep[A]):
return {"cls": dep.__class__.__name__}
@app.get("/b")
async def b(dep: Dep[B]):
return {"cls": dep.__class__.__name__}
client = TestClient(app)
def test_generic_parameterless_depends():
response = client.get("/a")
assert response.status_code == 200, response.text
assert response.json() == {"cls": "A"}
response = client.get("/b")
assert response.status_code == 200, response.text
assert response.json() == {"cls": "B"}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"info": {"title": "FastAPI", "version": "0.1.0"},
"openapi": "3.1.0",
"paths": {
"/a": {
"get": {
"operationId": "a_a_get",
"responses": {
"200": {
"content": {"application/json": {"schema": {}}},
"description": "Successful Response",
}
},
"summary": "A",
}
},
"/b": {
"get": {
"operationId": "b_b_get",
"responses": {
"200": {
"content": {"application/json": {"schema": {}}},
"description": "Successful Response",
}
},
"summary": "B",
}
},
},
}
|
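The core trick above is the TypeVar-parameterized alias Dep = Annotated[T, Depends()], which makes Dep[X] mean "an X built by FastAPI's parameterless Depends()". A sketch of extending it with a third class (illustrative; registering /c would also grow the OpenAPI schema asserted in test_openapi_schema):

class C:
    pass

@app.get("/c")
async def c(dep: Dep[C]):
    return {"cls": dep.__class__.__name__}

# client.get("/c").json() == {"cls": "C"}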
from typing import TypeVar
from fastapi import Depends, FastAPI
from fastapi.testclient import TestClient
from typing_extensions import Annotated
app = FastAPI()
T = TypeVar("T")
Dep = Annotated[T, Depends()]
class A:
pass
class B:
pass
@app.get("/a")
async def a(dep: Dep[A]):
return {"cls": dep.__class__.__name__}
@app.get("/b")
async def b(dep: Dep[B]):
return {"cls": dep.__class__.__name__}
client = TestClient(app)
def test_generic_parameterless_depends():
response = client.get("/a")
assert response.status_code == 200, response.text
assert response.json() == {"cls": "A"}
response = client.get("/b")
assert response.status_code == 200, response.text
assert response.json() == {"cls": "B"}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"info": {"title": "FastAPI", "version": "0.1.0"},
"openapi": "3.1.0",
"paths": {
"/a": {
"get": {
"operationId": "a_a_get",
"responses": {
"200": {
"content": {"application/json": {"schema": {}}},
"description": "Successful " "Response",
}
},
"summary": "A",
}
},
"/b": {
"get": {
"operationId": "b_b_get",
"responses": {
"200": {
"content": {"application/json": {"schema": {}}},
"description": "Successful " "Response",
}
},
"summary": "B",
}
},
},
}
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""label_image for tflite."""
import argparse
import time
import numpy as np
from PIL import Image
from tensorflow.lite.python import lite
def load_labels(filename):
with open(filename, 'r') as f:
return [line.strip() for line in f.readlines()]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-i',
'--image',
default='/tmp/grace_hopper.bmp',
help='image to be classified')
parser.add_argument(
'-m',
'--model_file',
default='/tmp/mobilenet_v1_1.0_224_quant.tflite',
help='.tflite model to be executed')
parser.add_argument(
'-l',
'--label_file',
default='/tmp/labels.txt',
help='name of file containing labels')
parser.add_argument(
'--input_mean',
default=127.5, type=float,
help='input_mean')
parser.add_argument(
'--input_std',
default=127.5, type=float,
help='input standard deviation')
parser.add_argument(
'--num_threads', default=None, type=int, help='number of threads')
parser.add_argument(
'-e', '--ext_delegate', help='external_delegate_library path')
parser.add_argument(
'-o',
'--ext_delegate_options',
help='external delegate options, \
format: "option1: value1; option2: value2"')
args = parser.parse_args()
ext_delegate = None
ext_delegate_options = {}
  # parse external delegate options
if args.ext_delegate_options is not None:
options = args.ext_delegate_options.split(';')
for o in options:
kv = o.split(':')
if (len(kv) == 2):
ext_delegate_options[kv[0].strip()] = kv[1].strip()
else:
raise RuntimeError('Error parsing delegate option: ' + o)
# load external delegate
if args.ext_delegate is not None:
print('Loading external delegate from {} with args: {}'.format(
args.ext_delegate, ext_delegate_options))
ext_delegate = [
        lite.load_delegate(args.ext_delegate, ext_delegate_options)
]
interpreter = lite.Interpreter(
model_path=args.model_file,
experimental_delegates=ext_delegate,
num_threads=args.num_threads)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# check the type of the input tensor
floating_model = input_details[0]['dtype'] == np.float32
# NxHxWxC, H:1, W:2
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
img = Image.open(args.image).resize((width, height))
# add N dim
input_data = np.expand_dims(img, axis=0)
if floating_model:
input_data = (np.float32(input_data) - args.input_mean) / args.input_std
interpreter.set_tensor(input_details[0]['index'], input_data)
start_time = time.time()
interpreter.invoke()
stop_time = time.time()
output_data = interpreter.get_tensor(output_details[0]['index'])
results = np.squeeze(output_data)
top_k = results.argsort()[-5:][::-1]
labels = load_labels(args.label_file)
for i in top_k:
if floating_model:
print('{:08.6f}: {}'.format(float(results[i]), labels[i]))
else:
print('{:08.6f}: {}'.format(float(results[i] / 255.0), labels[i]))
print('time: {:.3f}ms'.format((stop_time - start_time) * 1000))
|
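Worked example of the top-k selection used above: argsort ascending, keep the last five indices, reverse to descending order:

import numpy as np

results = np.array([0.1, 0.7, 0.05, 0.9, 0.2, 0.3])
top_k = results.argsort()[-5:][::-1]
print(top_k)   # [3 1 5 4 0]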
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""label_image for tflite."""
import argparse
import time
import numpy as np
from PIL import Image
import tensorflow as tf
def load_labels(filename):
with open(filename, 'r') as f:
return [line.strip() for line in f.readlines()]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-i',
'--image',
default='/tmp/grace_hopper.bmp',
help='image to be classified')
parser.add_argument(
'-m',
'--model_file',
default='/tmp/mobilenet_v1_1.0_224_quant.tflite',
help='.tflite model to be executed')
parser.add_argument(
'-l',
'--label_file',
default='/tmp/labels.txt',
help='name of file containing labels')
parser.add_argument(
'--input_mean',
default=127.5, type=float,
help='input_mean')
parser.add_argument(
'--input_std',
default=127.5, type=float,
help='input standard deviation')
parser.add_argument(
'--num_threads', default=None, type=int, help='number of threads')
parser.add_argument(
'-e', '--ext_delegate', help='external_delegate_library path')
parser.add_argument(
'-o',
'--ext_delegate_options',
help='external delegate options, \
format: "option1: value1; option2: value2"')
args = parser.parse_args()
ext_delegate = None
ext_delegate_options = {}
  # parse external delegate options
if args.ext_delegate_options is not None:
options = args.ext_delegate_options.split(';')
for o in options:
kv = o.split(':')
if (len(kv) == 2):
ext_delegate_options[kv[0].strip()] = kv[1].strip()
else:
raise RuntimeError('Error parsing delegate option: ' + o)
# load external delegate
if args.ext_delegate is not None:
print('Loading external delegate from {} with args: {}'.format(
args.ext_delegate, ext_delegate_options))
ext_delegate = [
        tf.lite.experimental.load_delegate(args.ext_delegate, ext_delegate_options)
]
interpreter = tf.lite.Interpreter(
model_path=args.model_file,
experimental_delegates=ext_delegate,
num_threads=args.num_threads)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# check the type of the input tensor
floating_model = input_details[0]['dtype'] == np.float32
# NxHxWxC, H:1, W:2
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
img = Image.open(args.image).resize((width, height))
# add N dim
input_data = np.expand_dims(img, axis=0)
if floating_model:
input_data = (np.float32(input_data) - args.input_mean) / args.input_std
interpreter.set_tensor(input_details[0]['index'], input_data)
start_time = time.time()
interpreter.invoke()
stop_time = time.time()
output_data = interpreter.get_tensor(output_details[0]['index'])
results = np.squeeze(output_data)
top_k = results.argsort()[-5:][::-1]
labels = load_labels(args.label_file)
for i in top_k:
if floating_model:
print('{:08.6f}: {}'.format(float(results[i]), labels[i]))
else:
print('{:08.6f}: {}'.format(float(results[i] / 255.0), labels[i]))
print('time: {:.3f}ms'.format((stop_time - start_time) * 1000))
|
from keras.src import backend
from keras.src.utils.module_utils import tensorflow as tf
def get_tensor_spec(t, dynamic_batch=False, name=None):
"""Returns a `TensorSpec` given a single `Tensor` or `TensorSpec`."""
if isinstance(t, tf.TypeSpec):
spec = t
elif isinstance(t, tf.__internal__.CompositeTensor):
# Check for ExtensionTypes
spec = t._type_spec
elif hasattr(t, "shape") and hasattr(t, "dtype"):
spec = tf.TensorSpec(shape=t.shape, dtype=t.dtype, name=name)
else:
return None # Allow non-Tensors to pass through.
if not dynamic_batch:
return spec
shape = spec.shape
if shape.rank is None or shape.rank == 0:
return spec
shape_list = shape.as_list()
shape_list[0] = None
shape = tf.TensorShape(shape_list)
spec._shape = shape
return spec
def ensure_tensor(inputs, dtype=None):
"""Ensures the input is a Tensor, SparseTensor or RaggedTensor."""
if not isinstance(inputs, (tf.Tensor, tf.SparseTensor, tf.RaggedTensor)):
if backend.backend() == "torch" and backend.is_tensor(inputs):
# Plain `np.asarray()` conversion fails with PyTorch.
inputs = backend.convert_to_numpy(inputs)
inputs = tf.convert_to_tensor(inputs, dtype)
if dtype is not None and inputs.dtype != dtype:
inputs = tf.cast(inputs, dtype)
return inputs
def is_ragged_tensor(x):
return "ragged_tensor.RaggedTensor" in str(type(x))
def sparse_bincount(inputs, depth, binary_output, dtype, count_weights=None):
"""Apply binary or count encoding to an input and return a sparse tensor."""
result = tf.sparse.bincount(
inputs,
weights=count_weights,
minlength=depth,
maxlength=depth,
axis=-1,
binary_output=binary_output,
)
result = tf.cast(result, dtype)
if inputs.shape.rank == 1:
output_shape = (depth,)
else:
batch_size = tf.shape(result)[0]
output_shape = (batch_size, depth)
result = tf.SparseTensor(
indices=result.indices, values=result.values, dense_shape=output_shape
)
return result
def dense_bincount(inputs, depth, binary_output, dtype, count_weights=None):
"""Apply binary or count encoding to an input."""
result = tf.math.bincount(
inputs,
weights=count_weights,
minlength=depth,
maxlength=depth,
dtype=dtype,
axis=-1,
binary_output=binary_output,
)
if inputs.shape.rank == 1:
result.set_shape(tf.TensorShape((depth,)))
else:
batch_size = inputs.shape.as_list()[0]
result.set_shape(tf.TensorShape((batch_size, depth)))
return result
def expand_dims(inputs, axis):
"""Expand dims on sparse, ragged, or dense tensors."""
if isinstance(inputs, tf.SparseTensor):
return tf.sparse.expand_dims(inputs, axis)
return tf.expand_dims(inputs, axis)
def tf_encode_categorical_inputs(
inputs,
output_mode,
depth,
dtype="float32",
sparse=False,
count_weights=None,
idf_weights=None,
):
"""Encodes categorical inputs according to output_mode.
Faster method that relies on bincount.
"""
if output_mode == "int":
return tf.identity(tf.cast(inputs, dtype))
original_shape = inputs.shape
# In all cases, we should uprank scalar input to a single sample.
if inputs.shape.rank == 0:
inputs = expand_dims(inputs, -1)
    # One hot will uprank only if the final output dimension is not already 1.
if output_mode == "one_hot":
if inputs.shape[-1] != 1:
inputs = expand_dims(inputs, -1)
if inputs.shape.rank > 2:
raise ValueError(
"When output_mode is not `'int'`, maximum supported output rank "
f"is 2. Received output_mode {output_mode} and input shape "
f"{original_shape}, "
f"which would result in output rank {inputs.shape.rank}."
)
binary_output = output_mode in ("multi_hot", "one_hot")
if sparse:
bincounts = sparse_bincount(
inputs, depth, binary_output, dtype, count_weights
)
else:
bincounts = dense_bincount(
inputs, depth, binary_output, dtype, count_weights
)
bincounts = tf.cast(bincounts, dtype)
if output_mode != "tf_idf":
return bincounts
if idf_weights is None:
raise ValueError(
"When output mode is `'tf_idf'`, idf_weights must be provided. "
f"Received: output_mode={output_mode} and idf_weights={idf_weights}"
)
if sparse:
value_weights = tf.gather(idf_weights, bincounts.indices[:, -1])
return tf.SparseTensor(
bincounts.indices,
value_weights * bincounts.values,
bincounts.dense_shape,
)
else:
return tf.multiply(bincounts, idf_weights)
|
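The dense multi-hot path above reduces to a single tf.math.bincount call; a small demo with depth 4 (assumes TensorFlow is installed):

import tensorflow as tf

ids = tf.constant([[0, 1, 1], [2, 3, 3]])
multi_hot = tf.math.bincount(ids, minlength=4, maxlength=4, axis=-1, binary_output=True)
# [[1 1 0 0]
#  [0 0 1 1]]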
from keras.src import backend
from keras.src.utils.module_utils import tensorflow as tf
def get_tensor_spec(t, dynamic_batch=False, name=None):
"""Returns a `TensorSpec` given a single `Tensor` or `TensorSpec`."""
if isinstance(t, tf.TypeSpec):
spec = t
elif isinstance(t, tf.__internal__.CompositeTensor):
# Check for ExtensionTypes
spec = t._type_spec
elif hasattr(t, "shape") and hasattr(t, "dtype"):
spec = tf.TensorSpec(shape=t.shape, dtype=t.dtype, name=name)
else:
return None # Allow non-Tensors to pass through.
if not dynamic_batch:
return spec
shape = spec.shape
if shape.rank is None or shape.rank == 0:
return spec
shape_list = shape.as_list()
shape_list[0] = None
shape = tf.TensorShape(shape_list)
spec._shape = shape
return spec
def ensure_tensor(inputs, dtype=None):
"""Ensures the input is a Tensor, SparseTensor or RaggedTensor."""
if not isinstance(inputs, (tf.Tensor, tf.SparseTensor, tf.RaggedTensor)):
if backend.backend() == "torch" and backend.is_tensor(inputs):
# Plain `np.asarray()` conversion fails with PyTorch.
inputs = backend.convert_to_numpy(inputs)
inputs = tf.convert_to_tensor(inputs, dtype)
if dtype is not None and inputs.dtype != dtype:
inputs = tf.cast(inputs, dtype)
return inputs
def is_ragged_tensor(x):
return "ragged_tensor.RaggedTensor" in str(type(x))
|
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
@pytest.mark.parametrize('shuffle', [False, True])
@pytest.mark.parametrize('stack', [False, True])
@pytest.mark.parametrize('batch_size,n_batches', [(16, 7), (10, 10)])
def test_batch(shuffle, stack, batch_size, n_batches):
class MyDoc(BaseDoc):
id: int
tensor: NdArray
t_shape = (32, 32)
da = DocList[MyDoc](
[
MyDoc(
id=str(i),
tensor=np.zeros(t_shape),
)
for i in range(100)
]
)
if stack:
da = da.to_doc_vec()
batches = list(da._batch(batch_size=batch_size, shuffle=shuffle))
assert len(batches) == n_batches
for i, batch in enumerate(batches):
if i < n_batches - 1:
assert len(batch) == batch_size
if stack:
assert batch.tensor.shape == (batch_size, *t_shape)
else:
assert len(batch) <= batch_size
non_shuffled_ids = [
i for i in range(i * batch_size, min((i + 1) * batch_size, len(da)))
]
if not shuffle:
assert batch.id == non_shuffled_ids
else:
assert not (batch.id == non_shuffled_ids)
|
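The (batch_size, n_batches) parametrization above is plain ceiling division over the 100 documents:

import math

assert math.ceil(100 / 16) == 7   # six full batches plus a remainder of 4
assert math.ceil(100 / 10) == 10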
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
@pytest.mark.parametrize('shuffle', [False, True])
@pytest.mark.parametrize('stack', [False, True])
@pytest.mark.parametrize('batch_size,n_batches', [(16, 7), (10, 10)])
def test_batch(shuffle, stack, batch_size, n_batches):
class MyDoc(BaseDoc):
id: int
tensor: NdArray
t_shape = (32, 32)
da = DocList[MyDoc](
[
MyDoc(
id=i,
tensor=np.zeros(t_shape),
)
for i in range(100)
]
)
if stack:
da = da.to_doc_vec()
batches = list(da._batch(batch_size=batch_size, shuffle=shuffle))
assert len(batches) == n_batches
for i, batch in enumerate(batches):
if i < n_batches - 1:
assert len(batch) == batch_size
if stack:
assert batch.tensor.shape == (batch_size, *t_shape)
else:
assert len(batch) <= batch_size
non_shuffled_ids = [
i for i in range(i * batch_size, min((i + 1) * batch_size, len(da)))
]
if not shuffle:
assert batch.id == non_shuffled_ids
else:
assert not (batch.id == non_shuffled_ids)
|
import sys
import pytest
from hypothesis import given, settings, strategies
from xgboost.testing import no_cupy
sys.path.append("tests/python")
from test_data_iterator import run_data_iterator
from test_data_iterator import test_single_batch as cpu_single_batch
def test_gpu_single_batch() -> None:
cpu_single_batch("gpu_hist")
@pytest.mark.skipif(**no_cupy())
@given(
strategies.integers(0, 1024),
strategies.integers(1, 7),
strategies.integers(0, 8),
strategies.booleans(),
strategies.booleans(),
strategies.booleans(),
)
@settings(deadline=None, max_examples=16, print_blob=True)
def test_gpu_data_iterator(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
subsample: bool,
use_cupy: bool,
on_host: bool,
) -> None:
run_data_iterator(
n_samples_per_batch,
n_features,
n_batches,
"hist",
subsample=subsample,
device="cuda",
use_cupy=use_cupy,
on_host=on_host,
)
def test_cpu_data_iterator() -> None:
"""Make sure CPU algorithm can handle GPU inputs"""
run_data_iterator(
1024,
2,
3,
"approx",
device="cuda",
subsample=False,
use_cupy=True,
on_host=False,
)
|
import sys
import pytest
from hypothesis import given, settings, strategies
from xgboost.testing import no_cupy
sys.path.append("tests/python")
from test_data_iterator import run_data_iterator
from test_data_iterator import test_single_batch as cpu_single_batch
def test_gpu_single_batch() -> None:
cpu_single_batch("gpu_hist")
@pytest.mark.skipif(**no_cupy())
@given(
strategies.integers(0, 1024),
strategies.integers(1, 7),
strategies.integers(0, 8),
strategies.booleans(),
strategies.booleans(),
)
@settings(deadline=None, max_examples=10, print_blob=True)
def test_gpu_data_iterator(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
subsample: bool,
use_cupy: bool,
) -> None:
run_data_iterator(
n_samples_per_batch, n_features, n_batches, "gpu_hist", subsample, use_cupy
)
def test_cpu_data_iterator() -> None:
"""Make sure CPU algorithm can handle GPU inputs"""
run_data_iterator(1024, 2, 3, "approx", False, True)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class MaskRCNN(TwoStageDetector):
"""Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None,
img_norm_cfg=None):
super(MaskRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg,
img_norm_cfg=img_norm_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class MaskRCNN(TwoStageDetector):
"""Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None,
img_norm_cfg=None):
super(MaskRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg,
img_norm_cfg=img_norm_cfg)
|
# coding: utf-8
from pathlib import Path
import numpy as np
preds = [np.loadtxt(str(name)) for name in Path(__file__).absolute().parent.glob("*.pred")]
np.testing.assert_allclose(preds[0], preds[1])
|
# coding: utf-8
from pathlib import Path
import numpy as np
preds = [np.loadtxt(str(name)) for name in Path(__file__).absolute().parent.glob('*.pred')]
np.testing.assert_allclose(preds[0], preds[1])
|
import warnings
from typing import Any, Callable, Dict, List, Optional, Sequence, Union
import torch
from torch import nn
from torchvision import transforms as _transforms
from torchvision.prototype.transforms import Transform
class Compose(Transform):
def __init__(self, transforms: Sequence[Callable]) -> None:
super().__init__()
if not isinstance(transforms, Sequence):
raise TypeError("Argument transforms should be a sequence of callables")
self.transforms = transforms
def forward(self, *inputs: Any) -> Any:
sample = inputs if len(inputs) > 1 else inputs[0]
for transform in self.transforms:
sample = transform(sample)
return sample
def extra_repr(self) -> str:
format_string = []
for t in self.transforms:
format_string.append(f" {t}")
return "\n".join(format_string)
class RandomApply(Transform):
_v1_transform_cls = _transforms.RandomApply
def __init__(self, transforms: Union[Sequence[Callable], nn.ModuleList], p: float = 0.5) -> None:
super().__init__()
if not isinstance(transforms, (Sequence, nn.ModuleList)):
raise TypeError("Argument transforms should be a sequence of callables or a `nn.ModuleList`")
self.transforms = transforms
if not (0.0 <= p <= 1.0):
raise ValueError("`p` should be a floating point value in the interval [0.0, 1.0].")
self.p = p
def _extract_params_for_v1_transform(self) -> Dict[str, Any]:
return {"transforms": self.transforms, "p": self.p}
def forward(self, *inputs: Any) -> Any:
sample = inputs if len(inputs) > 1 else inputs[0]
if torch.rand(1) >= self.p:
return sample
for transform in self.transforms:
sample = transform(sample)
return sample
def extra_repr(self) -> str:
format_string = []
for t in self.transforms:
format_string.append(f" {t}")
return "\n".join(format_string)
class RandomChoice(Transform):
def __init__(
self,
transforms: Sequence[Callable],
probabilities: Optional[List[float]] = None,
p: Optional[List[float]] = None,
) -> None:
if not isinstance(transforms, Sequence):
raise TypeError("Argument transforms should be a sequence of callables")
if p is not None:
warnings.warn(
"Argument p is deprecated and will be removed in a future release. "
"Please use probabilities argument instead."
)
probabilities = p
if probabilities is None:
probabilities = [1] * len(transforms)
elif len(probabilities) != len(transforms):
raise ValueError(
f"The number of probabilities doesn't match the number of transforms: "
f"{len(probabilities)} != {len(transforms)}"
)
super().__init__()
self.transforms = transforms
total = sum(probabilities)
self.probabilities = [prob / total for prob in probabilities]
def forward(self, *inputs: Any) -> Any:
idx = int(torch.multinomial(torch.tensor(self.probabilities), 1))
transform = self.transforms[idx]
return transform(*inputs)
class RandomOrder(Transform):
def __init__(self, transforms: Sequence[Callable]) -> None:
if not isinstance(transforms, Sequence):
raise TypeError("Argument transforms should be a sequence of callables")
super().__init__()
self.transforms = transforms
def forward(self, *inputs: Any) -> Any:
sample = inputs if len(inputs) > 1 else inputs[0]
for idx in torch.randperm(len(self.transforms)):
transform = self.transforms[idx]
sample = transform(sample)
return sample
|
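RandomChoice above normalizes raw weights into probabilities before sampling; the step in isolation:

import torch

weights = [1, 3]
total = sum(weights)
probabilities = [w / total for w in weights]            # [0.25, 0.75]
idx = int(torch.multinomial(torch.tensor(probabilities), 1))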
import warnings
from typing import Any, Callable, List, Optional, Sequence, Union
import torch
from torch import nn
from torchvision.prototype.transforms import Transform
class Compose(Transform):
def __init__(self, transforms: Sequence[Callable]) -> None:
super().__init__()
if not isinstance(transforms, Sequence):
raise TypeError("Argument transforms should be a sequence of callables")
self.transforms = transforms
def forward(self, *inputs: Any) -> Any:
sample = inputs if len(inputs) > 1 else inputs[0]
for transform in self.transforms:
sample = transform(sample)
return sample
def extra_repr(self) -> str:
format_string = []
for t in self.transforms:
format_string.append(f" {t}")
return "\n".join(format_string)
class RandomApply(Transform):
def __init__(self, transforms: Union[Sequence[Callable], nn.ModuleList], p: float = 0.5) -> None:
super().__init__()
if not isinstance(transforms, (Sequence, nn.ModuleList)):
raise TypeError("Argument transforms should be a sequence of callables or a `nn.ModuleList`")
self.transforms = transforms
if not (0.0 <= p <= 1.0):
raise ValueError("`p` should be a floating point value in the interval [0.0, 1.0].")
self.p = p
def forward(self, *inputs: Any) -> Any:
sample = inputs if len(inputs) > 1 else inputs[0]
if torch.rand(1) >= self.p:
return sample
for transform in self.transforms:
sample = transform(sample)
return sample
def extra_repr(self) -> str:
format_string = []
for t in self.transforms:
format_string.append(f" {t}")
return "\n".join(format_string)
class RandomChoice(Transform):
def __init__(
self,
transforms: Sequence[Callable],
probabilities: Optional[List[float]] = None,
p: Optional[List[float]] = None,
) -> None:
if not isinstance(transforms, Sequence):
raise TypeError("Argument transforms should be a sequence of callables")
if p is not None:
warnings.warn(
"Argument p is deprecated and will be removed in a future release. "
"Please use probabilities argument instead."
)
probabilities = p
if probabilities is None:
probabilities = [1] * len(transforms)
elif len(probabilities) != len(transforms):
raise ValueError(
f"The number of probabilities doesn't match the number of transforms: "
f"{len(probabilities)} != {len(transforms)}"
)
super().__init__()
self.transforms = transforms
total = sum(probabilities)
self.probabilities = [prob / total for prob in probabilities]
def forward(self, *inputs: Any) -> Any:
idx = int(torch.multinomial(torch.tensor(self.probabilities), 1))
transform = self.transforms[idx]
return transform(*inputs)
class RandomOrder(Transform):
def __init__(self, transforms: Sequence[Callable]) -> None:
if not isinstance(transforms, Sequence):
raise TypeError("Argument transforms should be a sequence of callables")
super().__init__()
self.transforms = transforms
def forward(self, *inputs: Any) -> Any:
sample = inputs if len(inputs) > 1 else inputs[0]
for idx in torch.randperm(len(self.transforms)):
transform = self.transforms[idx]
sample = transform(sample)
return sample
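# --- Illustrative usage sketch (an addition, not part of the original module):
# chains the containers above using two trivial callables that stand in for
# real transforms. All names below are placeholders.
if __name__ == "__main__":
    identity = lambda x: x
    double = lambda x: x * 2
    pipeline = Compose([identity, double])           # run both, in order
    maybe = RandomApply([double], p=0.5)             # run `double` half the time
    pick_one = RandomChoice([identity, double], probabilities=[0.2, 0.8])
    any_order = RandomOrder([identity, double])      # run both, in random order
    img = torch.rand(3, 8, 8)
    out = any_order(pick_one(maybe(pipeline(img))))
    print(out.shape)  # torch.Size([3, 8, 8])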
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Optional, Sequence, Union
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def __init__(self):
self.time_sec_tot = 0
self.start_iter = 0
def before_run(self, runner) -> None:
"""Synchronize the number of iterations with the runner.
Args:
runner: The runner of the training, validation or testing
process.
"""
self.start_iter = runner.iter
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Record timestamp before start an epoch.
Args:
runner (Runner): The runner of the training validation and
testing process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
self.t = time.time()
def _before_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
mode: str = 'train') -> None:
"""Calculating time for loading data and updating "data_time"
``HistoryBuffer`` of ``runner.message_hub``.
Args:
runner (Runner): The runner of the training, validation and
testing process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# Update data loading time in `runner.message_hub`.
runner.message_hub.update_scalar(f'{mode}/data_time',
time.time() - self.t)
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Calculating time for an iteration and updating "time"
``HistoryBuffer`` of ``runner.message_hub``.
Args:
runner (Runner): The runner of the training validation and
testing process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# Update iteration time in `runner.message_hub`.
message_hub = runner.message_hub
message_hub.update_scalar(f'{mode}/time', time.time() - self.t)
self.t = time.time()
window_size = runner.log_processor.window_size
        # Calculate eta every `window_size` iterations. Since the test and val
        # loops do not update runner.iter, use `every_n_inner_iters` to check
        # the interval.
if self.every_n_inner_iters(batch_idx, window_size):
iter_time = message_hub.get_scalar(f'{mode}/time').mean(
window_size)
if mode == 'train':
self.time_sec_tot += iter_time * window_size
                # Calculate the average iteration time.
time_sec_avg = self.time_sec_tot / (
runner.iter - self.start_iter + 1)
# Calculate eta.
eta_sec = time_sec_avg * (
runner.train_loop.max_iters - runner.iter - 1)
runner.message_hub.update_info('eta', eta_sec)
else:
if mode == 'val':
cur_dataloader = runner.val_loop.dataloader
else:
cur_dataloader = runner.test_loop.dataloader
eta_sec = iter_time * (len(cur_dataloader) - batch_idx - 1)
runner.message_hub.update_info('eta', eta_sec)
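# --- Usage sketch (an assumption, not part of the original file): with the
# HOOKS registry above, the hook is typically enabled through the runner's
# hook configuration, e.g. roughly:
#
#     default_hooks = dict(timer=dict(type='IterTimerHook'))
#
# after which `{mode}/data_time`, `{mode}/time` and `eta` appear in
# `runner.message_hub` and are picked up by the log processor.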
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Any, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataElement]]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def __init__(self):
self.time_sec_tot = 0
self.start_iter = 0
def before_run(self, runner) -> None:
"""Synchronize the number of iterations with the runner.
Args:
runner: The runner of the training, validation or testing
process.
"""
self.start_iter = runner.iter
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Record timestamp before start an epoch.
Args:
runner (Runner): The runner of the training validation and
testing process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
self.t = time.time()
def _before_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
mode: str = 'train') -> None:
"""Calculating time for loading data and updating "data_time"
``HistoryBuffer`` of ``runner.message_hub``.
Args:
runner (Runner): The runner of the training, validation and
testing process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# Update data loading time in `runner.message_hub`.
runner.message_hub.update_scalar(f'{mode}/data_time',
time.time() - self.t)
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Calculating time for an iteration and updating "time"
``HistoryBuffer`` of ``runner.message_hub``.
Args:
runner (Runner): The runner of the training validation and
testing process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# Update iteration time in `runner.message_hub`.
message_hub = runner.message_hub
message_hub.update_scalar(f'{mode}/time', time.time() - self.t)
self.t = time.time()
window_size = runner.log_processor.window_size
        # Calculate eta every `window_size` iterations. Since the test and val
        # loops do not update runner.iter, use `every_n_inner_iters` to check
        # the interval.
if self.every_n_inner_iters(batch_idx, window_size):
iter_time = message_hub.get_scalar(f'{mode}/time').mean(
window_size)
if mode == 'train':
self.time_sec_tot += iter_time * window_size
                # Calculate the average iteration time.
time_sec_avg = self.time_sec_tot / (
runner.iter - self.start_iter + 1)
# Calculate eta.
eta_sec = time_sec_avg * (
runner.train_loop.max_iters - runner.iter - 1)
runner.message_hub.update_info('eta', eta_sec)
else:
if mode == 'val':
cur_dataloader = runner.val_loop.dataloader
else:
cur_dataloader = runner.test_loop.dataloader
eta_sec = iter_time * (len(cur_dataloader) - batch_idx - 1)
runner.message_hub.update_info('eta', eta_sec)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
class _BatchNormXd(nn.modules.batchnorm._BatchNorm):
"""A general BatchNorm layer without input dimension check.
Reproduced from @kapily's work:
(https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)
    The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc.
    is `_check_input_dim`, which is designed for tensor sanity checks.
The check has been bypassed in this class for the convenience of converting
SyncBatchNorm.
"""
def _check_input_dim(self, input: torch.Tensor):
return
def revert_sync_batchnorm(module: nn.Module) -> nn.Module:
"""Helper function to convert all `SyncBatchNorm` (SyncBN) and
    `mmcv.ops.sync_bn.SyncBatchNorm` (MMSyncBN) layers in the model to
`BatchNormXd` layers.
Adapted from @kapily's work:
(https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)
Args:
module (nn.Module): The module containing `SyncBatchNorm` layers.
Returns:
module_output: The converted module with `BatchNormXd` layers.
"""
module_output = module
module_checklist = [torch.nn.modules.batchnorm.SyncBatchNorm]
try:
import mmcv
except ImportError:
pass
else:
if hasattr(mmcv, 'ops'):
module_checklist.append(mmcv.ops.SyncBatchNorm)
if isinstance(module, tuple(module_checklist)):
module_output = _BatchNormXd(module.num_features, module.eps,
module.momentum, module.affine,
module.track_running_stats)
if module.affine:
# no_grad() may not be needed here but
# just to be consistent with `convert_sync_batchnorm()`
with torch.no_grad():
module_output.weight = module.weight
module_output.bias = module.bias
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = module.num_batches_tracked
module_output.training = module.training
# qconfig exists in quantized models
if hasattr(module, 'qconfig'):
module_output.qconfig = module.qconfig
for name, child in module.named_children():
module_output.add_module(name, revert_sync_batchnorm(child))
del module
return module_output
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
class _BatchNormXd(nn.modules.batchnorm._BatchNorm):
"""A general BatchNorm layer without input dimension check.
Reproduced from @kapily's work:
(https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)
    The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc.
    is `_check_input_dim`, which is designed for tensor sanity checks.
The check has been bypassed in this class for the convenience of converting
SyncBatchNorm.
"""
def _check_input_dim(self, input: torch.Tensor):
return
def revert_sync_batchnorm(module: nn.Module) -> nn.Module:
"""Helper function to convert all `SyncBatchNorm` (SyncBN) and
    `mmcv.ops.sync_bn.SyncBatchNorm` (MMSyncBN) layers in the model to
`BatchNormXd` layers.
Adapted from @kapily's work:
(https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)
Args:
module (nn.Module): The module containing `SyncBatchNorm` layers.
Returns:
module_output: The converted module with `BatchNormXd` layers.
"""
module_output = module
module_checklist = [torch.nn.modules.batchnorm.SyncBatchNorm]
if isinstance(module, tuple(module_checklist)):
module_output = _BatchNormXd(module.num_features, module.eps,
module.momentum, module.affine,
module.track_running_stats)
if module.affine:
# no_grad() may not be needed here but
# just to be consistent with `convert_sync_batchnorm()`
with torch.no_grad():
module_output.weight = module.weight
module_output.bias = module.bias
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = module.num_batches_tracked
module_output.training = module.training
# qconfig exists in quantized models
if hasattr(module, 'qconfig'):
module_output.qconfig = module.qconfig
for name, child in module.named_children():
module_output.add_module(name, revert_sync_batchnorm(child))
del module
return module_output
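# --- Minimal usage sketch (an addition, not part of the original file): build
# a model containing SyncBatchNorm, which normally cannot run outside a
# distributed process group, and revert it for single-process use.
if __name__ == '__main__':
    model = nn.Sequential(
        nn.Conv2d(3, 8, 3),
        nn.SyncBatchNorm(8),
    )
    model = revert_sync_batchnorm(model)
    out = model(torch.rand(2, 3, 16, 16))
    print(type(model[1]).__name__, out.shape)  # _BatchNormXd torch.Size([2, 8, 14, 14])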
|
PREFIX = """Respond to the human as helpfully and accurately as possible. You have access to the following tools:""" # noqa: E501
FORMAT_INSTRUCTIONS = """Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
Valid "action" values: "Final Answer" or {tool_names}
Provide only ONE action per $JSON_BLOB, as shown:
```
{{{{
"action": $TOOL_NAME,
"action_input": $INPUT
}}}}
```
Follow this format:
Question: input question to answer
Thought: consider previous and subsequent steps
Action:
```
$JSON_BLOB
```
Observation: action result
... (repeat Thought/Action/Observation N times)
Thought: I know what to respond
Action:
```
{{{{
"action": "Final Answer",
"action_input": "Final response to human"
}}}}
```""" # noqa: E501
SUFFIX = """Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.
Thought:""" # noqa: E501
|
# flake8: noqa
PREFIX = """Respond to the human as helpfully and accurately as possible. You have access to the following tools:"""
FORMAT_INSTRUCTIONS = """Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
Valid "action" values: "Final Answer" or {tool_names}
Provide only ONE action per $JSON_BLOB, as shown:
```
{{{{
"action": $TOOL_NAME,
"action_input": $INPUT
}}}}
```
Follow this format:
Question: input question to answer
Thought: consider previous and subsequent steps
Action:
```
$JSON_BLOB
```
Observation: action result
... (repeat Thought/Action/Observation N times)
Thought: I know what to respond
Action:
```
{{{{
"action": "Final Answer",
"action_input": "Final response to human"
}}}}
```"""
SUFFIX = """Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.
Thought:"""
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import PLUGIN_LAYERS
eps = 1e-6
@PLUGIN_LAYERS.register_module()
class DropBlock(nn.Module):
"""Randomly drop some regions of feature maps.
Please refer to the method proposed in `DropBlock
<https://arxiv.org/abs/1810.12890>`_ for details.
Args:
drop_prob (float): The probability of dropping each block.
block_size (int): The size of dropped blocks.
warmup_iters (int): The drop probability will linearly increase
from `0` to `drop_prob` during the first `warmup_iters` iterations.
Default: 2000.
"""
def __init__(self, drop_prob, block_size, warmup_iters=2000, **kwargs):
super(DropBlock, self).__init__()
assert block_size % 2 == 1
assert 0 < drop_prob <= 1
assert warmup_iters >= 0
self.drop_prob = drop_prob
self.block_size = block_size
self.warmup_iters = warmup_iters
self.iter_cnt = 0
def forward(self, x):
"""
Args:
x (Tensor): Input feature map on which some areas will be randomly
dropped.
Returns:
Tensor: The tensor after DropBlock layer.
"""
if not self.training:
return x
self.iter_cnt += 1
N, C, H, W = list(x.shape)
gamma = self._compute_gamma((H, W))
mask_shape = (N, C, H - self.block_size + 1, W - self.block_size + 1)
mask = torch.bernoulli(torch.full(mask_shape, gamma, device=x.device))
mask = F.pad(mask, [self.block_size // 2] * 4, value=0)
mask = F.max_pool2d(
input=mask,
stride=(1, 1),
kernel_size=(self.block_size, self.block_size),
padding=self.block_size // 2)
mask = 1 - mask
x = x * mask * mask.numel() / (eps + mask.sum())
return x
def _compute_gamma(self, feat_size):
"""Compute the value of gamma according to paper. gamma is the
parameter of bernoulli distribution, which controls the number of
features to drop.
gamma = (drop_prob * fm_area) / (drop_area * keep_area)
Args:
feat_size (tuple[int, int]): The height and width of feature map.
Returns:
float: The value of gamma.
"""
gamma = (self.drop_prob * feat_size[0] * feat_size[1])
gamma /= ((feat_size[0] - self.block_size + 1) *
(feat_size[1] - self.block_size + 1))
gamma /= (self.block_size**2)
factor = (1.0 if self.iter_cnt > self.warmup_iters else self.iter_cnt /
self.warmup_iters)
return gamma * factor
def extra_repr(self):
return (f'drop_prob={self.drop_prob}, block_size={self.block_size}, '
f'warmup_iters={self.warmup_iters}')
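# --- Minimal usage sketch (an addition, not part of the original file): apply
# DropBlock to a random feature map in training mode. The warmup keeps the
# effective drop probability small during the first iterations.
if __name__ == '__main__':
    drop_block = DropBlock(drop_prob=0.1, block_size=3, warmup_iters=10)
    drop_block.train()
    feat = torch.rand(2, 16, 32, 32)
    out = drop_block(feat)
    print(out.shape)  # same shape as the input: torch.Size([2, 16, 32, 32])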
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import PLUGIN_LAYERS
eps = 1e-6
@PLUGIN_LAYERS.register_module()
class DropBlock(nn.Module):
"""Randomly drop some regions of feature maps.
Please refer to the method proposed in `DropBlock
<https://arxiv.org/abs/1810.12890>`_ for details.
Args:
drop_prob (float): The probability of dropping each block.
block_size (int): The size of dropped blocks.
warmup_iters (int): The drop probability will linearly increase
from `0` to `drop_prob` during the first `warmup_iters` iterations.
Default: 2000.
"""
def __init__(self, drop_prob, block_size, warmup_iters=2000, **kwargs):
super(DropBlock, self).__init__()
assert block_size % 2 == 1
assert 0 < drop_prob <= 1
assert warmup_iters >= 0
self.drop_prob = drop_prob
self.block_size = block_size
self.warmup_iters = warmup_iters
self.iter_cnt = 0
def forward(self, x):
"""
Args:
x (Tensor): Input feature map on which some areas will be randomly
dropped.
Returns:
Tensor: The tensor after DropBlock layer.
"""
if not self.training:
return x
self.iter_cnt += 1
N, C, H, W = list(x.shape)
gamma = self._compute_gamma((H, W))
mask_shape = (N, C, H - self.block_size + 1, W - self.block_size + 1)
mask = torch.bernoulli(torch.full(mask_shape, gamma, device=x.device))
mask = F.pad(mask, [self.block_size // 2] * 4, value=0)
mask = F.max_pool2d(
input=mask,
stride=(1, 1),
kernel_size=(self.block_size, self.block_size),
padding=self.block_size // 2)
mask = 1 - mask
x = x * mask * mask.numel() / (eps + mask.sum())
return x
def _compute_gamma(self, feat_size):
"""Compute the value of gamma according to paper. gamma is the
parameter of bernoulli distribution, which controls the number of
features to drop.
gamma = (drop_prob * fm_area) / (drop_area * keep_area)
Args:
feat_size (tuple[int, int]): The height and width of feature map.
Returns:
float: The value of gamma.
"""
gamma = (self.drop_prob * feat_size[0] * feat_size[1])
gamma /= ((feat_size[0] - self.block_size + 1) *
(feat_size[1] - self.block_size + 1))
gamma /= (self.block_size**2)
factor = (1.0 if self.iter_cnt > self.warmup_iters else self.iter_cnt /
self.warmup_iters)
return gamma * factor
|
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# disable opencv multithreading to avoid system being overloaded
opencv_num_threads = 0
# set multi-process start method as `fork` to speed up the training
mp_start_method = 'fork'
|
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
import logging
from typing import Optional, cast
from autogpt_libs.auth.models import DEFAULT_USER_ID
from autogpt_libs.supabase_integration_credentials_store.types import (
UserIntegrations,
UserMetadata,
UserMetadataRaw,
)
from fastapi import HTTPException
from prisma import Json
from prisma.models import User
from backend.data.db import prisma
from backend.util.encryption import JSONCryptor
logger = logging.getLogger(__name__)
async def get_or_create_user(user_data: dict) -> User:
user_id = user_data.get("sub")
if not user_id:
raise HTTPException(status_code=401, detail="User ID not found in token")
user_email = user_data.get("email")
if not user_email:
raise HTTPException(status_code=401, detail="Email not found in token")
user = await prisma.user.find_unique(where={"id": user_id})
if not user:
user = await prisma.user.create(
data={
"id": user_id,
"email": user_email,
"name": user_data.get("user_metadata", {}).get("name"),
}
)
return User.model_validate(user)
async def get_user_by_id(user_id: str) -> Optional[User]:
user = await prisma.user.find_unique(where={"id": user_id})
return User.model_validate(user) if user else None
async def create_default_user() -> Optional[User]:
user = await prisma.user.find_unique(where={"id": DEFAULT_USER_ID})
if not user:
user = await prisma.user.create(
data={
"id": DEFAULT_USER_ID,
"email": "default@example.com",
"name": "Default User",
}
)
return User.model_validate(user)
async def get_user_metadata(user_id: str) -> UserMetadata:
user = await User.prisma().find_unique_or_raise(
where={"id": user_id},
)
metadata = cast(UserMetadataRaw, user.metadata)
return UserMetadata.model_validate(metadata)
async def update_user_metadata(user_id: str, metadata: UserMetadata):
await User.prisma().update(
where={"id": user_id},
data={"metadata": Json(metadata.model_dump())},
)
async def get_user_integrations(user_id: str) -> UserIntegrations:
user = await User.prisma().find_unique_or_raise(
where={"id": user_id},
)
encrypted_integrations = user.integrations
if not encrypted_integrations:
return UserIntegrations()
else:
return UserIntegrations.model_validate(
JSONCryptor().decrypt(encrypted_integrations)
)
async def update_user_integrations(user_id: str, data: UserIntegrations):
encrypted_data = JSONCryptor().encrypt(data.model_dump())
await User.prisma().update(
where={"id": user_id},
data={"integrations": encrypted_data},
)
async def migrate_and_encrypt_user_integrations():
"""Migrate integration credentials and OAuth states from metadata to integrations column."""
users = await User.prisma().find_many(
where={
"metadata": {
"path": ["integration_credentials"],
"not": Json({"a": "yolo"}), # bogus value works to check if key exists
} # type: ignore
}
)
logger.info(f"Migrating integration credentials for {len(users)} users")
for user in users:
raw_metadata = cast(UserMetadataRaw, user.metadata)
metadata = UserMetadata.model_validate(raw_metadata)
# Get existing integrations data
integrations = await get_user_integrations(user_id=user.id)
# Copy credentials and oauth states from metadata if they exist
if metadata.integration_credentials and not integrations.credentials:
integrations.credentials = metadata.integration_credentials
if metadata.integration_oauth_states:
integrations.oauth_states = metadata.integration_oauth_states
# Save to integrations column
await update_user_integrations(user_id=user.id, data=integrations)
# Remove from metadata
raw_metadata = dict(raw_metadata)
raw_metadata.pop("integration_credentials", None)
raw_metadata.pop("integration_oauth_states", None)
# Update metadata without integration data
await User.prisma().update(
where={"id": user.id},
data={"metadata": Json(raw_metadata)},
)
|
import logging
from typing import Optional, cast
from autogpt_libs.supabase_integration_credentials_store.types import (
UserIntegrations,
UserMetadata,
UserMetadataRaw,
)
from fastapi import HTTPException
from prisma import Json
from prisma.models import User
from backend.data.db import prisma
from backend.util.encryption import JSONCryptor
logger = logging.getLogger(__name__)
DEFAULT_USER_ID = "3e53486c-cf57-477e-ba2a-cb02dc828e1a"
DEFAULT_EMAIL = "default@example.com"
async def get_or_create_user(user_data: dict) -> User:
user_id = user_data.get("sub")
if not user_id:
raise HTTPException(status_code=401, detail="User ID not found in token")
user_email = user_data.get("email")
if not user_email:
raise HTTPException(status_code=401, detail="Email not found in token")
user = await prisma.user.find_unique(where={"id": user_id})
if not user:
user = await prisma.user.create(
data={
"id": user_id,
"email": user_email,
"name": user_data.get("user_metadata", {}).get("name"),
}
)
return User.model_validate(user)
async def get_user_by_id(user_id: str) -> Optional[User]:
user = await prisma.user.find_unique(where={"id": user_id})
return User.model_validate(user) if user else None
async def create_default_user() -> Optional[User]:
user = await prisma.user.find_unique(where={"id": DEFAULT_USER_ID})
if not user:
user = await prisma.user.create(
data={
"id": DEFAULT_USER_ID,
"email": "default@example.com",
"name": "Default User",
}
)
return User.model_validate(user)
async def get_user_metadata(user_id: str) -> UserMetadata:
user = await User.prisma().find_unique_or_raise(
where={"id": user_id},
)
metadata = cast(UserMetadataRaw, user.metadata)
return UserMetadata.model_validate(metadata)
async def update_user_metadata(user_id: str, metadata: UserMetadata):
await User.prisma().update(
where={"id": user_id},
data={"metadata": Json(metadata.model_dump())},
)
async def get_user_integrations(user_id: str) -> UserIntegrations:
user = await User.prisma().find_unique_or_raise(
where={"id": user_id},
)
encrypted_integrations = user.integrations
if not encrypted_integrations:
return UserIntegrations()
else:
return UserIntegrations.model_validate(
JSONCryptor().decrypt(encrypted_integrations)
)
async def update_user_integrations(user_id: str, data: UserIntegrations):
encrypted_data = JSONCryptor().encrypt(data.model_dump())
await User.prisma().update(
where={"id": user_id},
data={"integrations": encrypted_data},
)
async def migrate_and_encrypt_user_integrations():
"""Migrate integration credentials and OAuth states from metadata to integrations column."""
users = await User.prisma().find_many(
where={
"metadata": {
"path": ["integration_credentials"],
"not": Json({"a": "yolo"}), # bogus value works to check if key exists
} # type: ignore
}
)
logger.info(f"Migrating integration credentials for {len(users)} users")
for user in users:
raw_metadata = cast(UserMetadataRaw, user.metadata)
metadata = UserMetadata.model_validate(raw_metadata)
# Get existing integrations data
integrations = await get_user_integrations(user_id=user.id)
# Copy credentials and oauth states from metadata if they exist
if metadata.integration_credentials and not integrations.credentials:
integrations.credentials = metadata.integration_credentials
if metadata.integration_oauth_states:
integrations.oauth_states = metadata.integration_oauth_states
# Save to integrations column
await update_user_integrations(user_id=user.id, data=integrations)
# Remove from metadata
raw_metadata = dict(raw_metadata)
raw_metadata.pop("integration_credentials", None)
raw_metadata.pop("integration_oauth_states", None)
# Update metadata without integration data
await User.prisma().update(
where={"id": user.id},
data={"metadata": Json(raw_metadata)},
)
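# --- Usage sketch (an assumption, not part of the original file): these
# helpers are coroutines and need a connected Prisma client, so a standalone
# call looks roughly like:
#
#     import asyncio
#     from backend.data.db import prisma
#
#     async def main():
#         await prisma.connect()
#         user = await get_or_create_user({"sub": "user-123", "email": "user@example.com"})
#         print(user.id)
#         await prisma.disconnect()
#
#     asyncio.run(main())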
|
from typing import Dict, List
import numpy as np
import pytest
from orjson import orjson
from docarray import DocList
from docarray.base_doc import AnyDoc, BaseDoc
from docarray.base_doc.io.json import orjson_dumps_and_decode
from docarray.typing import NdArray
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.pydantic import is_pydantic_v2
def test_any_doc():
class InnerDocument(BaseDoc):
text: str
tensor: NdArray
class CustomDoc(BaseDoc):
inner: InnerDocument
text: str
doc = CustomDoc(
text='bye', inner=InnerDocument(text='hello', tensor=np.zeros((3, 224, 224)))
)
any_doc = AnyDoc(**doc.__dict__)
assert any_doc.text == doc.text
assert any_doc.inner.text == doc.inner.text
assert (any_doc.inner.tensor == doc.inner.tensor).all()
@pytest.mark.parametrize('protocol', ['proto', 'json'])
def test_any_document_from_to(protocol):
class InnerDoc(BaseDoc):
text: str
t: Dict[str, str]
class DocTest(BaseDoc):
text: str
tags: Dict[str, int]
l_: List[int]
d: InnerDoc
ld: DocList[InnerDoc]
inner_doc = InnerDoc(text='I am inner', t={'a': 'b'})
da = DocList[DocTest](
[
DocTest(
text='type1',
tags={'type': 1},
l_=[1, 2],
d=inner_doc,
ld=DocList[InnerDoc]([inner_doc]),
),
DocTest(
text='type2',
tags={'type': 2},
l_=[1, 2],
d=inner_doc,
ld=DocList[InnerDoc]([inner_doc]),
),
]
)
from docarray.base_doc import AnyDoc
if protocol == 'proto':
aux = DocList[AnyDoc].from_protobuf(da.to_protobuf())
else:
aux = DocList[AnyDoc].from_json(da.to_json())
assert len(aux) == 2
assert len(aux.id) == 2
for i, d in enumerate(aux):
assert d.tags['type'] == i + 1
assert d.text == f'type{i + 1}'
assert d.l_ == [1, 2]
if protocol == 'proto':
assert isinstance(d.d, AnyDoc)
assert d.d.text == 'I am inner' # inner Document is a Dict
assert d.d.t == {'a': 'b'}
else:
assert isinstance(d.d, dict)
assert d.d['text'] == 'I am inner' # inner Document is a Dict
assert d.d['t'] == {'a': 'b'}
assert len(d.ld) == 1
if protocol == 'proto':
assert isinstance(d.ld[0], AnyDoc)
assert d.ld[0].text == 'I am inner'
assert d.ld[0].t == {'a': 'b'}
else:
assert isinstance(d.ld[0], dict)
assert d.ld[0]['text'] == 'I am inner'
assert d.ld[0]['t'] == {'a': 'b'}
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_subclass_config():
class MyDoc(BaseDoc):
x: str
class Config(BaseDoc.Config):
arbitrary_types_allowed = True # just an example setting
assert MyDoc.Config.json_loads == orjson.loads
assert MyDoc.Config.json_dumps == orjson_dumps_and_decode
assert (
MyDoc.Config.json_encoders[AbstractTensor](3) == 3
) # dirty check that it is identity
assert MyDoc.Config.validate_assignment
assert not MyDoc.Config._load_extra_fields_from_protobuf
assert MyDoc.Config.arbitrary_types_allowed
|
from typing import Dict, List
import numpy as np
import pytest
from orjson import orjson
from docarray import DocList
from docarray.base_doc import AnyDoc, BaseDoc
from docarray.base_doc.io.json import orjson_dumps_and_decode
from docarray.typing import NdArray
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def test_any_doc():
class InnerDocument(BaseDoc):
text: str
tensor: NdArray
class CustomDoc(BaseDoc):
inner: InnerDocument
text: str
doc = CustomDoc(
text='bye', inner=InnerDocument(text='hello', tensor=np.zeros((3, 224, 224)))
)
any_doc = AnyDoc(**doc.__dict__)
assert any_doc.text == doc.text
assert any_doc.inner.text == doc.inner.text
assert (any_doc.inner.tensor == doc.inner.tensor).all()
@pytest.mark.parametrize('protocol', ['proto', 'json'])
def test_any_document_from_to(protocol):
class InnerDoc(BaseDoc):
text: str
t: Dict[str, str]
class DocTest(BaseDoc):
text: str
tags: Dict[str, int]
l_: List[int]
d: InnerDoc
ld: DocList[InnerDoc]
inner_doc = InnerDoc(text='I am inner', t={'a': 'b'})
da = DocList[DocTest](
[
DocTest(
text='type1',
tags={'type': 1},
l_=[1, 2],
d=inner_doc,
ld=DocList[InnerDoc]([inner_doc]),
),
DocTest(
text='type2',
tags={'type': 2},
l_=[1, 2],
d=inner_doc,
ld=DocList[InnerDoc]([inner_doc]),
),
]
)
from docarray.base_doc import AnyDoc
if protocol == 'proto':
aux = DocList[AnyDoc].from_protobuf(da.to_protobuf())
else:
aux = DocList[AnyDoc].from_json(da.to_json())
assert len(aux) == 2
assert len(aux.id) == 2
for i, d in enumerate(aux):
assert d.tags['type'] == i + 1
assert d.text == f'type{i + 1}'
assert d.l_ == [1, 2]
if protocol == 'proto':
assert isinstance(d.d, AnyDoc)
assert d.d.text == 'I am inner' # inner Document is a Dict
assert d.d.t == {'a': 'b'}
else:
assert isinstance(d.d, dict)
assert d.d['text'] == 'I am inner' # inner Document is a Dict
assert d.d['t'] == {'a': 'b'}
assert len(d.ld) == 1
if protocol == 'proto':
assert isinstance(d.ld[0], AnyDoc)
assert d.ld[0].text == 'I am inner'
assert d.ld[0].t == {'a': 'b'}
else:
assert isinstance(d.ld[0], dict)
assert d.ld[0]['text'] == 'I am inner'
assert d.ld[0]['t'] == {'a': 'b'}
def test_subclass_config():
class MyDoc(BaseDoc):
x: str
class Config(BaseDoc.Config):
arbitrary_types_allowed = True # just an example setting
assert MyDoc.Config.json_loads == orjson.loads
assert MyDoc.Config.json_dumps == orjson_dumps_and_decode
assert (
MyDoc.Config.json_encoders[AbstractTensor](3) == 3
) # dirty check that it is identity
assert MyDoc.Config.validate_assignment
assert not MyDoc.Config._load_extra_fields_from_protobuf
assert MyDoc.Config.arbitrary_types_allowed
|
from contextlib import contextmanager
from functools import partial
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import skipIfNoModule, TorchaudioTestCase
from .utils import MockCustomDataset, MockDataloader, MockSentencePieceProcessor
if is_module_available("pytorch_lightning", "sentencepiece"):
from asr.emformer_rnnt.mustc.lightning import MuSTCRNNTModule
class MockMUSTC:
def __init__(self, *args, **kwargs):
pass
def __getitem__(self, n: int):
return (
torch.rand(1, 32640),
"sup",
)
def __len__(self):
return 10
@contextmanager
def get_lightning_module():
with patch("sentencepiece.SentencePieceProcessor", new=partial(MockSentencePieceProcessor, num_symbols=500)), patch(
"asr.emformer_rnnt.mustc.lightning.GlobalStatsNormalization", new=torch.nn.Identity
), patch("asr.emformer_rnnt.mustc.lightning.MUSTC", new=MockMUSTC), patch(
"asr.emformer_rnnt.mustc.lightning.CustomDataset", new=MockCustomDataset
), patch(
"torch.utils.data.DataLoader", new=MockDataloader
):
yield MuSTCRNNTModule(
mustc_path="mustc_path",
sp_model_path="sp_model_path",
global_stats_path="global_stats_path",
)
@skipIfNoModule("pytorch_lightning")
@skipIfNoModule("sentencepiece")
class TestMuSTCRNNTModule(TorchaudioTestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
@parameterized.expand(
[
("training_step", "train_dataloader"),
("validation_step", "val_dataloader"),
("test_step", "test_common_dataloader"),
("test_step", "test_he_dataloader"),
]
)
def test_step(self, step_fname, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
getattr(lightning_module, step_fname)(batch, 0)
@parameterized.expand(
[
("val_dataloader",),
]
)
def test_forward(self, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
lightning_module(batch)
|
from contextlib import contextmanager
from functools import partial
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import skipIfNoModule, TorchaudioTestCase
from .utils import MockCustomDataset, MockDataloader, MockSentencePieceProcessor
if is_module_available("pytorch_lightning", "sentencepiece"):
from asr.emformer_rnnt.mustc.lightning import MuSTCRNNTModule
class MockMUSTC:
def __init__(self, *args, **kwargs):
pass
def __getitem__(self, n: int):
return (
torch.rand(1, 32640),
"sup",
)
def __len__(self):
return 10
@contextmanager
def get_lightning_module():
with patch("sentencepiece.SentencePieceProcessor", new=partial(MockSentencePieceProcessor, num_symbols=500)), patch(
"asr.emformer_rnnt.mustc.lightning.GlobalStatsNormalization", new=torch.nn.Identity
), patch("asr.emformer_rnnt.mustc.lightning.MUSTC", new=MockMUSTC), patch(
"asr.emformer_rnnt.mustc.lightning.CustomDataset", new=MockCustomDataset
), patch(
"torch.utils.data.DataLoader", new=MockDataloader
):
yield MuSTCRNNTModule(
mustc_path="mustc_path",
sp_model_path="sp_model_path",
global_stats_path="global_stats_path",
)
@skipIfNoModule("pytorch_lightning")
@skipIfNoModule("sentencepiece")
class TestMuSTCRNNTModule(TorchaudioTestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
torch.random.manual_seed(31)
@parameterized.expand(
[
("training_step", "train_dataloader"),
("validation_step", "val_dataloader"),
("test_step", "test_common_dataloader"),
("test_step", "test_he_dataloader"),
]
)
def test_step(self, step_fname, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
getattr(lightning_module, step_fname)(batch, 0)
@parameterized.expand(
[
("val_dataloader",),
]
)
def test_forward(self, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
lightning_module(batch)
|
import numpy as np
import torch
from docarray import BaseDocument
from docarray.document import AnyDocument
from docarray.typing import (
AnyUrl,
Embedding,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchTensor,
)
def test_proto_all_types():
class Mymmdoc(BaseDocument):
tensor: NdArray
torch_tensor: TorchTensor
embedding: Embedding
any_url: AnyUrl
image_url: ImageUrl
text_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
doc = Mymmdoc(
tensor=np.zeros((3, 224, 224)),
torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((100, 1)),
any_url='http://jina.ai',
image_url='http://jina.ai/bla.jpg',
text_url='http://jina.ai',
mesh_url='http://jina.ai/mesh.obj',
point_cloud_url='http://jina.ai/mesh.obj',
)
new_doc = AnyDocument.from_protobuf(doc.to_protobuf())
for field, value in new_doc:
if field == 'embedding':
# embedding is a Union type, not supported by isinstance
assert isinstance(value, np.ndarray) or isinstance(value, torch.Tensor)
else:
assert isinstance(value, doc._get_nested_document_class(field))
|
import numpy as np
import torch
from docarray import Document
from docarray.document import AnyDocument
from docarray.typing import (
AnyUrl,
Embedding,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchTensor,
)
def test_proto_all_types():
class Mymmdoc(Document):
tensor: NdArray
torch_tensor: TorchTensor
embedding: Embedding
any_url: AnyUrl
image_url: ImageUrl
text_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
doc = Mymmdoc(
tensor=np.zeros((3, 224, 224)),
torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((100, 1)),
any_url='http://jina.ai',
image_url='http://jina.ai/bla.jpg',
text_url='http://jina.ai',
mesh_url='http://jina.ai/mesh.obj',
point_cloud_url='http://jina.ai/mesh.obj',
)
new_doc = AnyDocument.from_protobuf(doc.to_protobuf())
for field, value in new_doc:
if field == 'embedding':
# embedding is a Union type, not supported by isinstance
assert isinstance(value, np.ndarray) or isinstance(value, torch.Tensor)
else:
assert isinstance(value, doc._get_nested_document_class(field))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_panoptic_fusion_head import \
BasePanopticFusionHead # noqa: F401,F403
from .heuristic_fusion_head import HeuristicFusionHead # noqa: F401,F403
from .maskformer_fusion_head import MaskFormerFusionHead # noqa: F401,F403
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_panoptic_fusion_head import \
BasePanopticFusionHead # noqa: F401,F403
from .heuristic_fusion_head import HeuristicFusionHead # noqa: F401,F403
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import uuid
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml_v7 = os.path.abspath(os.path.join(cur_dir, 'v7/docker-compose.yml'))
compose_yml_v8 = os.path.abspath(os.path.join(cur_dir, 'v8/docker-compose.yml'))
@pytest.fixture(scope='module', autouse=True)
def start_storage_v7():
os.system(f"docker-compose -f {compose_yml_v7} up -d --remove-orphans")
_wait_for_es()
yield
os.system(f"docker-compose -f {compose_yml_v7} down --remove-orphans")
@pytest.fixture(scope='module', autouse=True)
def start_storage_v8():
os.system(f"docker-compose -f {compose_yml_v8} up -d --remove-orphans")
_wait_for_es()
yield
os.system(f"docker-compose -f {compose_yml_v8} down --remove-orphans")
def _wait_for_es():
from elasticsearch import Elasticsearch
es = Elasticsearch(hosts='http://localhost:9200/')
while not es.ping():
time.sleep(0.5)
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dims=1000)
class FlatDoc(BaseDoc):
tens_one: NdArray = Field(dims=10)
tens_two: NdArray = Field(dims=50)
class NestedDoc(BaseDoc):
d: SimpleDoc
class DeepNestedDoc(BaseDoc):
d: NestedDoc
class MyImageDoc(ImageDoc):
embedding: NdArray = Field(dims=128)
@pytest.fixture(scope='function')
def ten_simple_docs():
return [SimpleDoc(tens=np.random.randn(10)) for _ in range(10)]
@pytest.fixture(scope='function')
def ten_flat_docs():
return [
FlatDoc(tens_one=np.random.randn(10), tens_two=np.random.randn(50))
for _ in range(10)
]
@pytest.fixture(scope='function')
def ten_nested_docs():
return [NestedDoc(d=SimpleDoc(tens=np.random.randn(10))) for _ in range(10)]
@pytest.fixture(scope='function')
def ten_deep_nested_docs():
return [
DeepNestedDoc(d=NestedDoc(d=SimpleDoc(tens=np.random.randn(10))))
for _ in range(10)
]
@pytest.fixture(scope='function')
def tmp_index_name():
return uuid.uuid4().hex
|
import os
import time
import uuid
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml_v7 = os.path.abspath(os.path.join(cur_dir, 'v7/docker-compose.yml'))
compose_yml_v8 = os.path.abspath(os.path.join(cur_dir, 'v8/docker-compose.yml'))
@pytest.fixture(scope='module', autouse=True)
def start_storage_v7():
os.system(f"docker-compose -f {compose_yml_v7} up -d --remove-orphans")
_wait_for_es()
yield
os.system(f"docker-compose -f {compose_yml_v7} down --remove-orphans")
@pytest.fixture(scope='module', autouse=True)
def start_storage_v8():
os.system(f"docker-compose -f {compose_yml_v8} up -d --remove-orphans")
_wait_for_es()
yield
os.system(f"docker-compose -f {compose_yml_v8} down --remove-orphans")
def _wait_for_es():
from elasticsearch import Elasticsearch
es = Elasticsearch(hosts='http://localhost:9200/')
while not es.ping():
time.sleep(0.5)
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dims=1000)
class FlatDoc(BaseDoc):
tens_one: NdArray = Field(dims=10)
tens_two: NdArray = Field(dims=50)
class NestedDoc(BaseDoc):
d: SimpleDoc
class DeepNestedDoc(BaseDoc):
d: NestedDoc
class MyImageDoc(ImageDoc):
embedding: NdArray = Field(dims=128)
@pytest.fixture(scope='function')
def ten_simple_docs():
return [SimpleDoc(tens=np.random.randn(10)) for _ in range(10)]
@pytest.fixture(scope='function')
def ten_flat_docs():
return [
FlatDoc(tens_one=np.random.randn(10), tens_two=np.random.randn(50))
for _ in range(10)
]
@pytest.fixture(scope='function')
def ten_nested_docs():
return [NestedDoc(d=SimpleDoc(tens=np.random.randn(10))) for _ in range(10)]
@pytest.fixture(scope='function')
def ten_deep_nested_docs():
return [
DeepNestedDoc(d=NestedDoc(d=SimpleDoc(tens=np.random.randn(10))))
for _ in range(10)
]
@pytest.fixture(scope='function')
def tmp_index_name():
return uuid.uuid4().hex
|
from backend.blocks.nvidia._auth import (
NvidiaCredentials,
NvidiaCredentialsField,
NvidiaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from backend.util.type import MediaFileType
class NvidiaDeepfakeDetectBlock(Block):
class Input(BlockSchema):
credentials: NvidiaCredentialsInput = NvidiaCredentialsField()
image_base64: MediaFileType = SchemaField(
description="Image to analyze for deepfakes",
)
return_image: bool = SchemaField(
description="Whether to return the processed image with markings",
default=False,
)
class Output(BlockSchema):
status: str = SchemaField(
description="Detection status (SUCCESS, ERROR, CONTENT_FILTERED)",
)
image: MediaFileType = SchemaField(
description="Processed image with detection markings (if return_image=True)",
)
is_deepfake: float = SchemaField(
description="Probability that the image is a deepfake (0-1)",
)
def __init__(self):
super().__init__(
id="8c7d0d67-e79c-44f6-92a1-c2600c8aac7f",
description="Detects potential deepfakes in images using Nvidia's AI API",
categories={BlockCategory.SAFETY},
input_schema=NvidiaDeepfakeDetectBlock.Input,
output_schema=NvidiaDeepfakeDetectBlock.Output,
)
async def run(
self, input_data: Input, *, credentials: NvidiaCredentials, **kwargs
) -> BlockOutput:
url = "https://ai.api.nvidia.com/v1/cv/hive/deepfake-image-detection"
headers = {
"accept": "application/json",
"content-type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
image_data = f"data:image/jpeg;base64,{input_data.image_base64}"
payload = {
"input": [image_data],
"return_image": input_data.return_image,
}
try:
response = await Requests().post(url, headers=headers, json=payload)
data = response.json()
result = data.get("data", [{}])[0]
# Get deepfake probability from first bounding box if any
deepfake_prob = 0.0
if result.get("bounding_boxes"):
deepfake_prob = result["bounding_boxes"][0].get("is_deepfake", 0.0)
yield "status", result.get("status", "ERROR")
yield "is_deepfake", deepfake_prob
if input_data.return_image:
image_data = result.get("image", "")
output_data = f"data:image/jpeg;base64,{image_data}"
yield "image", output_data
else:
yield "image", ""
except Exception as e:
yield "error", str(e)
yield "status", "ERROR"
yield "is_deepfake", 0.0
yield "image", ""
|
from backend.blocks.nvidia._auth import (
NvidiaCredentials,
NvidiaCredentialsField,
NvidiaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from backend.util.type import MediaFileType
class NvidiaDeepfakeDetectBlock(Block):
class Input(BlockSchema):
credentials: NvidiaCredentialsInput = NvidiaCredentialsField()
image_base64: MediaFileType = SchemaField(
description="Image to analyze for deepfakes",
)
return_image: bool = SchemaField(
description="Whether to return the processed image with markings",
default=False,
)
class Output(BlockSchema):
status: str = SchemaField(
description="Detection status (SUCCESS, ERROR, CONTENT_FILTERED)",
)
image: MediaFileType = SchemaField(
description="Processed image with detection markings (if return_image=True)",
)
is_deepfake: float = SchemaField(
description="Probability that the image is a deepfake (0-1)",
)
def __init__(self):
super().__init__(
id="8c7d0d67-e79c-44f6-92a1-c2600c8aac7f",
description="Detects potential deepfakes in images using Nvidia's AI API",
categories={BlockCategory.SAFETY},
input_schema=NvidiaDeepfakeDetectBlock.Input,
output_schema=NvidiaDeepfakeDetectBlock.Output,
)
def run(
self, input_data: Input, *, credentials: NvidiaCredentials, **kwargs
) -> BlockOutput:
url = "https://ai.api.nvidia.com/v1/cv/hive/deepfake-image-detection"
headers = {
"accept": "application/json",
"content-type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
image_data = f"data:image/jpeg;base64,{input_data.image_base64}"
payload = {
"input": [image_data],
"return_image": input_data.return_image,
}
try:
response = Requests().post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
result = data.get("data", [{}])[0]
# Get deepfake probability from first bounding box if any
deepfake_prob = 0.0
if result.get("bounding_boxes"):
deepfake_prob = result["bounding_boxes"][0].get("is_deepfake", 0.0)
yield "status", result.get("status", "ERROR")
yield "is_deepfake", deepfake_prob
if input_data.return_image:
image_data = result.get("image", "")
output_data = f"data:image/jpeg;base64,{image_data}"
yield "image", output_data
else:
yield "image", ""
except Exception as e:
yield "error", str(e)
yield "status", "ERROR"
yield "is_deepfake", 0.0
yield "image", ""
|
# Owner(s): ["module: inductor"]
import unittest
import torch
from torch._inductor import config
from torch._inductor.test_case import run_tests, TestCase
from torch.testing._internal.common_cuda import TEST_CUDA
class MatMulModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.matrix = torch.nn.Parameter(torch.eye(128, 128) * 2, requires_grad=True)
def forward(self, x):
return torch.matmul(x, self.matrix)
# torch.add performs better than torch.mm and got chosen during tuning
def matmul_cpu(a: torch.Tensor, b: torch.Tensor, out: torch.Tensor) -> None:
torch.add(a, b, out=out)
def matmul_dup(a: torch.Tensor, b: torch.Tensor, out: torch.Tensor) -> None:
torch.add(a, b, out=out)
def matmul_cuda(a: torch.Tensor, b: torch.Tensor, out: torch.Tensor) -> None:
torch.add(a, b, out=out)
class TestInductorExternalCallable(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._saved_config = config.save_config()
def tearDown(self):
super().tearDown()
config.load_config(self._saved_config)
def test_matmul_cpu(self):
# 2I + 2I == (2I)(2I)
x = torch.eye(128, 128) * 2
opt_fn = torch.compile(
MatMulModule(),
options={"max_autotune": True, "external_matmul": [matmul_cpu]},
)
opt_fn_golden = torch.compile(MatMulModule(), options={"max_autotune": True})
torch.testing.assert_close(
opt_fn(x),
opt_fn_golden(x),
msg=f"torch.compile(..., external_matmul = {matmul_cpu}) failed",
)
def test_matmul_dup(self):
# 2I + 2I == (2I)(2I)
x = torch.eye(128, 128) * 2
# This should only register the first external call
opt_fn = torch.compile(
MatMulModule(),
options={"max_autotune": True, "external_matmul": [matmul_dup, matmul_dup]},
)
opt_fn_golden = torch.compile(MatMulModule(), options={"max_autotune": True})
torch.testing.assert_close(
opt_fn(x),
opt_fn_golden(x),
msg=f"torch.compile(..., external_matmul = {matmul_dup}) failed",
)
@unittest.skipIf(not TEST_CUDA, "CUDA not found")
@unittest.skipIf(
torch.cuda.is_available() and torch.cuda.get_device_capability() < (7, 0),
"Triton does not support device capability < 7.0",
)
def test_matmul_cuda(self):
device = torch.device("cuda")
x = (torch.eye(128, 128) * 2).to(device=device)
opt_fn = torch.compile(
MatMulModule().to(device),
options={"max_autotune": True, "external_matmul": [matmul_cuda]},
)
opt_fn_golden = torch.compile(
MatMulModule().to(device), options={"max_autotune": True}
)
torch.testing.assert_close(
opt_fn(x),
opt_fn_golden(x),
msg=f"torch.compile(..., external_matmul = {matmul_cuda}) failed",
)
if __name__ == "__main__":
run_tests()
|
# Owner(s): ["module: inductor"]
import unittest
import torch
from torch._inductor import config
from torch._inductor.test_case import run_tests, TestCase
from torch.testing._internal.common_cuda import TEST_CUDA
class MatMulModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.matrix = torch.nn.Parameter(torch.eye(128, 128) * 2, requires_grad=True)
def forward(self, x):
return torch.matmul(x, self.matrix)
# torch.add performs better than torch.mm and got chosen during tuning
def matmul_cpu(a: torch.Tensor, b: torch.Tensor, out: torch.Tensor) -> None:
torch.add(a, b, out=out)
def matmul_dup(a: torch.Tensor, b: torch.Tensor, out: torch.Tensor) -> None:
torch.add(a, b, out=out)
def matmul_cuda(a: torch.Tensor, b: torch.Tensor, out: torch.Tensor) -> None:
torch.add(a, b, out=out)
class TestInductorExternalCallable(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._saved_config = config.save_config()
def tearDown(self):
super().tearDown()
config.load_config(self._saved_config)
def test_matmul_cpu(self):
# 2I + 2I == (2I)(2I)
x = torch.eye(128, 128) * 2
opt_fn = torch.compile(
MatMulModule(),
options={"max_autotune": True, "external_matmul": [matmul_cpu]},
)
opt_fn_golden = torch.compile(MatMulModule(), options={"max_autotune": True})
torch.testing.assert_close(
opt_fn(x),
opt_fn_golden(x),
msg=f"torch.compile(..., external_matmul = {matmul_cpu}) failed",
)
def test_matmul_dup(self):
# 2I + 2I == (2I)(2I)
x = torch.eye(128, 128) * 2
# This should only register the first external call
opt_fn = torch.compile(
MatMulModule(),
options={"max_autotune": True, "external_matmul": [matmul_dup, matmul_dup]},
)
opt_fn_golden = torch.compile(MatMulModule(), options={"max_autotune": True})
torch.testing.assert_close(
opt_fn(x),
opt_fn_golden(x),
msg=f"torch.compile(..., external_matmul = {matmul_dup}) failed",
)
@unittest.skipIf(not TEST_CUDA, "CUDA not found")
@unittest.skipIf(
torch.cuda.is_available() and torch.cuda.get_device_capability() < (7, 0),
"Triton does not support device capability < 7.0",
)
def test_matmul_cuda(self):
device = torch.device("cuda")
x = (torch.eye(128, 128) * 2).to(device=device)
opt_fn = torch.compile(
MatMulModule().to(device),
options={"max_autotune": True, "external_matmul": [matmul_cuda]},
)
opt_fn_golden = torch.compile(
MatMulModule().to(device), options={"max_autotune": True}
)
torch.testing.assert_close(
opt_fn(x),
opt_fn_golden(x),
msg=f"torch.compile(..., external_matmul = {matmul_cuda}) failed",
)
if __name__ == "__main__":
run_tests()
|
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
from .source_separation_pipeline import CONVTASNET_BASE_LIBRI2MIX
__all__ = [
"CONVTASNET_BASE_LIBRI2MIX",
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
]
|
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
]
|
from typing import Iterable, Dict
from docarray.array.storage.annlite.helper import OffsetMapping
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray.array.memory import DocumentArrayInMemory
from docarray import Document, DocumentArray
class GetSetDelMixin(BaseGetSetDelMixin):
"""Implement required and derived functions that power `getitem`, `setitem`, `delitem`"""
# essential methods start
def _get_doc_by_id(self, _id: str) -> 'Document':
doc = self._annlite.get_doc_by_id(_id)
if doc is None:
raise KeyError(f'Can not find Document with id=`{_id}`')
return doc
def _set_doc_by_id(self, _id: str, value: 'Document'):
if _id != value.id:
self._del_doc_by_id(_id)
value.embedding = self._map_embedding(value.embedding)
docs = DocumentArrayInMemory([value])
self._annlite.update(docs)
def _del_doc_by_id(self, _id: str):
# delete the root document
self._del_docs_by_ids([_id])
def _clear_storage(self):
self._annlite.clear()
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
for _id, doc in zip(ids, docs):
doc.embedding = self._map_embedding(doc.embedding)
self._set_doc_by_id(_id, doc)
def _del_docs_by_ids(self, ids):
self._annlite.delete(ids)
def __del__(self) -> None:
self._annlite.close()
def _load_offset2ids(self):
self._offsetmapping = OffsetMapping(
data_path=self._config.data_path, in_memory=False
)
self._offsetmapping.create_table()
self._offset2ids = Offset2ID(self._offsetmapping.get_all_ids())
def _save_offset2ids(self):
self._offsetmapping.drop()
self._offsetmapping.create_table()
self._offsetmapping._insert(
[(i, doc_id) for i, doc_id in enumerate(self._offset2ids.ids)]
)
|
from typing import Iterable, Dict
from docarray.array.storage.annlite.helper import OffsetMapping
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray.array.memory import DocumentArrayInMemory
from docarray import Document, DocumentArray
class GetSetDelMixin(BaseGetSetDelMixin):
"""Implement required and derived functions that power `getitem`, `setitem`, `delitem`"""
# essential methods start
def _get_doc_by_id(self, _id: str) -> 'Document':
doc = self._annlite.get_doc_by_id(_id)
if doc is None:
raise KeyError(f'Can not find Document with id=`{_id}`')
return doc
def _set_doc_by_id(self, _id: str, value: 'Document'):
if _id != value.id:
self._del_doc_by_id(_id)
value.embedding = self._map_embedding(value.embedding)
docs = DocumentArrayInMemory([value])
self._annlite.update(docs)
def _del_doc_by_id(self, _id: str):
# delete the root document
self._del_docs_by_ids([_id])
def _clear_storage(self):
self._annlite.clear()
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
for _id, doc in zip(ids, docs):
doc.embedding = self._map_embedding(doc.embedding)
self._set_doc_by_id(_id, doc)
def _del_docs_by_ids(self, ids):
self._annlite.delete(ids)
def __del__(self) -> None:
if not self._persist:
self._offset2ids.clear()
self._annlite.clear()
self._annlite.close()
super().__del__()
def _load_offset2ids(self):
self._offsetmapping = OffsetMapping(
data_path=self._config.data_path, in_memory=False
)
self._offsetmapping.create_table()
self._offset2ids = Offset2ID(self._offsetmapping.get_all_ids())
def _save_offset2ids(self):
self._offsetmapping.drop()
self._offsetmapping.create_table()
self._offsetmapping._insert(
[(i, doc_id) for i, doc_id in enumerate(self._offset2ids.ids)]
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class DETR(SingleStageDetector):
r"""Implementation of `DETR: End-to-End Object Detection with
Transformers <https://arxiv.org/pdf/2005.12872>`_"""
def __init__(self,
backbone: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=None,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class DETR(SingleStageDetector):
r"""Implementation of `DETR: End-to-End Object Detection with
Transformers <https://arxiv.org/pdf/2005.12872>`_"""
def __init__(self,
backbone,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
# over-write `forward_dummy` because:
# the forward of bbox_head requires img_metas
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
warnings.warn('Warning! MultiheadAttention in DETR does not '
'support flops computation! Do not use the '
'results in your papers!')
batch_size, _, height, width = img.shape
dummy_img_metas = [
dict(
batch_input_shape=(height, width),
img_shape=(height, width, 3)) for _ in range(batch_size)
]
x = self.extract_feat(img)
outs = self.bbox_head(x, dummy_img_metas)
return outs
# over-write `onnx_export` because:
# (1) the forward of bbox_head requires img_metas
# (2) the different behavior (e.g. construction of `masks`) between
# torch and ONNX model, during the forward of bbox_head
def onnx_export(self, img, img_metas):
"""Test function for exporting to ONNX, without test time augmentation.
Args:
img (torch.Tensor): input images.
img_metas (list[dict]): List of image information.
Returns:
tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
and class labels of shape [N, num_det].
"""
x = self.extract_feat(img)
# forward of this head requires img_metas
outs = self.bbox_head.forward_onnx(x, img_metas)
# get shape as tensor
img_shape = torch._shape_as_tensor(img)[2:]
img_metas[0]['img_shape_for_onnx'] = img_shape
det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)
return det_bboxes, det_labels
|
from typing import Any, Dict
from backend.data.block import Block
from backend.util.request import Requests
from ._api import Color, CustomerDetails, OrderItem, Profile
class Slant3DBlockBase(Block):
"""Base block class for Slant3D API interactions"""
BASE_URL = "https://www.slant3dapi.com/api"
def _get_headers(self, api_key: str) -> Dict[str, str]:
return {"api-key": api_key, "Content-Type": "application/json"}
async def _make_request(
self, method: str, endpoint: str, api_key: str, **kwargs
) -> Dict:
url = f"{self.BASE_URL}/{endpoint}"
response = await Requests().request(
method=method, url=url, headers=self._get_headers(api_key), **kwargs
)
resp = response.json()
if not response.ok:
error_msg = resp.get("error", "Unknown error")
raise RuntimeError(f"API request failed: {error_msg}")
return resp
async def _check_valid_color(
self, profile: Profile, color: Color, api_key: str
) -> str:
response = await self._make_request(
"GET",
"filament",
api_key,
params={"profile": profile.value, "color": color.value},
)
if profile == Profile.PLA:
color_tag = color.value
else:
color_tag = f"{profile.value.lower()}{color.value.capitalize()}"
valid_tags = [filament["colorTag"] for filament in response["filaments"]]
if color_tag not in valid_tags:
raise ValueError(
f"""Invalid color profile combination {color_tag}.
Valid colors for {profile.value} are:
{','.join([filament['colorTag'].replace(profile.value.lower(), '') for filament in response['filaments'] if filament['profile'] == profile.value])}
"""
)
return color_tag
async def _convert_to_color(
self, profile: Profile, color: Color, api_key: str
) -> str:
return await self._check_valid_color(profile, color, api_key)
async def _format_order_data(
self,
customer: CustomerDetails,
order_number: str,
items: list[OrderItem],
api_key: str,
) -> list[dict[str, Any]]:
"""Helper function to format order data for API requests"""
orders = []
for item in items:
color_tag = await self._convert_to_color(item.profile, item.color, api_key)
order_data = {
"email": customer.email,
"phone": customer.phone,
"name": customer.name,
"orderNumber": order_number,
"filename": item.file_url,
"fileURL": item.file_url,
"bill_to_street_1": customer.address,
"bill_to_city": customer.city,
"bill_to_state": customer.state,
"bill_to_zip": customer.zip,
"bill_to_country_as_iso": customer.country_iso,
"bill_to_is_US_residential": str(customer.is_residential).lower(),
"ship_to_name": customer.name,
"ship_to_street_1": customer.address,
"ship_to_city": customer.city,
"ship_to_state": customer.state,
"ship_to_zip": customer.zip,
"ship_to_country_as_iso": customer.country_iso,
"ship_to_is_US_residential": str(customer.is_residential).lower(),
"order_item_name": item.file_url,
"order_quantity": item.quantity,
"order_image_url": "",
"order_sku": "NOT_USED",
"order_item_color": color_tag,
"profile": item.profile.value,
}
orders.append(order_data)
return orders
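# Standalone sketch of the color-tag convention implemented in
# _check_valid_color above (illustrative enum values, no API access):
if __name__ == "__main__":
    profile_value, color_value = "PETG", "red"  # made-up profile/color values
    print(f"{profile_value.lower()}{color_value.capitalize()}")  # petgRed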
|
from typing import Any, Dict
from backend.data.block import Block
from backend.util.request import Requests
from ._api import Color, CustomerDetails, OrderItem, Profile
class Slant3DBlockBase(Block):
"""Base block class for Slant3D API interactions"""
BASE_URL = "https://www.slant3dapi.com/api"
def _get_headers(self, api_key: str) -> Dict[str, str]:
return {"api-key": api_key, "Content-Type": "application/json"}
def _make_request(self, method: str, endpoint: str, api_key: str, **kwargs) -> Dict:
url = f"{self.BASE_URL}/{endpoint}"
response = Requests().request(
method=method, url=url, headers=self._get_headers(api_key), **kwargs
)
if not response.ok:
error_msg = response.json().get("error", "Unknown error")
raise RuntimeError(f"API request failed: {error_msg}")
return response.json()
def _check_valid_color(self, profile: Profile, color: Color, api_key: str) -> str:
response = self._make_request(
"GET",
"filament",
api_key,
params={"profile": profile.value, "color": color.value},
)
if profile == Profile.PLA:
color_tag = color.value
else:
color_tag = f"{profile.value.lower()}{color.value.capitalize()}"
valid_tags = [filament["colorTag"] for filament in response["filaments"]]
if color_tag not in valid_tags:
raise ValueError(
f"""Invalid color profile combination {color_tag}.
Valid colors for {profile.value} are:
{','.join([filament['colorTag'].replace(profile.value.lower(), '') for filament in response['filaments'] if filament['profile'] == profile.value])}
"""
)
return color_tag
def _convert_to_color(self, profile: Profile, color: Color, api_key: str) -> str:
return self._check_valid_color(profile, color, api_key)
def _format_order_data(
self,
customer: CustomerDetails,
order_number: str,
items: list[OrderItem],
api_key: str,
) -> list[dict[str, Any]]:
"""Helper function to format order data for API requests"""
orders = []
for item in items:
order_data = {
"email": customer.email,
"phone": customer.phone,
"name": customer.name,
"orderNumber": order_number,
"filename": item.file_url,
"fileURL": item.file_url,
"bill_to_street_1": customer.address,
"bill_to_city": customer.city,
"bill_to_state": customer.state,
"bill_to_zip": customer.zip,
"bill_to_country_as_iso": customer.country_iso,
"bill_to_is_US_residential": str(customer.is_residential).lower(),
"ship_to_name": customer.name,
"ship_to_street_1": customer.address,
"ship_to_city": customer.city,
"ship_to_state": customer.state,
"ship_to_zip": customer.zip,
"ship_to_country_as_iso": customer.country_iso,
"ship_to_is_US_residential": str(customer.is_residential).lower(),
"order_item_name": item.file_url,
"order_quantity": item.quantity,
"order_image_url": "",
"order_sku": "NOT_USED",
"order_item_color": self._convert_to_color(
item.profile, item.color, api_key
),
"profile": item.profile.value,
}
orders.append(order_data)
return orders
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
from mmengine.registry import init_default_scope
from mmdet.registry import TASK_UTILS
class TestInterpolateTracklets(TestCase):
@classmethod
def setUpClass(cls):
init_default_scope('mmdet')
cls.cfg = dict(
type='InterpolateTracklets',
min_num_frames=5,
max_num_frames=20,
use_gsi=True,
smooth_tau=10)
def test_init(self):
interpolation = TASK_UTILS.build(self.cfg)
assert interpolation.min_num_frames == 5
assert interpolation.max_num_frames == 20
assert interpolation.use_gsi
assert interpolation.smooth_tau == 10
def test_forward(self):
pred_track = np.random.randn(5, 7)
# set frame_id and target_id
pred_track[:, 0] = np.array([1, 2, 5, 6, 7])
pred_track[:, 1] = 1
interpolation = TASK_UTILS.build(self.cfg)
linked_track = interpolation.forward(pred_track)
assert isinstance(linked_track, np.ndarray)
assert linked_track.shape == (5, 7)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
from mmdet.registry import TASK_UTILS
from mmdet.utils import register_all_modules
class TestInterpolateTracklets(TestCase):
@classmethod
def setUpClass(cls):
register_all_modules()
cls.cfg = dict(
type='InterpolateTracklets',
min_num_frames=5,
max_num_frames=20,
use_gsi=True,
smooth_tau=10)
def test_init(self):
interpolation = TASK_UTILS.build(self.cfg)
assert interpolation.min_num_frames == 5
assert interpolation.max_num_frames == 20
assert interpolation.use_gsi
assert interpolation.smooth_tau == 10
def test_forward(self):
pred_track = np.random.randn(5, 7)
# set frame_id and target_id
pred_track[:, 0] = np.array([1, 2, 5, 6, 7])
pred_track[:, 1] = 1
interpolation = TASK_UTILS.build(self.cfg)
linked_track = interpolation.forward(pred_track)
assert isinstance(linked_track, np.ndarray)
assert linked_track.shape == (5, 7)
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for MobileNetV2."""
from typing import Optional
from ...image_processing_utils_fast import BaseImageProcessorFast
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, PILImageResampling
from ...utils import auto_docstring, is_torch_available, is_torch_tensor
if is_torch_available():
import torch
@auto_docstring
class MobileNetV2ImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"shortest_edge": 256}
default_to_square = False
crop_size = {"height": 224, "width": 224}
do_resize = True
do_center_crop = True
do_rescale = True
do_normalize = True
do_convert_rgb = None
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple]] = None):
"""
Converts the output of [`MobileNetV2ForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.
Args:
outputs ([`MobileNetV2ForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`list[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
            specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
"""
# TODO: add support for other frameworks
logits = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
if is_torch_tensor(target_sizes):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits)):
resized_logits = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = logits.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
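# Standalone sketch of the resize-then-argmax step used above (made-up class
# count and sizes):
if __name__ == "__main__" and is_torch_available():
    logits = torch.randn(1, 21, 32, 32)  # (batch, num_classes, height, width)
    resized = torch.nn.functional.interpolate(
        logits, size=(64, 64), mode="bilinear", align_corners=False
    )
    print(resized.argmax(dim=1).shape)  # torch.Size([1, 64, 64])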
__all__ = ["MobileNetV2ImageProcessorFast"]
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for MobileNetV2."""
from typing import List, Optional, Tuple
from ...image_processing_utils_fast import BaseImageProcessorFast
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, PILImageResampling
from ...utils import auto_docstring, is_torch_available, is_torch_tensor
if is_torch_available():
import torch
@auto_docstring
class MobileNetV2ImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"shortest_edge": 256}
default_to_square = False
crop_size = {"height": 224, "width": 224}
do_resize = True
do_center_crop = True
do_rescale = True
do_normalize = True
do_convert_rgb = None
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
"""
Converts the output of [`MobileNetV2ForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.
Args:
outputs ([`MobileNetV2ForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`List[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
            specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
"""
# TODO: add support for other frameworks
logits = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
if is_torch_tensor(target_sizes):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits)):
resized_logits = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = logits.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
__all__ = ["MobileNetV2ImageProcessorFast"]
|
"""
In SecGPT, all messages exchanged among spokes conform to predefined formats, encapsulated within the Message class.
"""
import json
class Message:
@staticmethod
def function_probe_request(spoke_id, function):
"""
Create a function probe request message.
Args:
spoke_id (str): The ID of the spoke sending the request.
function (str): The functionality being requested.
Returns:
bytes: The JSON-encoded function probe request message.
"""
message = {}
message["message_type"] = "function_probe_request"
message["spoke_id"] = spoke_id
message["requested_functionality"] = function # functionality name str
return json.dumps(message).encode("utf-8")
@staticmethod
def function_probe_response(spoke_id, function):
"""
Create a function probe response message.
Args:
spoke_id (str): The ID of the spoke sending the response.
function (str): The functionality being offered (in JSON format).
Returns:
bytes: The JSON-encoded function probe response message.
"""
message = {}
message["message_type"] = "function_probe_response"
message["spoke_id"] = spoke_id
message["functionality_offered"] = function # should be a json format
return json.dumps(message).encode("utf-8")
@staticmethod
def app_request(spoke_id, function, functionality_request):
"""
Create an application request message.
Args:
spoke_id (str): The ID of the spoke sending the request.
function (str): The functionality being requested.
functionality_request (str): The request body formatted in JSON.
Returns:
bytes: The JSON-encoded application request message.
"""
message = {}
message["message_type"] = "app_request"
message["spoke_id"] = spoke_id
message["functionality_request"] = function
message["request_body"] = functionality_request # format the request with json
return json.dumps(message).encode("utf-8")
@staticmethod
def app_response(spoke_id, functionality_response):
"""
Create an application response message.
Args:
spoke_id (str): The ID of the spoke sending the response.
functionality_response (str): The response body.
Returns:
bytes: The JSON-encoded application response message.
"""
message = {}
message["message_type"] = "app_response"
message["spoke_id"] = spoke_id
message["response"] = functionality_response
return json.dumps(message).encode("utf-8")
@staticmethod
def final_response(spoke_id, final_response):
"""
Create a final response message.
Args:
spoke_id (str): The ID of the spoke sending the final response.
final_response (str): The final response body.
Returns:
bytes: The JSON-encoded final response message.
"""
message = {}
message["message_type"] = "final_response"
message["spoke_id"] = spoke_id
message["response"] = final_response
return json.dumps(message).encode("utf-8")
@staticmethod
def no_functionality_response(spoke_id, functionality_request):
"""
Create a no functionality response message indicating the requested functionality was not found.
Args:
spoke_id (str): The ID of the spoke sending the response.
functionality_request (str): The functionality request that was not found.
Returns:
bytes: The JSON-encoded no functionality response message.
"""
message = {}
message["message_type"] = "no_functionality_response"
message["spoke_id"] = spoke_id
message["response"] = functionality_request + " not found"
return json.dumps(message).encode("utf-8")
@staticmethod
def functionality_denial_response(spoke_id, functionality_request):
"""
Create a functionality denial response message indicating the requested functionality refuses to respond.
Args:
spoke_id (str): The ID of the spoke sending the response.
functionality_request (str): The functionality request that is being denied.
Returns:
bytes: The JSON-encoded functionality denial response message.
"""
message = {}
message["message_type"] = "functionality_denial_response"
message["spoke_id"] = spoke_id
message["response"] = functionality_request + " refuses to respond"
return json.dumps(message).encode("utf-8")
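# Round-trip sketch (the spoke id and functionality name are made up): every
# factory returns UTF-8 encoded JSON, so a receiver can decode it with
# json.loads.
if __name__ == "__main__":
    raw = Message.function_probe_request("spoke-1", "summarize_text")
    decoded = json.loads(raw.decode("utf-8"))
    assert decoded["message_type"] == "function_probe_request"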
|
"""
In SecGPT, all messages exchanged among spokes conform to predefined formats, encapsulated within the Message class.
"""
import json
class Message:
@staticmethod
def function_probe_request(spoke_id, function):
"""
Create a function probe request message.
Args:
spoke_id (str): The ID of the spoke sending the request.
function (str): The functionality being requested.
Returns:
bytes: The JSON-encoded function probe request message.
"""
message = {}
message["message_type"] = "function_probe_request"
message["spoke_id"] = spoke_id
message["requested_functionality"] = function # functionality name str
return json.dumps(message).encode("utf-8")
@staticmethod
def function_probe_response(spoke_id, function):
"""
Create a function probe response message.
Args:
spoke_id (str): The ID of the spoke sending the response.
function (str): The functionality being offered (in JSON format).
Returns:
bytes: The JSON-encoded function probe response message.
"""
message = {}
message["message_type"] = "function_probe_response"
message["spoke_id"] = spoke_id
message["functionality_offered"] = function # should be a json format
return json.dumps(message).encode("utf-8")
@staticmethod
def app_request(spoke_id, function, functionality_request):
"""
Create an application request message.
Args:
spoke_id (str): The ID of the spoke sending the request.
function (str): The functionality being requested.
functionality_request (str): The request body formatted in JSON.
Returns:
bytes: The JSON-encoded application request message.
"""
message = {}
message["message_type"] = "app_request"
message["spoke_id"] = spoke_id
message["functionality_request"] = function
message["request_body"] = functionality_request # format the request with json
return json.dumps(message).encode("utf-8")
@staticmethod
def app_response(spoke_id, functionality_response):
"""
Create an application response message.
Args:
spoke_id (str): The ID of the spoke sending the response.
functionality_response (str): The response body.
Returns:
bytes: The JSON-encoded application response message.
"""
message = {}
message["message_type"] = "app_response"
message["spoke_id"] = spoke_id
message["response"] = functionality_response
return json.dumps(message).encode("utf-8")
@staticmethod
def final_response(spoke_id, final_response):
"""
Create a final response message.
Args:
spoke_id (str): The ID of the spoke sending the final response.
final_response (str): The final response body.
Returns:
bytes: The JSON-encoded final response message.
"""
message = {}
message["message_type"] = "final_response"
message["spoke_id"] = spoke_id
message["response"] = final_response
return json.dumps(message).encode("utf-8")
@staticmethod
def no_functionality_response(spoke_id, functionality_request):
"""
Create a no functionality response message indicating the requested functionality was not found.
Args:
spoke_id (str): The ID of the spoke sending the response.
functionality_request (str): The functionality request that was not found.
Returns:
bytes: The JSON-encoded no functionality response message.
"""
message = {}
message["message_type"] = "no_functionality_response"
message["spoke_id"] = spoke_id
message["response"] = functionality_request + " not found"
return json.dumps(message).encode("utf-8")
@staticmethod
def functionality_denial_response(spoke_id, functionality_request):
"""
Create a functionality denial response message indicating the requested functionality refuses to respond.
Args:
spoke_id (str): The ID of the spoke sending the response.
functionality_request (str): The functionality request that is being denied.
Returns:
bytes: The JSON-encoded functionality denial response message.
"""
message = {}
message["message_type"] = "functionality_denial_response"
message["spoke_id"] = spoke_id
message["response"] = functionality_request + " refuses to respond"
return json.dumps(message).encode("utf-8")
|
import json
import os
from typing import Dict
from torch import Tensor, nn
class Dropout(nn.Module):
"""Dropout layer.
Args:
        dropout: Sets the dropout probability for the dropout layer.
"""
def __init__(self, dropout: float = 0.2):
super(Dropout, self).__init__()
self.dropout = dropout
self.dropout_layer = nn.Dropout(self.dropout)
def forward(self, features: Dict[str, Tensor]):
features.update({"sentence_embedding": self.dropout_layer(features["sentence_embedding"])})
return features
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump({"dropout": self.dropout}, fOut)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = Dropout(**config)
return model
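# Minimal usage sketch (illustrative shapes): the module only transforms the
# "sentence_embedding" entry of the feature dict.
if __name__ == "__main__":
    import torch
    layer = Dropout(dropout=0.5)
    features = {"sentence_embedding": torch.randn(2, 16)}
    print(layer(features)["sentence_embedding"].shape)  # torch.Size([2, 16])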
|
import torch
from torch import Tensor
from torch import nn
from typing import Dict
import os
import json
class Dropout(nn.Module):
"""Dropout layer.
    :param dropout: Sets the dropout probability for the dropout layer.
"""
def __init__(self, dropout: float = 0.2):
super(Dropout, self).__init__()
self.dropout = dropout
self.dropout_layer = nn.Dropout(self.dropout)
def forward(self, features: Dict[str, Tensor]):
features.update({'sentence_embedding': self.dropout_layer(features['sentence_embedding'])})
return features
def save(self, output_path):
with open(os.path.join(output_path, 'config.json'), 'w') as fOut:
json.dump({'dropout': self.dropout}, fOut)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, 'config.json')) as fIn:
config = json.load(fIn)
model = Dropout(**config)
return model
|
_base_ = [
'../_base_/models/cascade_mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa
model = dict(
backbone=dict(
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)))
optim_wrapper = dict(
optimizer=dict(_delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05),
paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
|
_base_ = [
'../_base_/models/cascade_mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa
model = dict(
backbone=dict(
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)))
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.0002,
weight_decay=0.05,
paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# the model is trained from scratch, so init_cfg is None
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2), # leads to 0.1+ mAP
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# the model is trained from scratch, so init_cfg is None
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2), # leads to 0.1+ mAP
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
|
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.slack.base import SlackBaseTool
class SendMessageSchema(BaseModel):
"""Input for SendMessageTool."""
message: str = Field(
...,
description="The message to be sent.",
)
channel: str = Field(
...,
description="The channel, private group, or IM channel to send message to.",
)
class SlackSendMessage(SlackBaseTool):
"""Tool for sending a message in Slack."""
name: str = "send_message"
description: str = (
"Use this tool to send a message with the provided message fields."
)
args_schema: Type[SendMessageSchema] = SendMessageSchema
def _run(
self,
message: str,
channel: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
result = self.client.chat_postMessage(channel=channel, text=message)
output = "Message sent: " + str(result)
return output
except Exception as e:
            return "Error sending message: {}".format(e)
|
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.slack.base import SlackBaseTool
class SendMessageSchema(BaseModel):
"""Input for SendMessageTool."""
message: str = Field(
...,
description="The message to be sent.",
)
channel: str = Field(
...,
description="The channel, private group, or IM channel to send message to.",
)
class SlackSendMessage(SlackBaseTool): # type: ignore[override, override]
"""Tool for sending a message in Slack."""
name: str = "send_message"
description: str = (
"Use this tool to send a message with the provided message fields."
)
args_schema: Type[SendMessageSchema] = SendMessageSchema
def _run(
self,
message: str,
channel: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
result = self.client.chat_postMessage(channel=channel, text=message)
output = "Message sent: " + str(result)
return output
except Exception as e:
            return "Error sending message: {}".format(e)
|
from .conv_emformer import ConvEmformer
from .rnnt import conformer_rnnt_base, conformer_rnnt_model
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"ConvEmformer",
]
|
from .rnnt import conformer_rnnt_base, conformer_rnnt_model
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
]
|
from typing import Any
from typing_inspect import get_args, is_union_type
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
return isinstance(type_, type) and issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or issubclass(t, type(None))) for t in get_args(type_)
)
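# Quick sanity checks (a sketch, assuming docarray's NdArray tensor type is
# importable in this environment):
if __name__ == "__main__":
    from typing import Optional
    from docarray.typing import NdArray
    assert is_type_tensor(NdArray)
    assert is_tensor_union(Optional[NdArray])
    assert not is_tensor_union(Optional[int])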
|
from typing import Any, get_args
from typing_inspect import is_union_type
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
return isinstance(type_, type) and issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or issubclass(t, type(None))) for t in get_args(type_)
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from mmengine.registry import MODELS
from parameterized import parameterized
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
register_all_modules()
class TestSemiBase(TestCase):
@parameterized.expand([
'soft_teacher/'
'soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.1-coco.py',
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
        # convert the backbone to ResNet18
model.detector.backbone.depth = 18
model.detector.neck.in_channels = [64, 128, 256, 512]
model.detector.backbone.init_cfg = None
model = MODELS.build(model)
self.assertTrue(model.teacher.backbone)
self.assertTrue(model.teacher.neck)
self.assertTrue(model.teacher.rpn_head)
self.assertTrue(model.teacher.roi_head)
self.assertTrue(model.student.backbone)
self.assertTrue(model.student.neck)
self.assertTrue(model.student.rpn_head)
self.assertTrue(model.student.roi_head)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from mmengine.registry import MODELS
from parameterized import parameterized
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
register_all_modules()
class TestSemiBase(TestCase):
@parameterized.expand([
'soft_teacher/'
'semi_base_faster-rcnn_r50_caffe_fpn_180k_partial_coco.py',
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
        # convert the backbone to ResNet18
model.detector.backbone.depth = 18
model.detector.neck.in_channels = [64, 128, 256, 512]
model.detector.backbone.init_cfg = None
model = MODELS.build(model)
self.assertTrue(model.teacher.backbone)
self.assertTrue(model.teacher.neck)
self.assertTrue(model.teacher.rpn_head)
self.assertTrue(model.teacher.roi_head)
self.assertTrue(model.student.backbone)
self.assertTrue(model.student.neck)
self.assertTrue(model.student.rpn_head)
self.assertTrue(model.student.roi_head)
|
from torch.utils.data import IterableDataset
import numpy as np
from typing import List
from ..readers import InputExample
import logging
logger = logging.getLogger(__name__)
class SentenceLabelDataset(IterableDataset):
"""
    This dataset can be used for some specific Triplet Losses like BATCH_HARD_TRIPLET_LOSS, which require
    multiple examples with the same label in a batch.
    It draws n consecutive, random and unique samples from one label at a time. This is repeated for each label.
    Labels with fewer than n unique samples are ignored.
    This also applies to drawing without replacement: once fewer than n samples remain for a label, it is skipped.
    This *DOES NOT* check whether there are more labels than the batch size, or whether the batch size is divisible
    by the number of samples drawn per label.
"""
def __init__(self, examples: List[InputExample], samples_per_label: int = 2, with_replacement: bool = False):
"""
Creates a LabelSampler for a SentenceLabelDataset.
Args:
examples (List[InputExample]): A list of InputExamples.
samples_per_label (int, optional): The number of consecutive, random, and unique samples drawn per label.
The batch size should be a multiple of samples_per_label. Defaults to 2.
            with_replacement (bool, optional): If False, each sample is drawn at most once (depending on the total number
                of samples per label). If True, one sample can be drawn in multiple draws, but not multiple times in
                the same drawing. Defaults to False.
"""
super().__init__()
self.samples_per_label = samples_per_label
# Group examples by label
label2ex = {}
for example in examples:
if example.label not in label2ex:
label2ex[example.label] = []
label2ex[example.label].append(example)
# Include only labels with at least 2 examples
self.grouped_inputs = []
self.groups_right_border = []
num_labels = 0
for label, label_examples in label2ex.items():
if len(label_examples) >= self.samples_per_label:
self.grouped_inputs.extend(label_examples)
self.groups_right_border.append(
len(self.grouped_inputs)
) # At which position does this label group / bucket end?
num_labels += 1
self.label_range = np.arange(num_labels)
self.with_replacement = with_replacement
np.random.shuffle(self.label_range)
logger.info(
"SentenceLabelDataset: {} examples, from which {} examples could be used (those labels appeared at least {} times). {} different labels found.".format(
len(examples), len(self.grouped_inputs), self.samples_per_label, num_labels
)
)
def __iter__(self):
label_idx = 0
count = 0
already_seen = {}
while count < len(self.grouped_inputs):
label = self.label_range[label_idx]
if label not in already_seen:
already_seen[label] = set()
left_border = 0 if label == 0 else self.groups_right_border[label - 1]
right_border = self.groups_right_border[label]
if self.with_replacement:
selection = np.arange(left_border, right_border)
else:
selection = [i for i in np.arange(left_border, right_border) if i not in already_seen[label]]
if len(selection) >= self.samples_per_label:
for element_idx in np.random.choice(selection, self.samples_per_label, replace=False):
count += 1
already_seen[label].add(element_idx)
yield self.grouped_inputs[element_idx]
label_idx += 1
if label_idx >= len(self.label_range):
label_idx = 0
already_seen = {}
np.random.shuffle(self.label_range)
def __len__(self):
return len(self.grouped_inputs)
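# Usage sketch (toy data): two consecutive samples share a label, so the
# batch size should be a multiple of samples_per_label.
if __name__ == "__main__":
    toy = [InputExample(texts=[f"sentence {i}"], label=i % 3) for i in range(12)]
    it = iter(SentenceLabelDataset(toy, samples_per_label=2))
    first, second = next(it), next(it)
    print(first.label == second.label)  # True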
|
""" """
from torch.utils.data import IterableDataset
import numpy as np
from typing import List
from ..readers import InputExample
import logging
logger = logging.getLogger(__name__)
class SentenceLabelDataset(IterableDataset):
"""
    This dataset can be used for some specific Triplet Losses like BATCH_HARD_TRIPLET_LOSS, which require
    multiple examples with the same label in a batch.
    It draws n consecutive, random and unique samples from one label at a time. This is repeated for each label.
    Labels with fewer than n unique samples are ignored.
    This also applies to drawing without replacement: once fewer than n samples remain for a label, it is skipped.
    This *DOES NOT* check whether there are more labels than the batch size, or whether the batch size is divisible
    by the number of samples drawn per label.
"""
def __init__(self, examples: List[InputExample], samples_per_label: int = 2, with_replacement: bool = False):
"""
Creates a LabelSampler for a SentenceLabelDataset.
:param examples:
a list with InputExamples
:param samples_per_label:
the number of consecutive, random and unique samples drawn per label. Batch size should be a multiple of samples_per_label
:param with_replacement:
            if this is False, then each sample is drawn at most once (depending on the total number of samples per label).
            if this is True, then one sample can be drawn in multiple draws, but still not multiple times in the same
            drawing.
"""
super().__init__()
self.samples_per_label = samples_per_label
# Group examples by label
label2ex = {}
for example in examples:
if example.label not in label2ex:
label2ex[example.label] = []
label2ex[example.label].append(example)
# Include only labels with at least 2 examples
self.grouped_inputs = []
self.groups_right_border = []
num_labels = 0
for label, label_examples in label2ex.items():
if len(label_examples) >= self.samples_per_label:
self.grouped_inputs.extend(label_examples)
self.groups_right_border.append(
len(self.grouped_inputs)
) # At which position does this label group / bucket end?
num_labels += 1
self.label_range = np.arange(num_labels)
self.with_replacement = with_replacement
np.random.shuffle(self.label_range)
logger.info(
"SentenceLabelDataset: {} examples, from which {} examples could be used (those labels appeared at least {} times). {} different labels found.".format(
len(examples), len(self.grouped_inputs), self.samples_per_label, num_labels
)
)
def __iter__(self):
label_idx = 0
count = 0
already_seen = {}
while count < len(self.grouped_inputs):
label = self.label_range[label_idx]
if label not in already_seen:
already_seen[label] = set()
left_border = 0 if label == 0 else self.groups_right_border[label - 1]
right_border = self.groups_right_border[label]
if self.with_replacement:
selection = np.arange(left_border, right_border)
else:
selection = [i for i in np.arange(left_border, right_border) if i not in already_seen[label]]
if len(selection) >= self.samples_per_label:
for element_idx in np.random.choice(selection, self.samples_per_label, replace=False):
count += 1
already_seen[label].add(element_idx)
yield self.grouped_inputs[element_idx]
label_idx += 1
if label_idx >= len(self.label_range):
label_idx = 0
already_seen = {}
np.random.shuffle(self.label_range)
def __len__(self):
return len(self.grouped_inputs)
|
_base_ = './tood_r50_fpn_ms-2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './tood_r50_fpn_mstrain_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
from typing import Dict, Type
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
RECOGNIZED_EMBEDDINGS: Dict[str, Type[BaseEmbedding]] = {
MockEmbedding.class_name(): MockEmbedding,
}
# conditionals for llama-cloud support
try:
from llama_index.embeddings.openai import OpenAIEmbedding # pants: no-infer-dep
RECOGNIZED_EMBEDDINGS[OpenAIEmbedding.class_name()] = OpenAIEmbedding
except ImportError:
pass
try:
from llama_index.embeddings.azure_openai import (
AzureOpenAIEmbedding,
) # pants: no-infer-dep
RECOGNIZED_EMBEDDINGS[AzureOpenAIEmbedding.class_name()] = AzureOpenAIEmbedding
except ImportError:
pass
try:
from llama_index.embeddings.huggingface_api import (
HuggingFaceInferenceAPIEmbedding,
) # pants: no-infer-dep
RECOGNIZED_EMBEDDINGS[
HuggingFaceInferenceAPIEmbedding.class_name()
] = HuggingFaceInferenceAPIEmbedding
except ImportError:
pass
def load_embed_model(data: dict) -> BaseEmbedding:
"""Load Embedding by name."""
if isinstance(data, BaseEmbedding):
return data
name = data.get("class_name")
if name is None:
raise ValueError("Embedding loading requires a class_name")
if name not in RECOGNIZED_EMBEDDINGS:
raise ValueError(f"Invalid Embedding name: {name}")
return RECOGNIZED_EMBEDDINGS[name].from_dict(data)
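# Round-trip sketch (MockEmbedding is always registered above; embed_dim is an
# arbitrary choice):
if __name__ == "__main__":
    restored = load_embed_model(MockEmbedding(embed_dim=8).to_dict())
    assert isinstance(restored, MockEmbedding)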
|
from typing import Dict, Type
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
RECOGNIZED_EMBEDDINGS: Dict[str, Type[BaseEmbedding]] = {
MockEmbedding.class_name(): MockEmbedding,
}
# conditionals for llama-cloud support
try:
from llama_index.embeddings.openai import OpenAIEmbedding # pants: no-infer-dep
RECOGNIZED_EMBEDDINGS[OpenAIEmbedding.class_name()] = OpenAIEmbedding
except ImportError:
pass
try:
from llama_index.embeddings.azure_openai import (
AzureOpenAIEmbedding,
) # pants: no-infer-dep
RECOGNIZED_EMBEDDINGS[AzureOpenAIEmbedding.class_name()] = AzureOpenAIEmbedding
except ImportError:
pass
try:
from llama_index.embeddings.huggingface_api import (
HuggingFaceInferenceAPIEmbedding,
) # pants: no-infer-dep
RECOGNIZED_EMBEDDINGS[
HuggingFaceInferenceAPIEmbedding.class_name()
] = HuggingFaceInferenceAPIEmbedding
except ImportError:
pass
def load_embed_model(data: dict) -> BaseEmbedding:
"""Load Embedding by name."""
if isinstance(data, BaseEmbedding):
return data
name = data.get("class_name", None)
if name is None:
raise ValueError("Embedding loading requires a class_name")
if name not in RECOGNIZED_EMBEDDINGS:
raise ValueError(f"Invalid Embedding name: {name}")
return RECOGNIZED_EMBEDDINGS[name].from_dict(data)
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pydantic import parse_obj_as
from docarray.typing import ImageBytes, ImageTensor, ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR, '..', '..', 'toydata', 'image-data')
IMAGE_PATHS = {
'png': os.path.join(PATH_TO_IMAGE_DATA, 'so_good.png'),
'jpg': os.path.join(PATH_TO_IMAGE_DATA, '05984.jpg'),
'jpeg': os.path.join(PATH_TO_IMAGE_DATA, '05984-2.jpeg'),
}
def test_bytes_load():
url = parse_obj_as(ImageUrl, IMAGE_PATHS['png'])
tensor = parse_obj_as(ImageTensor, url.load())
bytes_ = parse_obj_as(ImageBytes, tensor.to_bytes())
tensor_new = parse_obj_as(ImageTensor, bytes_.load())
assert (tensor_new == tensor).all()
|
import os
from pydantic import parse_obj_as
from docarray.typing import ImageBytes, ImageTensor, ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR, '..', '..', 'toydata', 'image-data')
IMAGE_PATHS = {
'png': os.path.join(PATH_TO_IMAGE_DATA, 'so_good.png'),
'jpg': os.path.join(PATH_TO_IMAGE_DATA, '05984.jpg'),
'jpeg': os.path.join(PATH_TO_IMAGE_DATA, '05984-2.jpeg'),
}
def test_bytes_load():
url = parse_obj_as(ImageUrl, IMAGE_PATHS['png'])
tensor = parse_obj_as(ImageTensor, url.load())
bytes_ = parse_obj_as(ImageBytes, tensor.to_bytes())
tensor_new = parse_obj_as(ImageTensor, bytes_.load())
assert (tensor_new == tensor).all()
|
import gzip
import logging
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, evaluation, losses, models, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Some training parameters. We use a batch size of 128 and truncate sentences to 75 word pieces.
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 128
epochs = 1
max_seq_length = 75
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "askubuntu"
output_path = "output/train_askubuntu_ct-improved-{}-{}-{}".format(
model_name, batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the dev/test sets
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(InputExample(texts=[sentence, sentence]))
logging.info(f"{len(train_sentences)} train sentences")
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train the model #################
# A standard DataLoader is sufficient for the in-batch-negatives variant of ContrastiveTension
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
# As loss, we use losses.ContrastiveTensionLossInBatchNegatives
train_loss = losses.ContrastiveTensionLossInBatchNegatives(model)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
evaluation_steps=100,
    epochs=epochs,
    warmup_steps=100,
    use_amp=True,  # Set to True if your GPU has optimized FP16 cores
)
latest_output_path = output_path + "-latest"
model.save(latest_output_path)
### Run test evaluation on the latest model. This is equivalent to not having a dev dataset
model = SentenceTransformer(latest_output_path)
test_evaluator(model)
|
import gzip
import logging
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, evaluation, losses, models, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Some training parameters. We use a batch size of 128 and truncate sentences to 75 word pieces.
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 128
epochs = 1
max_seq_length = 75
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "askubuntu"
output_path = "output/train_askubuntu_ct-improved-{}-{}-{}".format(
model_name, batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the dev/test sets
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(InputExample(texts=[sentence, sentence]))
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train the model #################
# A standard DataLoader is sufficient for the in-batch-negatives variant of ContrastiveTension
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
# As loss, we use losses.ContrastiveTensionLossInBatchNegatives
train_loss = losses.ContrastiveTensionLossInBatchNegatives(model)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
evaluation_steps=100,
    epochs=epochs,
    warmup_steps=100,
    use_amp=True,  # Set to True if your GPU has optimized FP16 cores
)
latest_output_path = output_path + "-latest"
model.save(latest_output_path)
### Run test evaluation on the latest model. This is equivalent to not having a dev dataset
model = SentenceTransformer(latest_output_path)
test_evaluator(model)
|
import torch
from mmdet.models.task_modules import embed_similarity
def test_embed_similarity():
"""Test embed similarity."""
embeds = torch.rand(2, 3)
similarity = embed_similarity(embeds, embeds)
assert similarity.shape == (2, 2)
|
import torch
from mmdet.models.task_modules import embed_similarity
def test_embed_similarity():
"""Test embed similarity."""
embeds = torch.rand(2, 3)
similarity = embed_similarity(embeds, embeds)
assert similarity.shape == (2, 2)
    assert torch.allclose(similarity, embeds @ embeds.T)  # default similarity is the dot product
|
from ._dsp import (
adsr_envelope,
extend_pitch,
filter_waveform,
frequency_impulse_response,
oscillator_bank,
sinc_impulse_response,
)
from .functional import barkscale_fbanks
__all__ = [
"adsr_envelope",
"barkscale_fbanks",
"extend_pitch",
"filter_waveform",
"frequency_impulse_response",
"oscillator_bank",
"sinc_impulse_response",
]
|
from ._dsp import (
adsr_envelope,
extend_pitch,
filter_waveform,
frequency_impulse_response,
oscillator_bank,
sinc_impulse_response,
)
from .functional import add_noise, barkscale_fbanks, convolve, deemphasis, fftconvolve, preemphasis, speed
__all__ = [
"add_noise",
"adsr_envelope",
"barkscale_fbanks",
"convolve",
"deemphasis",
"extend_pitch",
"fftconvolve",
"filter_waveform",
"frequency_impulse_response",
"oscillator_bank",
"preemphasis",
"sinc_impulse_response",
"speed",
]
|
"""Standard LangChain interface tests"""
import os
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import AzureChatOpenAI
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
class TestAzureOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"],
"model": "gpt-4o-mini",
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_image_urls(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
@property
def enable_vcr_tests(self) -> bool:
return True
class TestAzureOpenAIStandardLegacy(ChatModelIntegrationTests):
"""Test a legacy model."""
@property
def chat_model_class(self) -> type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME"],
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def structured_output_kwargs(self) -> dict:
return {"method": "function_calling"}
@property
def enable_vcr_tests(self) -> bool:
return True
|
"""Standard LangChain interface tests"""
import os
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import AzureChatOpenAI
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
class TestAzureOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"],
"model": "gpt-4o-mini",
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_image_urls(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
class TestAzureOpenAIStandardLegacy(ChatModelIntegrationTests):
"""Test a legacy model."""
@property
def chat_model_class(self) -> type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME"],
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def structured_output_kwargs(self) -> dict:
return {"method": "function_calling"}
|
from backend.app import run_processes
from backend.executor import DatabaseManager, ExecutionScheduler
from backend.server.rest_api import AgentServer
def main():
"""
Run all the processes required for the AutoGPT-server REST API.
"""
run_processes(
DatabaseManager(),
ExecutionScheduler(),
AgentServer(),
)
if __name__ == "__main__":
main()
|
from backend.app import run_processes
from backend.executor import ExecutionScheduler
from backend.server.rest_api import AgentServer
def main():
"""
Run all the processes required for the AutoGPT-server REST API.
"""
run_processes(
ExecutionScheduler(),
AgentServer(),
)
if __name__ == "__main__":
main()
|
import random
import torch
from processing import bits_to_normalized_waveform, normalized_waveform_to_bits
from torch.utils.data.dataset import random_split
from torchaudio.datasets import LIBRITTS, LJSPEECH
from torchaudio.transforms import MuLawEncoding
class MapMemoryCache(torch.utils.data.Dataset):
r"""Wrap a dataset so that, whenever a new item is returned, it is saved to memory."""
def __init__(self, dataset):
self.dataset = dataset
self._cache = [None] * len(dataset)
def __getitem__(self, n):
if self._cache[n] is not None:
return self._cache[n]
item = self.dataset[n]
self._cache[n] = item
return item
def __len__(self):
return len(self.dataset)
class Processed(torch.utils.data.Dataset):
def __init__(self, dataset, transforms):
self.dataset = dataset
self.transforms = transforms
def __getitem__(self, key):
item = self.dataset[key]
return self.process_datapoint(item)
def __len__(self):
return len(self.dataset)
def process_datapoint(self, item):
specgram = self.transforms(item[0])
return item[0].squeeze(0), specgram
def split_process_dataset(args, transforms):
if args.dataset == "ljspeech":
data = LJSPEECH(root=args.file_path, download=False)
val_length = int(len(data) * args.val_ratio)
lengths = [len(data) - val_length, val_length]
train_dataset, val_dataset = random_split(data, lengths)
elif args.dataset == "libritts":
train_dataset = LIBRITTS(root=args.file_path, url="train-clean-100", download=False)
val_dataset = LIBRITTS(root=args.file_path, url="dev-clean", download=False)
else:
raise ValueError(f"Expected dataset: `ljspeech` or `libritts`, but found {args.dataset}")
train_dataset = Processed(train_dataset, transforms)
val_dataset = Processed(val_dataset, transforms)
train_dataset = MapMemoryCache(train_dataset)
val_dataset = MapMemoryCache(val_dataset)
return train_dataset, val_dataset
def collate_factory(args):
def raw_collate(batch):
pad = (args.kernel_size - 1) // 2
# input waveform length
wave_length = args.hop_length * args.seq_len_factor
# input spectrogram length
spec_length = args.seq_len_factor + pad * 2
        # max start position in spectrogram
max_offsets = [x[1].shape[-1] - (spec_length + pad * 2) for x in batch]
        # random start position in spectrogram
spec_offsets = [random.randint(0, offset) for offset in max_offsets]
        # random start position in waveform
wave_offsets = [(offset + pad) * args.hop_length for offset in spec_offsets]
waveform_combine = [x[0][wave_offsets[i] : wave_offsets[i] + wave_length + 1] for i, x in enumerate(batch)]
specgram = [x[1][:, spec_offsets[i] : spec_offsets[i] + spec_length] for i, x in enumerate(batch)]
specgram = torch.stack(specgram)
waveform_combine = torch.stack(waveform_combine)
waveform = waveform_combine[:, :wave_length]
target = waveform_combine[:, 1:]
# waveform: [-1, 1], target: [0, 2**bits-1] if loss = 'crossentropy'
if args.loss == "crossentropy":
if args.mulaw:
mulaw_encode = MuLawEncoding(2**args.n_bits)
waveform = mulaw_encode(waveform)
target = mulaw_encode(target)
waveform = bits_to_normalized_waveform(waveform, args.n_bits)
else:
target = normalized_waveform_to_bits(target, args.n_bits)
return waveform.unsqueeze(1), specgram.unsqueeze(1), target.unsqueeze(1)
return raw_collate
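# A hedged usage sketch: collate_factory expects an argparse-style namespace with
# the fields read above (kernel_size, hop_length, seq_len_factor, loss, mulaw,
# n_bits); split_process_dataset additionally reads dataset, file_path and
# val_ratio. The values below are illustrative, and `transforms` is assumed to be
# a spectrogram transform such as torchaudio.transforms.MelSpectrogram.
#
# from argparse import Namespace
# from torch.utils.data import DataLoader
# args = Namespace(kernel_size=5, hop_length=200, seq_len_factor=5,
#                  loss="crossentropy", mulaw=True, n_bits=8,
#                  dataset="ljspeech", file_path="./data", val_ratio=0.1)
# train_set, val_set = split_process_dataset(args, transforms)
# loader = DataLoader(train_set, batch_size=32, collate_fn=collate_factory(args))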
|
import random
import torch
from processing import bits_to_normalized_waveform, normalized_waveform_to_bits
from torch.utils.data.dataset import random_split
from torchaudio.datasets import LJSPEECH, LIBRITTS
from torchaudio.transforms import MuLawEncoding
class MapMemoryCache(torch.utils.data.Dataset):
r"""Wrap a dataset so that, whenever a new item is returned, it is saved to memory."""
def __init__(self, dataset):
self.dataset = dataset
self._cache = [None] * len(dataset)
def __getitem__(self, n):
if self._cache[n] is not None:
return self._cache[n]
item = self.dataset[n]
self._cache[n] = item
return item
def __len__(self):
return len(self.dataset)
class Processed(torch.utils.data.Dataset):
def __init__(self, dataset, transforms):
self.dataset = dataset
self.transforms = transforms
def __getitem__(self, key):
item = self.dataset[key]
return self.process_datapoint(item)
def __len__(self):
return len(self.dataset)
def process_datapoint(self, item):
specgram = self.transforms(item[0])
return item[0].squeeze(0), specgram
def split_process_dataset(args, transforms):
if args.dataset == "ljspeech":
data = LJSPEECH(root=args.file_path, download=False)
val_length = int(len(data) * args.val_ratio)
lengths = [len(data) - val_length, val_length]
train_dataset, val_dataset = random_split(data, lengths)
elif args.dataset == "libritts":
train_dataset = LIBRITTS(root=args.file_path, url="train-clean-100", download=False)
val_dataset = LIBRITTS(root=args.file_path, url="dev-clean", download=False)
else:
raise ValueError(f"Expected dataset: `ljspeech` or `libritts`, but found {args.dataset}")
train_dataset = Processed(train_dataset, transforms)
val_dataset = Processed(val_dataset, transforms)
train_dataset = MapMemoryCache(train_dataset)
val_dataset = MapMemoryCache(val_dataset)
return train_dataset, val_dataset
def collate_factory(args):
def raw_collate(batch):
pad = (args.kernel_size - 1) // 2
# input waveform length
wave_length = args.hop_length * args.seq_len_factor
# input spectrogram length
spec_length = args.seq_len_factor + pad * 2
        # max start position in spectrogram
max_offsets = [x[1].shape[-1] - (spec_length + pad * 2) for x in batch]
        # random start position in spectrogram
spec_offsets = [random.randint(0, offset) for offset in max_offsets]
        # random start position in waveform
wave_offsets = [(offset + pad) * args.hop_length for offset in spec_offsets]
waveform_combine = [x[0][wave_offsets[i] : wave_offsets[i] + wave_length + 1] for i, x in enumerate(batch)]
specgram = [x[1][:, spec_offsets[i] : spec_offsets[i] + spec_length] for i, x in enumerate(batch)]
specgram = torch.stack(specgram)
waveform_combine = torch.stack(waveform_combine)
waveform = waveform_combine[:, :wave_length]
target = waveform_combine[:, 1:]
# waveform: [-1, 1], target: [0, 2**bits-1] if loss = 'crossentropy'
if args.loss == "crossentropy":
if args.mulaw:
mulaw_encode = MuLawEncoding(2**args.n_bits)
waveform = mulaw_encode(waveform)
target = mulaw_encode(target)
waveform = bits_to_normalized_waveform(waveform, args.n_bits)
else:
target = normalized_waveform_to_bits(target, args.n_bits)
return waveform.unsqueeze(1), specgram.unsqueeze(1), target.unsqueeze(1)
return raw_collate
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseBinaryClassificationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with two text columns and a class label column (https://huggingface.co/datasets/sentence-transformers/quora-duplicates)
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")
# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_dev",
show_progress_bar=True,
similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
)
results = binary_acc_evaluator(model)
"""
Accuracy with Cosine-Similarity: 74.90 (Threshold: 0.8668)
F1 with Cosine-Similarity: 67.37 (Threshold: 0.5959)
Precision with Cosine-Similarity: 54.15
Recall with Cosine-Similarity: 89.13
Average Precision with Cosine-Similarity: 67.81
Matthews Correlation with Cosine-Similarity: 49.89
Accuracy with Dot-Product: 76.50 (Threshold: 24.3460)
F1 with Dot-Product: 66.93 (Threshold: 20.0762)
Precision with Dot-Product: 57.62
Recall with Dot-Product: 79.81
Average Precision with Dot-Product: 65.94
Matthews Correlation with Dot-Product: 48.82
Accuracy with Euclidean-Distance: 67.70 (Threshold: -10.0062)
F1 with Euclidean-Distance: 48.60 (Threshold: -0.2346)
Precision with Euclidean-Distance: 32.13
Recall with Euclidean-Distance: 99.69
Average Precision with Euclidean-Distance: 20.52
Matthews Correlation with Euclidean-Distance: -4.59
Accuracy with Manhattan-Distance: 67.70 (Threshold: -103.1993)
F1 with Manhattan-Distance: 48.60 (Threshold: -1.1565)
Precision with Manhattan-Distance: 32.13
Recall with Manhattan-Distance: 99.69
Average Precision with Manhattan-Distance: 21.05
Matthews Correlation with Manhattan-Distance: -4.59
Model Sparsity: Active Dimensions: 63.1, Sparsity Ratio: 0.9979
"""
# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
# => Primary metric: quora_duplicates_dev_max_ap
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6781
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseBinaryClassificationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with two text columns and a class label column (https://huggingface.co/datasets/sentence-transformers/quora-duplicates)
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")
# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_dev",
show_progress_bar=True,
similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
)
results = binary_acc_evaluator(model)
"""
Accuracy with Cosine-Similarity: 74.90 (Threshold: 0.8668)
F1 with Cosine-Similarity: 67.37 (Threshold: 0.5959)
Precision with Cosine-Similarity: 54.15
Recall with Cosine-Similarity: 89.13
Average Precision with Cosine-Similarity: 67.81
Matthews Correlation with Cosine-Similarity: 49.89
Accuracy with Dot-Product: 76.50 (Threshold: 24.3460)
F1 with Dot-Product: 66.93 (Threshold: 20.0762)
Precision with Dot-Product: 57.62
Recall with Dot-Product: 79.81
Average Precision with Dot-Product: 65.94
Matthews Correlation with Dot-Product: 48.82
Accuracy with Euclidean-Distance: 67.70 (Threshold: -10.0062)
F1 with Euclidean-Distance: 48.60 (Threshold: -0.2346)
Precision with Euclidean-Distance: 32.13
Recall with Euclidean-Distance: 99.69
Average Precision with Euclidean-Distance: 20.52
Matthews Correlation with Euclidean-Distance: -4.59
Accuracy with Manhattan-Distance: 67.70 (Threshold: -103.1993)
F1 with Manhattan-Distance: 48.60 (Threshold: -1.1565)
Precision with Manhattan-Distance: 32.13
Recall with Manhattan-Distance: 99.69
Average Precision with Manhattan-Distance: 21.05
Matthews Correlation with Manhattan-Distance: -4.59
Model Sparsity Stats: Row Non-Zero Mean: 63.13884735107422, Row Sparsity Mean: 0.9979313611984253
"""
# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
# => Primary metric: quora_duplicates_dev_max_ap
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6781
|
import torch
from keras.src.backend import config
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import convert_to_tensor
def cholesky(x):
return torch.linalg.cholesky(x)
def det(x):
return torch.det(x)
def eig(x):
return torch.linalg.eig(x)
def eigh(x):
return torch.linalg.eigh(x)
def inv(x):
return torch.linalg.inv(x)
def lu_factor(x):
LU, pivots = torch.linalg.lu_factor(x)
# torch returns pivots with 1-based indexing
return LU, pivots - 1
def norm(x, ord=None, axis=None, keepdims=False):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return torch.linalg.norm(x, ord=ord, dim=axis, keepdim=keepdims)
def qr(x, mode="reduced"):
if mode not in {"reduced", "complete"}:
raise ValueError(
"`mode` argument value not supported. "
"Expected one of {'reduced', 'complete'}. "
f"Received: mode={mode}"
)
return torch.linalg.qr(x, mode=mode)
def solve(a, b):
return torch.linalg.solve(a, b)
def solve_triangular(a, b, lower=False):
if b.ndim == a.ndim - 1:
b = torch.unsqueeze(b, axis=-1)
return torch.linalg.solve_triangular(a, b, upper=not lower).squeeze(
axis=-1
)
return torch.linalg.solve_triangular(a, b, upper=not lower)
def svd(x, full_matrices=True, compute_uv=True):
if not compute_uv:
return torch.linalg.svdvals(x)
return torch.linalg.svd(x, full_matrices=full_matrices)
def lstsq(a, b, rcond=None):
a = convert_to_tensor(a)
b = convert_to_tensor(b)
return torch.linalg.lstsq(a, b, rcond=rcond)[0]
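# A small sketch of the pivot convention handled above: torch.linalg.lu_factor
# returns 1-based pivots, so lu_factor() shifts them to the 0-based convention
# shared with the other Keras backends.
#
# a = convert_to_tensor([[4.0, 3.0], [6.0, 3.0]])
# LU, pivots = lu_factor(a)
# assert int(pivots.min()) >= 0  # pivots are 0-based after the shift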
|
import torch
from keras.src.backend import config
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import convert_to_tensor
def cholesky(x):
return torch.linalg.cholesky(x)
def det(x):
return torch.det(x)
def eig(x):
return torch.linalg.eig(x)
def eigh(x):
return torch.linalg.eigh(x)
def inv(x):
return torch.linalg.inv(x)
def lu_factor(x):
LU, pivots = torch.linalg.lu_factor(x)
    # torch returns pivots with 1-based indexing
return LU, pivots - 1
def norm(x, ord=None, axis=None, keepdims=False):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return torch.linalg.norm(x, ord=ord, dim=axis, keepdim=keepdims)
def qr(x, mode="reduced"):
if mode not in {"reduced", "complete"}:
raise ValueError(
"`mode` argument value not supported. "
"Expected one of {'reduced', 'complete'}. "
f"Received: mode={mode}"
)
return torch.linalg.qr(x, mode=mode)
def solve(a, b):
return torch.linalg.solve(a, b)
def solve_triangular(a, b, lower=False):
if b.ndim == a.ndim - 1:
b = torch.unsqueeze(b, axis=-1)
return torch.linalg.solve_triangular(a, b, upper=not lower).squeeze(
axis=-1
)
return torch.linalg.solve_triangular(a, b, upper=not lower)
def svd(x, full_matrices=True, compute_uv=True):
if not compute_uv:
return torch.linalg.svdvals(x)
return torch.linalg.svd(x, full_matrices=full_matrices)
def lstsq(a, b, rcond=None):
a = convert_to_tensor(a)
b = convert_to_tensor(b)
return torch.linalg.lstsq(a, b, rcond=rcond)[0]
|
"""Init file of LlamaIndex."""
__version__ = "0.12.22"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
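# A minimal usage sketch of the public API re-exported above (the data path is
# hypothetical, and default OpenAI credentials are assumed to be configured):
#
# from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
# documents = SimpleDirectoryReader("./data").load_data()
# index = VectorStoreIndex.from_documents(documents)
# response = index.as_query_engine().query("What does this corpus cover?")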
|
"""Init file of LlamaIndex."""
__version__ = "0.12.21"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
import argparse
import json
import os
from datetime import date
from pathlib import Path
from slack_sdk import WebClient
from tabulate import tabulate
MAX_LEN_MESSAGE = 2900 # slack endpoint has a limit of 3001 characters
parser = argparse.ArgumentParser()
parser.add_argument("--slack_channel_name", default="diffusers-ci-nightly")
def main(slack_channel_name=None):
failed = []
passed = []
group_info = []
total_num_failed = 0
    empty_file = len(list(Path().glob("*.log"))) == 0
total_empty_files = []
for log in Path().glob("*.log"):
section_num_failed = 0
i = 0
with open(log) as f:
for line in f:
line = json.loads(line)
i += 1
if line.get("nodeid", "") != "":
test = line["nodeid"]
if line.get("duration", None) is not None:
duration = f"{line['duration']:.4f}"
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
else:
passed.append([test, duration, log.name.split("_")[0]])
empty_file = i == 0
group_info.append([str(log), section_num_failed, failed])
total_empty_files.append(empty_file)
os.remove(log)
failed = []
text = (
"🌞 There were no failures!"
if not any(total_empty_files)
else "Something went wrong there is at least one empty file - please check GH action results."
)
no_error_payload = {
"type": "section",
"text": {
"type": "plain_text",
"text": text,
"emoji": True,
},
}
message = ""
payload = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": "🤗 Results of the Diffusers scheduled nightly tests.",
},
},
]
if total_num_failed > 0:
for i, (name, num_failed, failed_tests) in enumerate(group_info):
if num_failed > 0:
if num_failed == 1:
message += f"*{name}: {num_failed} failed test*\n"
else:
message += f"*{name}: {num_failed} failed tests*\n"
failed_table = []
for test in failed_tests:
failed_table.append(test[0].split("::"))
failed_table = tabulate(
failed_table,
headers=["Test Location", "Test Case", "Test Name"],
showindex="always",
tablefmt="grid",
maxcolwidths=[12, 12, 12],
)
message += "\n```\n" + failed_table + "\n```"
if total_empty_files[i]:
                message += f"\n*{name}: Warning! Empty file - please check the GitHub action job*\n"
print(f"### {message}")
else:
payload.append(no_error_payload)
if len(message) > MAX_LEN_MESSAGE:
print(f"Truncating long message from {len(message)} to {MAX_LEN_MESSAGE}")
message = message[:MAX_LEN_MESSAGE] + "..."
if len(message) != 0:
md_report = {
"type": "section",
"text": {"type": "mrkdwn", "text": message},
}
payload.append(md_report)
action_button = {
"type": "section",
"text": {"type": "mrkdwn", "text": "*For more details:*"},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/diffusers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
payload.append(action_button)
date_report = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f"Nightly test results for {date.today()}",
},
],
}
payload.append(date_report)
print(payload)
client = WebClient(token=os.environ.get("SLACK_API_TOKEN"))
client.chat_postMessage(channel=f"#{slack_channel_name}", text=message, blocks=payload)
if __name__ == "__main__":
args = parser.parse_args()
main(args.slack_channel_name)
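# A hedged note on the expected input: each *.log file is read as JSON lines
# (e.g., as produced by pytest's report-log output), where test entries look
# roughly like:
#
# {"nodeid": "tests/test_foo.py::TestFoo::test_bar", "duration": 0.0123, "outcome": "failed"}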
|
import argparse
import json
import os
from datetime import date
from pathlib import Path
from slack_sdk import WebClient
from tabulate import tabulate
MAX_LEN_MESSAGE = 2900 # slack endpoint has a limit of 3001 characters
parser = argparse.ArgumentParser()
parser.add_argument("--slack_channel_name", default="diffusers-ci-nightly")
def main(slack_channel_name=None):
failed = []
passed = []
group_info = []
total_num_failed = 0
    empty_file = len(list(Path().glob("*.log"))) == 0
total_empty_files = []
for log in Path().glob("*.log"):
section_num_failed = 0
i = 0
with open(log) as f:
for line in f:
line = json.loads(line)
i += 1
if line.get("nodeid", "") != "":
test = line["nodeid"]
if line.get("duration", None) is not None:
duration = f'{line["duration"]:.4f}'
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
else:
passed.append([test, duration, log.name.split("_")[0]])
empty_file = i == 0
group_info.append([str(log), section_num_failed, failed])
total_empty_files.append(empty_file)
os.remove(log)
failed = []
text = (
"🌞 There were no failures!"
if not any(total_empty_files)
else "Something went wrong there is at least one empty file - please check GH action results."
)
no_error_payload = {
"type": "section",
"text": {
"type": "plain_text",
"text": text,
"emoji": True,
},
}
message = ""
payload = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": "🤗 Results of the Diffusers scheduled nightly tests.",
},
},
]
if total_num_failed > 0:
for i, (name, num_failed, failed_tests) in enumerate(group_info):
if num_failed > 0:
if num_failed == 1:
message += f"*{name}: {num_failed} failed test*\n"
else:
message += f"*{name}: {num_failed} failed tests*\n"
failed_table = []
for test in failed_tests:
failed_table.append(test[0].split("::"))
failed_table = tabulate(
failed_table,
headers=["Test Location", "Test Case", "Test Name"],
showindex="always",
tablefmt="grid",
maxcolwidths=[12, 12, 12],
)
message += "\n```\n" + failed_table + "\n```"
if total_empty_files[i]:
                message += f"\n*{name}: Warning! Empty file - please check the GitHub action job*\n"
print(f"### {message}")
else:
payload.append(no_error_payload)
if len(message) > MAX_LEN_MESSAGE:
print(f"Truncating long message from {len(message)} to {MAX_LEN_MESSAGE}")
message = message[:MAX_LEN_MESSAGE] + "..."
if len(message) != 0:
md_report = {
"type": "section",
"text": {"type": "mrkdwn", "text": message},
}
payload.append(md_report)
action_button = {
"type": "section",
"text": {"type": "mrkdwn", "text": "*For more details:*"},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/diffusers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
payload.append(action_button)
date_report = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f"Nightly test results for {date.today()}",
},
],
}
payload.append(date_report)
print(payload)
client = WebClient(token=os.environ.get("SLACK_API_TOKEN"))
client.chat_postMessage(channel=f"#{slack_channel_name}", text=message, blocks=payload)
if __name__ == "__main__":
args = parser.parse_args()
main(args.slack_channel_name)
|
from langchain_core.utils.aiter import NoLock, Tee, py_anext
__all__ = ["NoLock", "Tee", "py_anext"]
|
from langchain_core.utils.aiter import NoLock, Tee, py_anext
__all__ = ["py_anext", "NoLock", "Tee"]
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmdet.registry import MODELS
from .utils import weighted_loss
@weighted_loss
def knowledge_distillation_kl_div_loss(pred,
soft_label,
T,
detach_target=True):
r"""Loss function for knowledge distilling using KL divergence.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
        soft_label (Tensor): Target logits with shape (N, n + 1).
T (int): Temperature for distillation.
        detach_target (bool): Remove soft_label from automatic differentiation.
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
assert pred.size() == soft_label.size()
target = F.softmax(soft_label / T, dim=1)
if detach_target:
target = target.detach()
kd_loss = F.kl_div(
F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * (
T * T)
return kd_loss
@MODELS.register_module()
class KnowledgeDistillationKLDivLoss(nn.Module):
"""Loss function for knowledge distilling using KL divergence.
Args:
reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
loss_weight (float): Loss weight of current loss.
T (int): Temperature for distillation.
"""
def __init__(self, reduction='mean', loss_weight=1.0, T=10):
super(KnowledgeDistillationKLDivLoss, self).__init__()
assert T >= 1
self.reduction = reduction
self.loss_weight = loss_weight
self.T = T
def forward(self,
pred,
soft_label,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
            soft_label (Tensor): Target logits with shape (N, n + 1).
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss(
pred,
soft_label,
weight,
reduction=reduction,
avg_factor=avg_factor,
T=self.T)
return loss_kd
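# A minimal usage sketch (shapes are illustrative: 8 samples, 80 classes + background):
#
# import torch
# loss_fn = KnowledgeDistillationKLDivLoss(T=10)
# student_logits = torch.randn(8, 81)
# teacher_logits = torch.randn(8, 81)
# loss = loss_fn(student_logits, teacher_logits)  # scalar, reduction='mean'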
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
import torch.nn.functional as F
from mmdet.registry import MODELS
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def knowledge_distillation_kl_div_loss(pred,
soft_label,
T,
detach_target=True):
r"""Loss function for knowledge distilling using KL divergence.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
        soft_label (Tensor): Target logits with shape (N, n + 1).
T (int): Temperature for distillation.
        detach_target (bool): Remove soft_label from automatic differentiation.
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
assert pred.size() == soft_label.size()
target = F.softmax(soft_label / T, dim=1)
if detach_target:
target = target.detach()
kd_loss = F.kl_div(
F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * (
T * T)
return kd_loss
@MODELS.register_module()
class KnowledgeDistillationKLDivLoss(nn.Module):
"""Loss function for knowledge distilling using KL divergence.
Args:
reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
loss_weight (float): Loss weight of current loss.
T (int): Temperature for distillation.
"""
def __init__(self, reduction='mean', loss_weight=1.0, T=10):
super(KnowledgeDistillationKLDivLoss, self).__init__()
assert T >= 1
self.reduction = reduction
self.loss_weight = loss_weight
self.T = T
def forward(self,
pred,
soft_label,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
            soft_label (Tensor): Target logits with shape (N, n + 1).
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss(
pred,
soft_label,
weight,
reduction=reduction,
avg_factor=avg_factor,
T=self.T)
return loss_kd
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
SparseCachedGISTEmbedLoss,
SparseCachedMultipleNegativesRankingLoss,
SparseCoSENTLoss,
SparseCosineSimilarityLoss,
SparseGISTEmbedLoss,
SparseMarginMSELoss,
SparseMultipleNegativesRankingLoss,
SparseTripletLoss,
)
from sentence_transformers.sparse_encoder.models import (
CSRSparsity,
MLMTransformer,
SpladePooling,
)
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
]
# TODO : Complete the SparseEncoder class
# TODO : Add tests for all the components
# TODO : Ask Tom for an update on which losses to implement
# TODO : Add the equivalent of the quantization file for the sparse encoder
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.models import CSRSparsity, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
]
# TODO : Complete the SparseEncoder class
# TODO : Add tests for all the components
# TODO : Ask Tom for an update on which losses to implement
# TODO : Add the equivalent of the quantization file for the sparse encoder
|
from __future__ import annotations
from collections.abc import Generator
import torch
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.util import fullname
class MultipleNegativesRankingLoss(nn.Module):
def __init__(
self,
model: CrossEncoder,
num_negatives: int | None = 4,
        scale: float = 20.0,
activation_fct: nn.Module | None = nn.Tanh(),
) -> None:
super().__init__()
self.model = model
self.num_negatives = num_negatives
self.scale = scale
self.activation_fct = activation_fct
self.cross_entropy_loss = nn.CrossEntropyLoss()
if self.model.num_labels != 1:
raise ValueError(
f"{self.__class__.__name__} expects a model with 1 output label, "
f"but got a model with {self.model.num_labels} output labels."
)
def call_model_with_columns(self, anchors: list[str], candidates: list[str]) -> Tensor:
pairs = list(zip(anchors, candidates))
return self.call_model_with_pairs(pairs)
def call_model_with_pairs(self, pairs: list[list[str]]) -> Tensor:
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0]
return logits.squeeze(1)
def get_in_batch_negatives(
self, anchors: list[str], candidates: list[list[str]]
) -> Generator[list[str], None, None]:
batch_size = len(anchors)
num_candidates = len(candidates)
# Given N anchors, we want to select num_negatives negatives for each anchor
candidates_flattened = [candidate for sublist in candidates for candidate in sublist]
if self.num_negatives is not None:
# Create a mask for each anchor to each candidate index, where the matching positive
# and hard negatives are masked out. From the remaining options, we randomly select
# num_negatives indices.
mask = ~torch.eye(batch_size, dtype=torch.bool).repeat(1, num_candidates)
negative_indices = torch.multinomial(mask.float(), self.num_negatives)
else:
# If num_negatives is None, we select all negatives
negative_indices = torch.arange(len(candidates[0])).repeat(len(candidates), 1)
for negative_indices_row in negative_indices.T:
yield [candidates_flattened[negative_idx] for negative_idx in negative_indices_row]
    def calculate_loss(self, logits: list[Tensor], batch_size: int) -> Tensor:
# (bsz, 1 + num_rand_negatives + num_hard_negatives)
logits = torch.cat(logits, dim=0).reshape(-1, batch_size).T
# Apply the post-processing on the logits
if self.activation_fct:
logits = self.activation_fct(logits)
if self.scale:
logits = logits * self.scale
# For each sample in the batch, the first label is the positive, the rest are negatives
labels = torch.zeros(batch_size, device=logits.device, dtype=torch.long)
loss = self.cross_entropy_loss(logits, labels)
return loss
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
anchors = inputs[0]
positives = inputs[1]
batch_size = len(anchors)
scores = [self.call_model_with_columns(anchors, positives)]
# In-batch negatives:
for negatives in self.get_in_batch_negatives(anchors, inputs[1:]):
scores.append(self.call_model_with_columns(anchors, negatives))
# Hard negatives:
for negatives in inputs[2:]:
scores.append(self.call_model_with_columns(anchors, negatives))
return self.calculate_loss(scores, batch_size)
def get_config_dict(self) -> dict[str, float]:
return {
"scale": self.scale,
"num_negatives": self.num_negatives,
"activation_fct": fullname(self.activation_fct),
}
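# A hedged usage sketch: `inputs` is column-major - inputs[0] holds the anchors,
# inputs[1] the positives, and any further columns are hard negatives. The
# checkpoint name is only an example, and num_negatives must be at most
# batch_size - 1 for the in-batch sampling above.
#
# model = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2", num_labels=1)
# loss_fn = MultipleNegativesRankingLoss(model, num_negatives=2)
# anchors = ["what is python?", "capital of france", "largest ocean", "who wrote hamlet"]
# positives = ["python is a programming language", "paris is the capital of france",
#              "the pacific is the largest ocean", "hamlet was written by shakespeare"]
# loss = loss_fn([anchors, positives], labels=None)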
|
from __future__ import annotations
from collections.abc import Generator
import torch
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
class MultipleNegativesRankingLoss(nn.Module):
def __init__(
self,
model: CrossEncoder,
num_negatives: int | None = 4,
        scale: float = 20.0,
activation_fct: nn.Module | None = nn.Tanh(),
) -> None:
super().__init__()
self.model = model
self.num_negatives = num_negatives
self.scale = scale
self.activation_fct = activation_fct
self.cross_entropy_loss = nn.CrossEntropyLoss()
if self.model.num_labels != 1:
raise ValueError(
f"{self.__class__.__name__} expects a model with 1 output label, "
f"but got a model with {self.model.num_labels} output labels."
)
def call_model_with_columns(self, anchors: list[str], candidates: list[str]) -> Tensor:
pairs = list(zip(anchors, candidates))
return self.call_model_with_pairs(pairs)
def call_model_with_pairs(self, pairs: list[list[str]]) -> Tensor:
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0]
return logits.squeeze(1)
def get_in_batch_negatives(
self, anchors: list[str], candidates: list[list[str]]
) -> Generator[list[str], None, None]:
batch_size = len(anchors)
num_candidates = len(candidates)
# Given N anchors, we want to select num_negatives negatives for each anchor
candidates_flattened = [candidate for sublist in candidates for candidate in sublist]
if self.num_negatives is not None:
# Create a mask for each anchor to each candidate index, where the matching positive
# and hard negatives are masked out. From the remaining options, we randomly select
# num_negatives indices.
mask = ~torch.eye(batch_size, dtype=torch.bool).repeat(1, num_candidates)
negative_indices = torch.multinomial(mask.float(), self.num_negatives)
else:
# If num_negatives is None, we select all negatives
negative_indices = torch.arange(len(candidates[0])).repeat(len(candidates), 1)
for negative_indices_row in negative_indices.T:
yield [candidates_flattened[negative_idx] for negative_idx in negative_indices_row]
    def calculate_loss(self, logits: list[Tensor], batch_size: int) -> Tensor:
# (bsz, 1 + num_rand_negatives + num_hard_negatives)
logits = torch.cat(logits, dim=0).reshape(-1, batch_size).T
# Apply the post-processing on the logits
if self.activation_fct:
logits = self.activation_fct(logits)
if self.scale:
logits = logits * self.scale
# For each sample in the batch, the first label is the positive, the rest are negatives
labels = torch.zeros(batch_size, device=logits.device, dtype=torch.long)
loss = self.cross_entropy_loss(logits, labels)
return loss
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
anchors = inputs[0]
positives = inputs[1]
batch_size = len(anchors)
scores = [self.call_model_with_columns(anchors, positives)]
# In-batch negatives:
for negatives in self.get_in_batch_negatives(anchors, inputs[1:]):
scores.append(self.call_model_with_columns(anchors, negatives))
# Hard negatives:
for negatives in inputs[2:]:
scores.append(self.call_model_with_columns(anchors, negatives))
return self.calculate_loss(scores, batch_size)
def get_config_dict(self) -> dict[str, float]:
return {"scale": self.scale, "num_negatives": self.num_negatives}
|
from xgboost.testing.parse_tree import (
run_split_value_histograms,
run_tree_to_df_categorical,
)
def test_tree_to_df_categorical() -> None:
run_tree_to_df_categorical("hist", "cuda")
def test_split_value_histograms() -> None:
run_split_value_histograms("hist", "cuda")
|
import sys
sys.path.append("tests/python")
from test_parse_tree import TestTreesToDataFrame
def test_tree_to_df_categorical():
cputest = TestTreesToDataFrame()
cputest.run_tree_to_df_categorical("gpu_hist")
def test_split_value_histograms():
cputest = TestTreesToDataFrame()
cputest.run_split_value_histograms("gpu_hist")
|
from typing import Dict, Optional, Sequence
import torch
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
from transformers import CLIPModel, CLIPTokenizer
class CLIPTextEncoder(Executor):
"""Encode text into embeddings using the CLIP model."""
def __init__(
self,
pretrained_model_name_or_path: str = 'openai/clip-vit-base-patch32',
base_tokenizer_model: Optional[str] = None,
max_length: int = 77,
device: str = 'cpu',
traversal_paths: Sequence[str] = ['r'],
batch_size: int = 32,
*args,
**kwargs,
):
"""
:param pretrained_model_name_or_path: Can be either:
- A string, the model id of a pretrained CLIP model hosted
inside a model repo on huggingface.co, e.g., 'openai/clip-vit-base-patch32'
- A path to a directory containing model weights saved, e.g.
`./my_model_directory/`
:param base_tokenizer_model: Base tokenizer model.
Defaults to ``pretrained_model_name_or_path`` if None
:param max_length: Max length argument for the tokenizer.
All CLIP models use 77 as the max length
:param device: Pytorch device to put the model on, e.g. 'cpu', 'cuda', 'cuda:1'
:param traversal_paths: Default traversal paths for encoding, used if
the traversal path is not passed as a parameter with the request.
:param batch_size: Default batch size for encoding, used if the
batch size is not passed as a parameter with the request.
"""
super().__init__(*args, **kwargs)
self.default_traversal_paths = traversal_paths
self.default_batch_size = batch_size
self.pretrained_model_name_or_path = pretrained_model_name_or_path
self.base_tokenizer_model = (
base_tokenizer_model or pretrained_model_name_or_path
)
self.max_length = max_length
self.device = device
self.tokenizer = CLIPTokenizer.from_pretrained(self.base_tokenizer_model)
self.model = CLIPModel.from_pretrained(self.pretrained_model_name_or_path)
self.model.eval().to(device)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Encode all documents with the `text` attribute and store the embeddings in the
`embedding` attribute.
:param docs: DocumentArray containing the Documents to be encoded
:param parameters: A dictionary that contains parameters to control encoding.
The accepted keys are ``traversal_paths`` and ``batch_size`` - in their
absence their corresponding default values are used.
"""
for docs_batch in get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
):
text_batch = docs_batch.get_attributes('text')
with torch.no_grad():
input_tokens = self._generate_input_tokens(text_batch)
embeddings = self.model.get_text_features(**input_tokens).cpu().numpy()
for doc, embedding in zip(docs_batch, embeddings):
doc.embedding = embedding
def _generate_input_tokens(self, texts: Sequence[str]):
input_tokens = self.tokenizer(
texts,
max_length=self.max_length,
padding='longest',
truncation=True,
return_tensors='pt',
)
input_tokens = {k: v.to(self.device) for k, v in input_tokens.items()}
return input_tokens
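# A hedged usage sketch with a jina Flow (endpoint and text are illustrative;
# the exact post() signature depends on the installed jina version):
#
# from jina import Document, DocumentArray, Flow
# f = Flow().add(uses=CLIPTextEncoder)
# with f:
#     f.post(on='/encode', inputs=DocumentArray([Document(text='hello world')]))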
|
from typing import Dict, Optional, Sequence
import torch
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
from transformers import CLIPModel, CLIPTokenizer
class CLIPTextEncoder(Executor):
"""Encode text into embeddings using a CLIP model.
:param pretrained_model_name_or_path: Can be either:
- A string, the model id of a pretrained CLIP model hosted
inside a model repo on huggingface.co, e.g., 'openai/clip-vit-base-patch32'
- A path to a directory containing model weights saved, e.g., ./my_model_directory/
:param base_tokenizer_model: Base tokenizer model.
Defaults to ``pretrained_model_name_or_path`` if None
:param max_length: Max length argument for the tokenizer.
All CLIP models use 77 as the max length
:param device: Device to be used. Use 'cuda' for GPU.
:param default_traversal_paths: Default traversal paths for encoding, used if the
traversal path is not passed as a parameter with the request.
:param default_batch_size: Default batch size for encoding, used if the
batch size is not passed as a parameter with the request.
:param args: Arguments
:param kwargs: Keyword Arguments
"""
def __init__(
self,
pretrained_model_name_or_path: str = 'openai/clip-vit-base-patch32',
base_tokenizer_model: Optional[str] = None,
max_length: Optional[int] = 77,
device: str = 'cpu',
default_traversal_paths: Sequence[str] = ['r'],
default_batch_size: int = 32,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.default_traversal_paths = default_traversal_paths
self.default_batch_size = default_batch_size
self.pretrained_model_name_or_path = pretrained_model_name_or_path
self.base_tokenizer_model = (
base_tokenizer_model or pretrained_model_name_or_path
)
self.max_length = max_length
self.device = device
self.tokenizer = CLIPTokenizer.from_pretrained(self.base_tokenizer_model)
self.model = CLIPModel.from_pretrained(self.pretrained_model_name_or_path)
self.model.eval().to(device)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
        Encode text data into embeddings of dimension `D` and fill the
        `embedding` attribute of the docs.
:param docs: DocumentArray containing text
:param parameters: dictionary to define the `traversal_paths` and the
`batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional key value arguments.
"""
for docs_batch in get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
):
text_batch = docs_batch.get_attributes('text')
with torch.no_grad():
input_tokens = self._generate_input_tokens(text_batch)
embeddings = self.model.get_text_features(**input_tokens).cpu().numpy()
for doc, embedding in zip(docs_batch, embeddings):
doc.embedding = embedding
def _generate_input_tokens(self, texts: Sequence[str]):
input_tokens = self.tokenizer(
texts,
max_length=self.max_length,
padding='longest',
truncation=True,
return_tensors='pt',
)
input_tokens = {k: v.to(self.device) for k, v in input_tokens.items()}
return input_tokens
|
_base_ = './fovea_r50_fpn_4xb4-2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './fovea_r50_fpn_4x4_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Optional, Union
from .base import HfQuantizer
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
from ..utils import is_accelerate_available, is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class BitNetHfQuantizer(HfQuantizer):
"""
1.58-bit quantization from BitNet quantization method:
Before loading: it converts the linear layers into BitLinear layers during loading.
Check out the paper introducing this method: https://huggingface.co/papers/2402.17764
"""
requires_parameters_quantization = False
requires_calibration = True
required_packages = ["accelerate"]
def __init__(self, quantization_config, **kwargs):
super().__init__(quantization_config, **kwargs)
self.quantization_config = quantization_config
def validate_environment(self, *args, **kwargs):
if not is_accelerate_available():
raise ImportError("Loading a BitNet quantized model requires accelerate (`pip install accelerate`)")
if kwargs.get("from_tf", False) or kwargs.get("from_flax", False):
raise ValueError(
"Loading ternary weights from tf/flax is currently not supported, please make"
" sure the weights are in PyTorch format."
)
if not torch.cuda.is_available():
logger.warning_once(
"You don't have a GPU available to load the model, the inference will be slow because of weight unpacking"
)
return
device_map = kwargs.get("device_map", None)
if device_map is None:
logger.warning_once(
"You have loaded a BitNet model on CPU and have a CUDA device available, make sure to set "
"your model on a GPU device in order to run your model."
)
        else:
if isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()):
raise ValueError(
"You are attempting to load a BitNet model with a device_map that contains a CPU or disk device."
"This is not supported. Please remove the CPU or disk device from the device_map."
)
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
return model
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
keep_in_fp32_modules: Optional[list[str]] = None,
**kwargs,
):
from ..integrations import replace_with_bitnet_linear
self.modules_to_not_convert = self.get_modules_to_not_convert(
model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules
)
model = replace_with_bitnet_linear(
model,
modules_to_not_convert=self.modules_to_not_convert,
quantization_config=self.quantization_config,
pre_quantized=self.pre_quantized,
)
def adjust_max_memory(self, max_memory: dict[str, Union[int, str]]) -> dict[str, Union[int, str]]:
max_memory = {key: val * 0.90 for key, val in max_memory.items()}
return max_memory
def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
target_dtype = torch.int8
return target_dtype
def is_serializable(self, safe_serialization=None):
return True
@property
def is_trainable(self) -> bool:
return (
self.quantization_config.linear_class == "autobitlinear"
and self.quantization_config.quantization_mode == "online"
)
@property
def is_qat_trainable(self) -> bool:
"""Flag indicating whether the quantized model can carry out quantization aware training"""
return (
self.quantization_config.linear_class == "autobitlinear"
and self.quantization_config.quantization_mode == "online"
)
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Dict, List, Optional, Union
from .base import HfQuantizer
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
from ..utils import is_accelerate_available, is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class BitNetHfQuantizer(HfQuantizer):
"""
    1.58-bit quantization from the BitNet quantization method:
    before the weights are loaded, it converts the model's linear layers into BitLinear layers.
Check out the paper introducing this method: https://huggingface.co/papers/2402.17764
"""
requires_parameters_quantization = False
requires_calibration = True
required_packages = ["accelerate"]
def __init__(self, quantization_config, **kwargs):
super().__init__(quantization_config, **kwargs)
self.quantization_config = quantization_config
def validate_environment(self, *args, **kwargs):
if not is_accelerate_available():
raise ImportError("Loading a BitNet quantized model requires accelerate (`pip install accelerate`)")
if kwargs.get("from_tf", False) or kwargs.get("from_flax", False):
raise ValueError(
"Loading ternary weights from tf/flax is currently not supported, please make"
" sure the weights are in PyTorch format."
)
if not torch.cuda.is_available():
logger.warning_once(
"You don't have a GPU available to load the model, the inference will be slow because of weight unpacking"
)
return
device_map = kwargs.get("device_map", None)
if device_map is None:
logger.warning_once(
"You have loaded a BitNet model on CPU and have a CUDA device available, make sure to set "
"your model on a GPU device in order to run your model."
)
        else:
if isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()):
raise ValueError(
"You are attempting to load a BitNet model with a device_map that contains a CPU or disk device."
"This is not supported. Please remove the CPU or disk device from the device_map."
)
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
return model
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
keep_in_fp32_modules: Optional[List[str]] = None,
**kwargs,
):
from ..integrations import replace_with_bitnet_linear
self.modules_to_not_convert = self.get_modules_to_not_convert(
model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules
)
model = replace_with_bitnet_linear(
model,
modules_to_not_convert=self.modules_to_not_convert,
quantization_config=self.quantization_config,
pre_quantized=self.pre_quantized,
)
def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
max_memory = {key: val * 0.90 for key, val in max_memory.items()}
return max_memory
def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
target_dtype = torch.int8
return target_dtype
def is_serializable(self, safe_serialization=None):
return True
@property
def is_trainable(self) -> bool:
return (
self.quantization_config.linear_class == "autobitlinear"
and self.quantization_config.quantization_mode == "online"
)
@property
def is_qat_trainable(self) -> bool:
"""Flag indicating whether the quantized model can carry out quantization aware training"""
return (
self.quantization_config.linear_class == "autobitlinear"
and self.quantization_config.quantization_mode == "online"
)
|
from __future__ import annotations
import collections
import json
import os
import string
from typing import Iterable
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
class WhitespaceTokenizer(WordTokenizer):
"""
    Simple and fast whitespace tokenizer that splits a sentence on whitespace.
    Punctuation is stripped from tokens.
"""
def __init__(
self, vocab: Iterable[str] = [], stop_words: Iterable[str] = ENGLISH_STOP_WORDS, do_lower_case: bool = False
):
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
def tokenize(self, text: str, **kwargs) -> list[int]:
if self.do_lower_case:
text = text.lower()
tokens = text.split()
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "whitespacetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "whitespacetokenizer_config.json")) as fIn:
config = json.load(fIn)
return WhitespaceTokenizer(**config)
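# A small usage sketch of the filtering logic above (the vocab and stop words below
# are made up for demonstration; guarded so importing this module has no side effects):
if __name__ == "__main__":
    tokenizer = WhitespaceTokenizer(vocab=["hello", "world"], stop_words={"the"}, do_lower_case=True)
    # "the" is dropped as a stop word; "hello," is stripped of punctuation before the
    # vocab lookup, so the two remaining tokens map to their vocab indices.
    assert tokenizer.tokenize("The hello, world") == [0, 1]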
|
from __future__ import annotations
import collections
import json
import os
import string
from typing import Iterable
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
class WhitespaceTokenizer(WordTokenizer):
"""
    Simple and fast whitespace tokenizer that splits a sentence on whitespace.
    Punctuation is stripped from tokens.
"""
def __init__(
self, vocab: Iterable[str] = [], stop_words: Iterable[str] = ENGLISH_STOP_WORDS, do_lower_case: bool = False
):
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
def tokenize(self, text: str, **kwargs) -> list[int]:
if self.do_lower_case:
text = text.lower()
tokens = text.split()
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "whitespacetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "whitespacetokenizer_config.json"), "r") as fIn:
config = json.load(fIn)
return WhitespaceTokenizer(**config)
|
from io import BytesIO
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image import ImageNdArray
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from PIL import Image as PILImage
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='ImageBytes')
@_register_proto(proto_type_name='image_bytes')
class ImageBytes(bytes, AbstractType):
"""
    Bytes that store an image and that can be loaded into an image tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load_pil(
self,
) -> 'PILImage.Image':
"""
Load the image from the bytes into a `PIL.Image.Image` instance
---
```python
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import ImageUrl
img_url = "https://upload.wikimedia.org/wikipedia/commons/8/80/Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
img_url = parse_obj_as(ImageUrl, img_url)
img = img_url.load_pil()
from PIL.Image import Image
assert isinstance(img, Image)
```
---
:return: a Pillow image
"""
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
return PILImage.open(BytesIO(self))
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
) -> ImageNdArray:
"""
Load the image from the ImageBytes into an ImageNdArray
---
```python
from docarray import BaseDoc
from docarray.typing import ImageNdArray, ImageUrl
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, ImageNdArray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
```
---
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:return: ImageNdArray representing the image as RGB values
"""
raw_img = self.load_pil()
if width or height:
new_width = width or raw_img.width
new_height = height or raw_img.height
raw_img = raw_img.resize((new_width, new_height))
try:
tensor = np.array(raw_img.convert('RGB'))
except Exception:
tensor = np.array(raw_img)
img = self._move_channel_axis(tensor, axis_layout=axis_layout)
return parse_obj_as(ImageNdArray, img)
@staticmethod
def _move_channel_axis(
tensor: np.ndarray, axis_layout: Tuple[str, str, str] = ('H', 'W', 'C')
) -> np.ndarray:
"""Moves channel axis around."""
channel_to_offset = {'H': 0, 'W': 1, 'C': 2}
permutation = tuple(channel_to_offset[axis] for axis in axis_layout)
return np.transpose(tensor, permutation)
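# An illustrative usage sketch ("photo.jpg" is a hypothetical local file; guarded so
# importing this module has no side effects):
if __name__ == "__main__":
    with open("photo.jpg", "rb") as f:
        img_bytes = ImageBytes(f.read())
    # axis_layout drives the permutation applied in _move_channel_axis above.
    chw = img_bytes.load(height=224, width=224, axis_layout=("C", "H", "W"))
    print(chw.shape)  # (3, 224, 224) for an RGB image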
|
from io import BytesIO
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from PIL import Image as PILImage
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='ImageBytes')
@_register_proto(proto_type_name='image_bytes')
class ImageBytes(bytes, AbstractType):
"""
    Bytes that store an image and that can be loaded into an image tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load_pil(
self,
) -> 'PILImage.Image':
"""
Load the image from the bytes into a `PIL.Image.Image` instance
---
```python
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import ImageUrl
img_url = "https://upload.wikimedia.org/wikipedia/commons/8/80/Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
img_url = parse_obj_as(ImageUrl, img_url)
img = img_url.load_pil()
from PIL.Image import Image
assert isinstance(img, Image)
```
---
:return: a Pillow image
"""
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
return PILImage.open(BytesIO(self))
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
) -> np.ndarray:
"""
Load the image from the bytes into a numpy.ndarray image tensor
---
```python
from docarray import BaseDoc
from docarray.typing import ImageUrl
import numpy as np
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, np.ndarray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
```
---
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:return: np.ndarray representing the image as RGB values
"""
raw_img = self.load_pil()
if width or height:
new_width = width or raw_img.width
new_height = height or raw_img.height
raw_img = raw_img.resize((new_width, new_height))
try:
tensor = np.array(raw_img.convert('RGB'))
except Exception:
tensor = np.array(raw_img)
return self._move_channel_axis(tensor, axis_layout=axis_layout)
@staticmethod
def _move_channel_axis(
tensor: np.ndarray, axis_layout: Tuple[str, str, str] = ('H', 'W', 'C')
) -> np.ndarray:
"""Moves channel axis around."""
channel_to_offset = {'H': 0, 'W': 1, 'C': 2}
permutation = tuple(channel_to_offset[axis] for axis in axis_layout)
return np.transpose(tensor, permutation)
|
from __future__ import annotations
import torch.nn as nn
from sentence_transformers.losses.CosineSimilarityLoss import CosineSimilarityLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCosineSimilarityLoss(CosineSimilarityLoss):
def __init__(
self,
model: SparseEncoder,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
        SparseCosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SparseEncoder model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
                default, the identity function is used (i.e. no change).
References:
            # TODO: Not yet adapted for sparse models; might be worth adding.
- `Training Examples > Semantic Textual Similarity <../../../examples/sentence_transformer/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, SparseCoSENTLoss is recommended.
- :class:`SparseAnglELoss` is :class:`SparseCoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than SparseCosineSimilarityLoss.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SparseCosineSimilarityLoss(model)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, loss_fct=loss_fct, cos_score_transformation=cos_score_transformation)
|
from __future__ import annotations
import torch.nn as nn
from sentence_transformers.losses.CosineSimilarityLoss import CosineSimilarityLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCosineSimilarityLoss(CosineSimilarityLoss):
def __init__(
self,
model: SparseEncoder,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
        SparseCosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SparseEncoder model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
                default, the identity function is used (i.e. no change).
References:
            # TODO: Not yet adapted for sparse models; might be worth adding.
- `Training Examples > Semantic Textual Similarity <../../../examples/sentence_transformer/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, SparseCoSENTLoss is recommended.
- :class:`SparseAnglELoss` is :class:`SparseCoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than SparseCosineSimilarityLoss.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SparseCosineSimilarityLoss(model)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, loss_fct=loss_fct, cos_score_transformation=cos_score_transformation)
|
import types
from typing import TYPE_CHECKING
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.index.backends.elastic import ElasticV7DocIndex # noqa: F401
from docarray.index.backends.hnswlib import HnswDocumentIndex # noqa: F401
__all__ = []
def __getattr__(name: str):
lib: types.ModuleType
if name == 'HnswDocumentIndex':
import_library('hnswlib', raise_error=True)
import docarray.index.backends.hnswlib as lib
elif name == 'ElasticV7DocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elastic as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
index_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return index_cls
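# Note: the module-level __getattr__ above implements PEP 562 lazy imports, so the
# heavy backend dependency is only imported on first attribute access, e.g.:
#
#     from docarray.index import HnswDocumentIndex  # imports hnswlib only here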
|
from docarray.index.backends.elastic import ElasticV7DocIndex
from docarray.index.backends.hnswlib import HnswDocumentIndex
__all__ = ['HnswDocumentIndex', 'ElasticV7DocIndex']
|
import time
import unittest
from parameterized import parameterized
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
from transformers.testing_utils import require_flash_attn, require_torch_gpu, slow
_TEST_PROMPTS = [
"A man is a walking his dog down the street, and a the turn he sees",
"Describe a fruit that is of orange color and round. It is a sweet fruit and a great source of Vitamine C. The fruit I'm thinking of is an",
"A plane is flying high in the sky, out of the window are clouds and mountains. Where could the plane be located?",
"Please fill in the form to",
"For safety reasons, the train is stopped in the middle of the",
]
_EXPECTED_OUTPUTS = [
"a woman standing on the sidewalk, looking at him. He is immediately drawn to her and feels a strong attraction. He walks up to her and strikes up a conversation, and they quickly discover that they have a lot in common. They exchange numbers and",
"orange.\n\n## Step 1: Identify the key characteristics of the fruit\nThe fruit is described as being orange in color and round in shape.\n\n## Step 2: Determine the taste and nutritional value of the fruit\nThe fruit is described as sweet",
"This riddle is a classic example of a lateral thinking puzzle, which requires the test-taker to think creatively and consider multiple possibilities. The answer is not a straightforward one, and it requires some lateral thinking to arrive at the correct solution.",
"get in touch with us. We will respond to your message as soon as possible.\n\n[Your Name]\n[Your Email]\n[Your Phone Number]\n[Your Message]\n\nWe are looking forward to hearing from you!\n\n[Insert Contact Information]\n\nNote:",
"track. The train is stopped for 30 minutes. The train is moving at a speed of 60 km/h. How many kilometers does the train travel in 30 minutes?\n## Step 1: Convert the speed from km/h to km/min",
]
@slow
@require_torch_gpu
@require_flash_attn
class TestBatchGeneration(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.model = AutoModelForCausalLM.from_pretrained(
"meta-llama/Llama-3.2-3b-Instruct", torch_dtype="bfloat16", device_map="auto"
).eval()
cls.tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-3b-Instruct", padding_side="left")
if cls.tokenizer.pad_token is None:
cls.tokenizer.pad_token = cls.tokenizer.eos_token
cls.model.config.pad_token_id = cls.model.config.eos_token_id
cls.model.use_cache = False
@parameterized.expand(
[
("eager_paged", 64, 128, 64),
("sdpa_paged", 32, 256, 128),
("paged_attention", 16, 512, 256),
("flex_paged", 64, 128, 64),
]
)
def test_generate_batch_consistency(self, attn_impl, num_blocks, block_size, max_batch_tokens):
self.model.config.attn_implementation = attn_impl
generation_config = GenerationConfig(
max_new_tokens=50,
top_k=0,
eos_token_id=self.tokenizer.eos_token_id,
pad_token_id=self.tokenizer.pad_token_id,
use_cache=False,
num_blocks=num_blocks,
block_size=block_size,
max_batch_tokens=max_batch_tokens,
)
tokenized = self.tokenizer(_TEST_PROMPTS, truncation=True, max_length=512)
batch_inputs = list(tokenized["input_ids"])
start = time.time()
batch_outputs = self.model.generate_batch(
inputs=batch_inputs,
generation_config=generation_config,
)
end = time.time()
print(
f"\n[{attn_impl}] Batch took {end - start:.2f}s with config: blocks={num_blocks}, block_size={block_size}, max_batch_tokens={max_batch_tokens}"
)
for i, req_id in enumerate(batch_outputs):
generated = self.tokenizer.decode(batch_outputs[req_id].static_outputs, skip_special_tokens=False).strip()
expected = _EXPECTED_OUTPUTS[i].strip()
self.assertTrue(
generated.startswith(expected),
msg=f"[{attn_impl}] Mismatch in request {i}:\nExpected start: {expected}\nGot: {generated}",
)
|
import time
import unittest
from parameterized import parameterized
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
from transformers.testing_utils import require_flash_attn, require_torch_gpu, run_slow
_TEST_PROMPTS = [
"A man is a walking his dog down the street, and a the turn he sees",
"Describe a fruit that is of orange color and round. It is a sweet fruit and a great source of Vitamine C. The fruit I'm thinking of is an",
"A plane is flying high in the sky, out of the window are clouds and mountains. Where could the plane be located?",
"Please fill in the form to",
"For safety reasons, the train is stopped in the middle of the",
]
_EXPECTED_OUTPUTS = [
"a woman standing on the sidewalk, looking at him. He is immediately drawn to her and feels a strong attraction. He walks up to her and strikes up a conversation, and they quickly discover that they have a lot in common. They exchange numbers and",
"orange.\n\n## Step 1: Identify the key characteristics of the fruit\nThe fruit is described as being orange in color and round in shape.\n\n## Step 2: Determine the taste and nutritional value of the fruit\nThe fruit is described as sweet",
"This riddle is a classic example of a lateral thinking puzzle, which requires the test-taker to think creatively and consider multiple possibilities. The answer is not a straightforward one, and it requires some lateral thinking to arrive at the correct solution.",
"get in touch with us. We will respond to your message as soon as possible.\n\n[Your Name]\n[Your Email]\n[Your Phone Number]\n[Your Message]\n\nWe are looking forward to hearing from you!\n\n[Insert Contact Information]\n\nNote:",
"track. The train is stopped for 30 minutes. The train is moving at a speed of 60 km/h. How many kilometers does the train travel in 30 minutes?\n## Step 1: Convert the speed from km/h to km/min",
]
@run_slow
@require_torch_gpu
@require_flash_attn
class TestBatchGeneration(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.model = AutoModelForCausalLM.from_pretrained(
"meta-llama/Llama-3.2-3b-Instruct", torch_dtype="bfloat16", device_map="auto"
).eval()
cls.tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-3b-Instruct", padding_side="left")
if cls.tokenizer.pad_token is None:
cls.tokenizer.pad_token = cls.tokenizer.eos_token
cls.model.config.pad_token_id = cls.model.config.eos_token_id
cls.model.use_cache = False
@parameterized.expand(
[
("eager_paged", 64, 128, 64),
("sdpa_paged", 32, 256, 128),
("paged_attention", 16, 512, 256),
("flex_paged", 64, 128, 64),
]
)
def test_generate_batch_consistency(self, attn_impl, num_blocks, block_size, max_batch_tokens):
self.model.config.attn_implementation = attn_impl
generation_config = GenerationConfig(
max_new_tokens=50,
top_k=0,
eos_token_id=self.tokenizer.eos_token_id,
pad_token_id=self.tokenizer.pad_token_id,
use_cache=False,
num_blocks=num_blocks,
block_size=block_size,
max_batch_tokens=max_batch_tokens,
)
tokenized = self.tokenizer(_TEST_PROMPTS, truncation=True, max_length=512)
batch_inputs = list(tokenized["input_ids"])
start = time.time()
batch_outputs = self.model.generate_batch(
inputs=batch_inputs,
generation_config=generation_config,
)
end = time.time()
print(
f"\n[{attn_impl}] Batch took {end - start:.2f}s with config: blocks={num_blocks}, block_size={block_size}, max_batch_tokens={max_batch_tokens}"
)
for i, req_id in enumerate(batch_outputs):
generated = self.tokenizer.decode(batch_outputs[req_id].static_outputs, skip_special_tokens=False).strip()
expected = _EXPECTED_OUTPUTS[i].strip()
self.assertTrue(
generated.startswith(expected),
msg=f"[{attn_impl}] Mismatch in request {i}:\nExpected start: {expected}\nGot: {generated}",
)
|
__version__ = '0.14.9'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.14.8'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import numpy as np
from jina import Document, Flow, DocumentArray
from ...custom_image_torch_encoder import CustomImageTorchEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_video_torch_encoder():
model_state_dict_path = os.path.join(cur_dir, '../model/model_state_dict.pth')
input_dim = 224
test_img = np.random.rand(3, input_dim, input_dim)
docs = DocumentArray([Document(blob=test_img), Document(blob=test_img)])
f = Flow().add(uses={'jtype': 'CustomImageTorchEncoder',
'with': {'model_state_dict_path': model_state_dict_path,
'layer_name': 'conv1',
'model_definition_file': os.path.join(cur_dir, '../model/external_model.py'),
'model_class_name': 'ExternalModel'}})
with f:
resp = f.post(on='/test', inputs=docs,
return_results=True)
assert resp[0].docs[0].embedding.shape == (10,)
assert resp[0].docs[1].embedding.shape == (10,)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import numpy as np
from jina import Document, Flow, DocumentArray
try:
from custom_image_torch_encoder import CustomImageTorchEncoder
except ImportError:
from jinahub.encoder.custom_image_torch_encoder import CustomImageTorchEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_video_torch_encoder():
model_state_dict_path = os.path.join(cur_dir, '../model/model_state_dict.pth')
input_dim = 224
test_img = np.random.rand(3, input_dim, input_dim)
docs = DocumentArray([Document(blob=test_img), Document(blob=test_img)])
f = Flow().add(uses={'jtype': 'CustomImageTorchEncoder',
'with': {'model_state_dict_path': model_state_dict_path,
'layer_name': 'conv1',
'model_definition_file': os.path.join(cur_dir, '../model/external_model.py'),
'model_class_name': 'ExternalModel'}})
with f:
resp = f.post(on='/test', inputs=docs,
return_results=True)
assert resp[0].docs[0].embedding.shape == (10,)
assert resp[0].docs[1].embedding.shape == (10,)
|
"""Standard LangChain interface tests"""
import os
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import AzureChatOpenAI
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
class TestAzureOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"],
"model": "gpt-4o-mini",
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_image_urls(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
class TestAzureOpenAIStandardLegacy(ChatModelIntegrationTests):
"""Test a legacy model."""
@property
def chat_model_class(self) -> type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME"],
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def structured_output_kwargs(self) -> dict:
return {"method": "function_calling"}
|
"""Standard LangChain interface tests"""
import os
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import AzureChatOpenAI
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
class TestAzureOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"],
"model": "gpt-4o-mini",
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
class TestAzureOpenAIStandardLegacy(ChatModelIntegrationTests):
"""Test a legacy model."""
@property
def chat_model_class(self) -> type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME"],
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def structured_output_kwargs(self) -> dict:
return {"method": "function_calling"}
|
from docarray.typing.embedding import Embedding
from docarray.typing.id import ID
from docarray.typing.tensor import Tensor
from docarray.typing.url import AnyUrl, ImageUrl
__all__ = ['Tensor', 'Embedding', 'ImageUrl', 'AnyUrl', 'ID']
|
from docarray.document.base_node import BaseNode
from docarray.typing.ndarray import Embedding, Tensor
from docarray.typing.url import ImageUrl
__all__ = ['Tensor', 'Embedding', 'BaseNode', 'ImageUrl']
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True)
# model settings
model = dict(
type='CornerNet',
data_preprocessor=data_preprocessor,
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CentripetalHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=0,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1),
loss_guiding_shift=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=0.05),
loss_centripetal_shift=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
# The cropped images are padded into squares during training,
# but may be smaller than crop_size.
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
mean=data_preprocessor['mean'],
std=data_preprocessor['std'],
# Image data is not converted to rgb.
to_rgb=data_preprocessor['bgr_to_rgb']),
dict(type='Resize', scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
# TODO: mstest is not currently implemented
test_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
backend_args={{_base_.backend_args}}),
# don't need Resize
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
mean=data_preprocessor['mean'],
std=data_preprocessor['std'],
# Image data is not converted to rgb.
to_rgb=data_preprocessor['bgr_to_rgb']),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border'))
]
train_dataloader = dict(
batch_size=6,
num_workers=3,
batch_sampler=None,
dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='Adam', lr=0.0005),
clip_grad=dict(max_norm=35, norm_type=2))
max_epochs = 210
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[190],
gamma=0.1)
]
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (16 GPUs) x (6 samples per GPU)
auto_scale_lr = dict(base_batch_size=96)
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True)
# model settings
model = dict(
type='CornerNet',
data_preprocessor=data_preprocessor,
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CentripetalHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=0,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1),
loss_guiding_shift=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=0.05),
loss_centripetal_shift=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
# The cropped images are padded into squares during training,
# but may be smaller than crop_size.
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
mean=data_preprocessor['mean'],
std=data_preprocessor['std'],
# Image data is not converted to rgb.
to_rgb=data_preprocessor['bgr_to_rgb']),
dict(type='Resize', scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
# TODO: mstest is not currently implemented
test_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
file_client_args={{_base_.file_client_args}}),
# don't need Resize
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
mean=data_preprocessor['mean'],
std=data_preprocessor['std'],
# Image data is not converted to rgb.
to_rgb=data_preprocessor['bgr_to_rgb']),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border'))
]
train_dataloader = dict(
batch_size=6,
num_workers=3,
batch_sampler=None,
dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='Adam', lr=0.0005),
clip_grad=dict(max_norm=35, norm_type=2))
max_epochs = 210
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[190],
gamma=0.1)
]
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (16 GPUs) x (6 samples per GPU)
auto_scale_lr = dict(base_batch_size=96)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import sparse_plus
from keras.src.ops.nn import sparsemax
from keras.src.ops.nn import squareplus
from keras.src.ops.nn import tanh_shrink
from keras.src.ops.nn import threshold
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import sparse_plus
from keras.src.ops.nn import sparsemax
from keras.src.ops.nn import squareplus
from keras.src.ops.nn import tanh_shrink
|
from langchain_core.tracers import schemas
from langchain_core.tracers.schemas import __all__ as schemas_all
def test_public_api() -> None:
"""Test for changes in the public API."""
expected_all = [
"BaseRun",
"ChainRun",
"LLMRun",
"Run",
"RunTypeEnum",
"ToolRun",
"TracerSession",
"TracerSessionBase",
"TracerSessionV1",
"TracerSessionV1Base",
"TracerSessionV1Create",
]
assert sorted(schemas_all) == expected_all
# Assert that the object is actually present in the schema module
for module_name in expected_all:
assert hasattr(schemas, module_name)
assert getattr(schemas, module_name) is not None
|
import langchain_core.tracers.schemas as schemas
from langchain_core.tracers.schemas import __all__ as schemas_all
def test_public_api() -> None:
"""Test for changes in the public API."""
expected_all = [
"BaseRun",
"ChainRun",
"LLMRun",
"Run",
"RunTypeEnum",
"ToolRun",
"TracerSession",
"TracerSessionBase",
"TracerSessionV1",
"TracerSessionV1Base",
"TracerSessionV1Create",
]
assert sorted(schemas_all) == expected_all
# Assert that the object is actually present in the schema module
for module_name in expected_all:
assert hasattr(schemas, module_name)
assert getattr(schemas, module_name) is not None
|
import numpy as np
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import NdArray
def test_nested_item_proto():
NodeProto(text='hello')
NodeProto(nested=DocumentProto())
def test_nested_optional_item_proto():
NodeProto()
def test_ndarray():
original_ndarray = np.zeros((3, 224, 224))
custom_ndarray = NdArray._docarray_from_native(original_ndarray)
tensor = NdArray.from_protobuf(custom_ndarray.to_protobuf())
assert (tensor == original_ndarray).all()
def test_document_proto_set():
data = {}
nested_item1 = NodeProto(text='hello')
ndarray = NdArray._docarray_from_native(np.zeros((3, 224, 224)))
nd_proto = ndarray.to_protobuf()
nested_item2 = NodeProto(ndarray=nd_proto)
data['a'] = nested_item1
data['b'] = nested_item2
DocumentProto(data=data)
|
import numpy as np
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import NdArray
def test_nested_item_proto():
NodeProto(text='hello')
NodeProto(nested=DocumentProto())
def test_nested_optional_item_proto():
NodeProto()
def test_ndarray():
original_ndarray = np.zeros((3, 224, 224))
custom_ndarray = NdArray.__docarray_from_native__(original_ndarray)
tensor = NdArray.from_protobuf(custom_ndarray.to_protobuf())
assert (tensor == original_ndarray).all()
def test_document_proto_set():
data = {}
nested_item1 = NodeProto(text='hello')
ndarray = NdArray.__docarray_from_native__(np.zeros((3, 224, 224)))
nd_proto = ndarray.to_protobuf()
nested_item2 = NodeProto(ndarray=nd_proto)
data['a'] = nested_item1
data['b'] = nested_item2
DocumentProto(data=data)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Iterable, List, Union
import numpy as np
import tensorflow as tf
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator
class ImageTFEncoder(Executor):
"""
    :class:`ImageTFEncoder` encodes ``Document`` content from an ndarray,
    potentially `B x (Height x Width x Channel)`, into an ndarray of `B x D`,
    where `B` is the batch size and `D` is the embedding dimension.
    The :class:`ImageTFEncoder` wraps the models from
    `tensorflow.keras.applications
    <https://www.tensorflow.org/api_docs/python/tf/keras/applications>`_.
"""
def __init__(
self,
model_name: str = 'MobileNetV2',
img_shape: int = 336,
pool_strategy: str = 'max',
default_batch_size: int = 32,
default_traversal_paths: Union[List[str], str] = None,
device: str = '/CPU:0',
*args,
**kwargs,
):
"""
:param model_name: the name of the model. Supported models include
``DenseNet121``, ``DenseNet169``, ``DenseNet201``,
``InceptionResNetV2``, ``InceptionV3``, ``MobileNet``,
``MobileNetV2``, ``NASNetLarge``, ``NASNetMobile``,
``ResNet101``, ``ResNet152``, ``ResNet50``, ``ResNet101V2``,
``ResNet152V2``, ``ResNet50V2``, ``VGG16``, ``VGG19``,
            ``Xception``, etc. A full list can be found at
            `tf.keras.applications <https://www.tensorflow.org/api_docs/python/tf/keras/applications#functions>`_.
:param img_shape: The shape of the image to be encoded.
:param pool_strategy: the pooling strategy. Options are:
- `None`: Means that the output of the model will be the 4D tensor
output of the last convolutional block.
            - `avg`: Means that global average pooling will be applied to the
output of the last convolutional block, and thus the output of
the model will be a 2D tensor.
- `max`: Means that global max pooling will be applied.
:param default_batch_size: size of each batch
        :param default_traversal_paths: traversal paths of the Documents (e.g. 'r', 'c')
:param device: Device ('/CPU:0', '/GPU:0', '/GPU:X')
"""
super().__init__(*args, **kwargs)
if default_traversal_paths is None:
default_traversal_paths = ['r']
self.model_name = model_name
self.pool_strategy = pool_strategy
self.img_shape = img_shape
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
self.logger = JinaLogger(self.__class__.__name__)
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
if 'GPU' in device:
gpu_index = 0 if 'GPU:' not in device else int(device.split(':')[-1])
if len(gpus) < gpu_index + 1:
raise RuntimeError(f'Device {device} not found on your system!')
self.device = tf.device(device)
with self.device:
model = getattr(tf.keras.applications, self.model_name)(
input_shape=(self.img_shape, self.img_shape, 3),
include_top=False,
pooling=self.pool_strategy,
weights='imagenet',
)
model.trainable = False
self.model = model
@requests
def encode(self, docs: DocumentArray, parameters: Dict, **kwargs):
"""
        Encode document content into an ndarray of `B x D`, where
        `B` is the batch size and `D` is the dimension.
:param docs: DocumentArray containing blob as image data.
:param parameters: parameters dictionary.
:param kwargs: additional keyword arguments.
:return: Encoded result as a `BatchSize x D` numpy ``ndarray``,
`D` is the output dimension
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='blob',
)
self._create_embeddings(document_batches_generator)
def _create_embeddings(self, document_batches_generator: Iterable):
for document_batch in document_batches_generator:
blob_batch = np.stack([d.blob for d in document_batch])
with self.device:
embedding_batch = self.model(blob_batch)
for document, embedding in zip(document_batch, embedding_batch):
document.embedding = np.array(embedding)
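# A minimal usage sketch (a sketch only: assumes a local Jina Flow, and the random
# blobs below match the default img_shape of 336):
if __name__ == "__main__":
    from jina import Document, Flow

    f = Flow().add(uses=ImageTFEncoder)
    with f:
        docs = DocumentArray([Document(blob=np.random.rand(336, 336, 3).astype('float32'))])
        resp = f.post(on='/encode', inputs=docs, return_results=True)
        print(resp[0].docs[0].embedding.shape)  # (D,) per document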
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Iterable, List, Union
import numpy as np
import tensorflow as tf
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator
class ImageTFEncoder(Executor):
"""
    :class:`ImageTFEncoder` encodes ``Document`` content from an ndarray,
    potentially `B x (Height x Width x Channel)`, into an ndarray of `B x D`,
    where `B` is the batch size and `D` is the embedding dimension.
    The :class:`ImageTFEncoder` wraps the models from
    `tensorflow.keras.applications
    <https://www.tensorflow.org/api_docs/python/tf/keras/applications>`_.
:param model_name: the name of the model. Supported models include
``DenseNet121``, ``DenseNet169``, ``DenseNet201``,
``InceptionResNetV2``, ``InceptionV3``, ``MobileNet``,
``MobileNetV2``, ``NASNetLarge``, ``NASNetMobile``,
``ResNet101``, ``ResNet152``, ``ResNet50``, ``ResNet101V2``,
``ResNet152V2``, ``ResNet50V2``, ``VGG16``, ``VGG19``,
        ``Xception``, etc. A full list can be found at
        `tf.keras.applications <https://www.tensorflow.org/api_docs/python/tf/keras/applications#functions>`_.
:param img_shape: The shape of the image to be encoded.
:param pool_strategy: the pooling strategy. Options are:
- `None`: Means that the output of the model will be the 4D tensor
output of the last convolutional block.
        - `avg`: Means that global average pooling will be applied to the
output of the last convolutional block, and thus the output of
the model will be a 2D tensor.
- `max`: Means that global max pooling will be applied.
:param default_batch_size: size of each batch
    :param default_traversal_paths: traversal paths of the Documents (e.g. 'r', 'c')
:param device: Device ('/CPU:0', '/GPU:0', '/GPU:X')
:param args: additional positional arguments.
    :param kwargs: additional keyword arguments.
"""
def __init__(
self,
model_name: str = 'MobileNetV2',
img_shape: int = 336,
pool_strategy: str = 'max',
default_batch_size: int = 32,
default_traversal_paths: Union[List[str], str] = None,
device: str = '/CPU:0',
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
if default_traversal_paths is None:
default_traversal_paths = ['r']
self.model_name = model_name
self.pool_strategy = pool_strategy
self.img_shape = img_shape
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
self.logger = JinaLogger(self.__class__.__name__)
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
if 'GPU' in device:
gpu_index = 0 if 'GPU:' not in device else int(device.split(':')[-1])
if len(gpus) < gpu_index + 1:
raise RuntimeError(f'Device {device} not found on your system!')
self.device = tf.device(device)
with self.device:
model = getattr(tf.keras.applications, self.model_name)(
input_shape=(self.img_shape, self.img_shape, 3),
include_top=False,
pooling=self.pool_strategy,
weights='imagenet',
)
model.trainable = False
self.model = model
@requests
def encode(self, docs: DocumentArray, parameters: Dict, **kwargs):
"""
        Encode document content into an ndarray of `B x D`, where
        `B` is the batch size and `D` is the dimension.
:param docs: DocumentArray containing blob as image data.
        :param parameters: parameters dictionary.
        :param kwargs: additional keyword arguments.
:return: Encoded result as a `BatchSize x D` numpy ``ndarray``,
`D` is the output dimension
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='blob',
)
self._create_embeddings(document_batches_generator)
def _create_embeddings(self, document_batches_generator: Iterable):
for document_batch in document_batches_generator:
blob_batch = np.stack([d.blob for d in document_batch])
with self.device:
embedding_batch = self.model(blob_batch)
for document, embedding in zip(document_batch, embedding_batch):
document.embedding = np.array(embedding)
|
import logging
import os
from argparse import ArgumentParser
import sentencepiece as spm
from average_checkpoints import ensemble
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.strategies import DDPStrategy
from transforms import get_data_module
def get_trainer(args):
seed_everything(1)
checkpoint = ModelCheckpoint(
dirpath=os.path.join(args.exp_dir, args.experiment_name) if args.exp_dir else None,
monitor="monitoring_step",
mode="max",
save_last=True,
filename="{epoch}",
save_top_k=10,
)
lr_monitor = LearningRateMonitor(logging_interval="step")
callbacks = [
checkpoint,
lr_monitor,
]
return Trainer(
sync_batchnorm=True,
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
num_nodes=args.num_nodes,
devices=args.gpus,
accelerator="gpu",
strategy=DDPStrategy(find_unused_parameters=False),
callbacks=callbacks,
reload_dataloaders_every_n_epochs=1,
resume_from_checkpoint=args.resume_from_checkpoint,
)
def get_lightning_module(args):
sp_model = spm.SentencePieceProcessor(model_file=str(args.sp_model_path))
if args.md == "av":
from lightning_av import AVConformerRNNTModule
model = AVConformerRNNTModule(args, sp_model)
else:
from lightning import ConformerRNNTModule
model = ConformerRNNTModule(args, sp_model)
return model
def parse_args():
parser = ArgumentParser()
parser.add_argument(
"--md",
type=str,
help="Modality",
required=True,
)
parser.add_argument(
"--mode",
type=str,
help="Perform online or offline recognition.",
required=True,
)
parser.add_argument(
"--root-dir",
type=str,
help="Root directory to LRS3 audio-visual datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=str,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--pretrained-model-path",
type=str,
help="Path to Pretraned model.",
)
parser.add_argument(
"--exp-dir",
type=str,
help="Directory to save checkpoints and logs to. (Default: './exp')",
)
parser.add_argument(
"--experiment-name",
type=str,
help="Experiment name",
)
parser.add_argument(
"--num-nodes",
default=8,
type=int,
help="Number of nodes to use for training. (Default: 8)",
)
parser.add_argument(
"--gpus",
default=8,
type=int,
help="Number of GPUs per node to use for training. (Default: 8)",
)
parser.add_argument(
"--epochs",
default=55,
type=int,
help="Number of epochs to train for. (Default: 55)",
)
parser.add_argument(
"--resume-from-checkpoint", default=None, type=str, help="Path to the checkpoint to resume from"
)
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = parse_args()
init_logger(args.debug)
model = get_lightning_module(args)
data_module = get_data_module(args, str(args.sp_model_path))
trainer = get_trainer(args)
trainer.fit(model, data_module)
ensemble(args)
if __name__ == "__main__":
cli_main()
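
# Hedged usage sketch (script name, paths, and values below are assumptions,
# not taken from the repository):
#   python train.py --md av --mode online \
#       --root-dir /data/lrs3 --sp-model-path spm_unigram.model \
#       --exp-dir ./exp --experiment-name demo --num-nodes 1 --gpus 2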
|
import logging
import os
from argparse import ArgumentParser
import sentencepiece as spm
from average_checkpoints import ensemble
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.strategies import DDPStrategy
from transforms import get_data_module
def get_trainer(args):
seed_everything(1)
checkpoint = ModelCheckpoint(
dirpath=os.path.join(args.exp_dir, args.experiment_name) if args.exp_dir else None,
monitor="monitoring_step",
mode="max",
save_last=True,
filename="{{epoch}}",
save_top_k=10,
)
lr_monitor = LearningRateMonitor(logging_interval="step")
callbacks = [
checkpoint,
lr_monitor,
]
return Trainer(
sync_batchnorm=True,
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
num_nodes=args.num_nodes,
devices=args.gpus,
accelerator="gpu",
strategy=DDPStrategy(find_unused_parameters=False),
callbacks=callbacks,
reload_dataloaders_every_n_epochs=1,
resume_from_checkpoint=args.resume_from_checkpoint,
)
def get_lightning_module(args):
sp_model = spm.SentencePieceProcessor(model_file=str(args.sp_model_path))
if args.md == "av":
from lightning_av import AVConformerRNNTModule
model = AVConformerRNNTModule(args, sp_model)
else:
from lightning import ConformerRNNTModule
model = ConformerRNNTModule(args, sp_model)
return model
def parse_args():
parser = ArgumentParser()
parser.add_argument(
"--md",
type=str,
help="Modality",
required=True,
)
parser.add_argument(
"--mode",
type=str,
help="Perform online or offline recognition.",
required=True,
)
parser.add_argument(
"--dataset-path",
type=str,
help="Path to LRW audio-visual datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=str,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--pretrained-model-path",
type=str,
help="Path to Pretraned model.",
)
parser.add_argument(
"--exp-dir",
type=str,
help="Directory to save checkpoints and logs to. (Default: './exp')",
)
parser.add_argument(
"--experiment-name",
default="online_avsr_public_test",
type=str,
help="Experiment name",
)
parser.add_argument(
"--num-nodes",
default=8,
type=int,
help="Number of nodes to use for training. (Default: 8)",
)
parser.add_argument(
"--gpus",
default=8,
type=int,
help="Number of GPUs per node to use for training. (Default: 8)",
)
parser.add_argument(
"--epochs",
default=55,
type=int,
help="Number of epochs to train for. (Default: 55)",
)
parser.add_argument(
"--resume-from-checkpoint", default=None, type=str, help="Path to the checkpoint to resume from"
)
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = parse_args()
init_logger(args.debug)
model = get_lightning_module(args)
data_module = get_data_module(args, str(args.sp_model_path))
trainer = get_trainer(args)
trainer.fit(model, data_module)
ensemble(args)
if __name__ == "__main__":
cli_main()
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'qdrant-client~=0.7.3',
'elasticsearch>=8.2.0',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.0',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'elasticsearch>=8.2.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
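
# Hedged usage note (not part of the original setup.py): given the extras
# declared above, `pip install "docarray[qdrant]"` pulls in only that
# backend's client (here qdrant-client~=0.7.3), while `pip install
# "docarray[full]"` installs every optional dependency listed under 'full'.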
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'qdrant-client~=0.7.3',
'elasticsearch>=8.0.1',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.0',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.0.1',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'elasticsearch>=8.0.1',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
from langchain_core.tracers.langchain import (
LangChainTracer,
get_client,
log_error_once,
wait_for_all_tracers,
)
__all__ = ["LangChainTracer", "get_client", "log_error_once", "wait_for_all_tracers"]
|
from langchain_core.tracers.langchain import (
LangChainTracer,
get_client,
log_error_once,
wait_for_all_tracers,
)
__all__ = ["log_error_once", "wait_for_all_tracers", "get_client", "LangChainTracer"]
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://contrib/resnet50_gn')),
neck=dict(norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg),
mask_head=dict(norm_cfg=norm_cfg)))
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
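
# Hedged reading of the schedule above (comment only, not from the config):
# the learning rate warms up linearly from 0.001x the base value over the
# first 500 iterations, after which MultiStepLR multiplies it by 0.1 at
# epochs 16 and 22 of the 24-epoch run.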
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://contrib/resnet50_gn')),
neck=dict(norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg),
mask_head=dict(norm_cfg=norm_cfg)))
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
    def __init__(self, model: SparseEncoder, threshold: float | None = None) -> None:
"""
FlopsLoss implements a regularization technique to promote sparsity in sparse encoder models.
It calculates the squared L2 norm of the mean embedding vector, which helps reduce the number of floating-point
operations (FLOPs) required during inference by encouraging more zero values in the embeddings.
It can use a threshold to ignore embeddings with too few non-zero (active) elements.
This loss is used as a regularization component within other losses like :class:`SpladeLoss` rather than
being used as a standalone loss function.
Args:
model: SparseEncoder model to be regularized
threshold: Optional threshold for the number of non-zero (active) elements in the embeddings.
If specified, only embeddings with more than this number of non-zero (active) elements will be considered.
This can help to ignore embeddings that are too sparse and may not contribute meaningfully to the loss.
References:
- For further details, see: https://arxiv.org/pdf/2004.05665 for the general FLOPS loss and https://arxiv.org/pdf/2504.14839 for FLOPS with thresholds, a.k.a. FLOPS with l0 masking.
Relations:
- Used as a component within :class:`SpladeLoss` to regularize both query and document embeddings
Example:
- This loss is typically used within the :class:`SpladeLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
self.threshold = threshold
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise NotImplementedError(
"FlopsLoss is not intended to be used directly. Use it as a regulizer within the SpladeLoss class."
)
def compute_loss_from_embeddings(self, embeddings: list[torch.Tensor]) -> torch.Tensor:
if self.threshold is not None:
l0_norm = (embeddings != 0).sum(dim=1)
mask = (l0_norm > self.threshold).float()
embeddings = embeddings * mask.unsqueeze(1)
return torch.sum(torch.mean(embeddings, dim=0) ** 2)
@property
def citation(self) -> str:
return """
@article{paria2020minimizing,
title={Minimizing flops to learn efficient sparse representations},
author={Paria, Biswajit and Yeh, Chih-Kuan and Yen, Ian EH and Xu, Ning and Ravikumar, Pradeep and P{\'o}czos, Barnab{\'a}s},
journal={arXiv preprint arXiv:2004.05665},
year={2020}
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
    def __init__(self, model: SparseEncoder, threshold: float | None = None) -> None:
"""
FlopsLoss implements a regularization technique to promote sparsity in sparse encoder models.
It calculates the squared L2 norm of the mean embedding vector, which helps reduce the number of floating-point
operations (FLOPs) required during inference by encouraging more zero values in the embeddings.
It can use a threshold to ignore embeddings with too few non-zero elements.
This loss is used as a regularization component within other losses like :class:`SpladeLoss` rather than
being used as a standalone loss function.
Args:
model: SparseEncoder model to be regularized
threshold: Optional threshold for the number of non-zero elements in the embeddings.
If specified, only embeddings with more than this number of non-zero elements will be considered.
This can help to ignore embeddings that are too sparse and may not contribute meaningfully to the loss.
References:
- For further details, see: https://arxiv.org/pdf/2004.05665 for the general FLOPS loss and https://arxiv.org/pdf/2504.14839 for FLOPS with thresholds, a.k.a. FLOPS with l0 masking.
Relations:
- Used as a component within :class:`SpladeLoss` to regularize both query and document embeddings
Example:
- This loss is typically used within the :class:`SpladeLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
self.threshold = threshold
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise NotImplementedError(
"FlopsLoss is not intended to be used directly. Use it as a regulizer within the SpladeLoss class."
)
def compute_loss_from_embeddings(self, embeddings: list[torch.Tensor]) -> torch.Tensor:
if self.threshold is not None:
l0_norm = (embeddings != 0).sum(dim=1)
mask = (l0_norm > self.threshold).float()
embeddings = embeddings * mask.unsqueeze(1)
return torch.sum(torch.mean(embeddings, dim=0) ** 2)
@property
def citation(self) -> str:
return """
@article{paria2020minimizing,
title={Minimizing flops to learn efficient sparse representations},
author={Paria, Biswajit and Yeh, Chih-Kuan and Yen, Ian EH and Xu, Ning and Ravikumar, Pradeep and P{\'o}czos, Barnab{\'a}s},
journal={arXiv preprint arXiv:2004.05665},
year={2020}
}
"""
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for the tf.test.benchmark."""
import os
from google.protobuf import json_format
from tensorflow.core.util import test_log_pb2
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class BenchmarkTest(test.TestCase, benchmark.TensorFlowBenchmark):
def testReportBenchmark(self):
output_dir = self.get_temp_dir() + os.path.sep
os.environ['TEST_REPORT_FILE_PREFIX'] = output_dir
proto_file_path = os.path.join(output_dir,
'BenchmarkTest.testReportBenchmark')
if os.path.exists(proto_file_path):
os.remove(proto_file_path)
self.report_benchmark(
iters=2000,
wall_time=1000,
name='testReportBenchmark',
metrics=[{'name': 'metric_name_1', 'value': 0, 'min_value': 1},
{'name': 'metric_name_2', 'value': 90, 'min_value': 0,
'max_value': 95}])
with open(proto_file_path, 'rb') as f:
benchmark_entries = test_log_pb2.BenchmarkEntries()
benchmark_entries.ParseFromString(f.read())
actual_result = json_format.MessageToDict(
benchmark_entries, preserving_proto_field_name=True,
always_print_fields_with_no_presence=True)['entry'][0]
os.remove(proto_file_path)
expected_result = {
'name': 'BenchmarkTest.testReportBenchmark',
# google.protobuf.json_format.MessageToDict() will convert
# int64 field to string.
'iters': '2000',
'wall_time': 1000,
'cpu_time': 0,
'throughput': 0,
'extras': {},
'metrics': [
{
'name': 'metric_name_1',
'value': 0,
'min_value': 1
},
{
'name': 'metric_name_2',
'value': 90,
'min_value': 0,
'max_value': 95
}
]
}
self.assertEqual(2000, benchmark_entries.entry[0].iters)
self.assertDictEqual(expected_result, actual_result)
if __name__ == '__main__':
test.main()
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for the tf.test.benchmark."""
import os
from google.protobuf import json_format
from tensorflow.core.util import test_log_pb2
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class BenchmarkTest(test.TestCase, benchmark.TensorFlowBenchmark):
def testReportBenchmark(self):
output_dir = self.get_temp_dir() + os.path.sep
os.environ['TEST_REPORT_FILE_PREFIX'] = output_dir
proto_file_path = os.path.join(output_dir,
'BenchmarkTest.testReportBenchmark')
if os.path.exists(proto_file_path):
os.remove(proto_file_path)
self.report_benchmark(
iters=2000,
wall_time=1000,
name='testReportBenchmark',
metrics=[{'name': 'metric_name_1', 'value': 0, 'min_value': 1},
{'name': 'metric_name_2', 'value': 90, 'min_value': 0,
'max_value': 95}])
with open(proto_file_path, 'rb') as f:
benchmark_entries = test_log_pb2.BenchmarkEntries()
benchmark_entries.ParseFromString(f.read())
actual_result = json_format.MessageToDict(
benchmark_entries, preserving_proto_field_name=True,
including_default_value_fields=True)['entry'][0]
os.remove(proto_file_path)
expected_result = {
'name': 'BenchmarkTest.testReportBenchmark',
# google.protobuf.json_format.MessageToDict() will convert
# int64 field to string.
'iters': '2000',
'wall_time': 1000,
'cpu_time': 0,
'throughput': 0,
'extras': {},
'metrics': [
{
'name': 'metric_name_1',
'value': 0,
'min_value': 1
},
{
'name': 'metric_name_2',
'value': 90,
'min_value': 0,
'max_value': 95
}
]
}
self.assertEqual(2000, benchmark_entries.entry[0].iters)
self.assertDictEqual(expected_result, actual_result)
if __name__ == '__main__':
test.main()
|
"""
Given a dataset with parallel sentences, one "english" column and one "non_english" column, this script evaluates a model on the translation task.
Given a sentence in the "english" column, the model should find the correct translation in the "non_english" column, based on just the embeddings.
It then computes an accuracy over all possible source sentences src_i. Equivalently, it computes also the accuracy for the other direction.
A high accuracy score indicates that the model is able to find the correct translation out of a large pool with sentences.
Good options for datasets are:
* sentence-transformers/parallel-sentences-wikimatrix
* sentence-transformers/parallel-sentences-tatoeba
* sentence-transformers/parallel-sentences-talks
As these have development sets.
Usage:
python examples/sentence_transformer/evaluation/evaluation_translation_matching.py [model_name_or_path] [dataset_name] [subset1] [subset2] ...
For example:
python examples/sentence_transformer/evaluation/evaluation_translation_matching.py distiluse-base-multilingual-cased sentence-transformers/parallel-sentences-tatoeba en-ar en-de en-nl
"""
import logging
import sys
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, evaluation
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1]
dataset_name = sys.argv[2]
subsets = sys.argv[3:]
inference_batch_size = 32
model = SentenceTransformer(model_name)
for subset in subsets:
dataset = load_dataset(dataset_name, subset)
datasets = {}
    if list(dataset.keys()) == ["train"]:
        num_samples = min(5000, len(dataset["train"]))
        datasets[f"train[:{num_samples}]"] = dataset["train"].select(range(num_samples))
else:
for split, sub_dataset in dataset.items():
if split != "train":
datasets[split] = sub_dataset
for split, sub_dataset in datasets.items():
logging.info(f"{dataset_name}, subset={subset}, split={split}, num_samples={len(sub_dataset)}")
translation_evaluator = evaluation.TranslationEvaluator(
sub_dataset["english"],
sub_dataset["non_english"],
name=f"{dataset_name}-{subset}-{split}",
batch_size=inference_batch_size,
)
translation_evaluator(model)
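
# Hedged sketch (not part of the original script) of the matching metric the
# docstring describes, with plain dot products standing in for the evaluator's
# cosine similarity: each source row predicts its nearest target row, and
# accuracy is the fraction of argmax hits on the diagonal.
import numpy as np

src = np.random.randn(5, 8)
tgt = src + 0.01 * np.random.randn(5, 8)  # near-perfect "translations"
scores = src @ tgt.T
accuracy = float(np.mean(scores.argmax(axis=1) == np.arange(len(src))))
print(f"toy accuracy: {accuracy}")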
|
"""
Given a dataset with parallel sentences, one "english" column and one "non_english" column, this script evaluates a model on the translation task.
Given a sentence in the "english" column, the model should find the correct translation in the "non_english" column, based on just the embeddings.
It then computes an accuracy over all possible source sentences src_i. Equivalently, it computes also the accuracy for the other direction.
A high accuracy score indicates that the model is able to find the correct translation out of a large pool with sentences.
Good options for datasets are:
* sentence-transformers/parallel-sentences-wikimatrix
* sentence-transformers/parallel-sentences-tatoeba
* sentence-transformers/parallel-sentences-talks
As these have development sets.
Usage:
python examples/evaluation/evaluation_translation_matching.py [model_name_or_path] [dataset_name] [subset1] [subset2] ...
For example:
python examples/evaluation/evaluation_translation_matching.py distiluse-base-multilingual-cased sentence-transformers/parallel-sentences-tatoeba en-ar en-de en-nl
"""
import logging
import sys
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, evaluation
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1]
dataset_name = sys.argv[2]
subsets = sys.argv[3:]
inference_batch_size = 32
model = SentenceTransformer(model_name)
for subset in subsets:
dataset = load_dataset(dataset_name, subset)
datasets = {}
    if list(dataset.keys()) == ["train"]:
        num_samples = min(5000, len(dataset["train"]))
        datasets[f"train[:{num_samples}]"] = dataset["train"].select(range(num_samples))
else:
for split, sub_dataset in dataset.items():
if split != "train":
datasets[split] = sub_dataset
for split, sub_dataset in datasets.items():
logging.info(f"{dataset_name}, subset={subset}, split={split}, num_samples={len(sub_dataset)}")
translation_evaluator = evaluation.TranslationEvaluator(
sub_dataset["english"],
sub_dataset["non_english"],
name=f"{dataset_name}-{subset}-{split}",
batch_size=inference_batch_size,
)
translation_evaluator(model)
|
# pylint: disable=invalid-name,unused-import
"""For compatibility and optional dependencies."""
import importlib.util
import logging
import sys
import types
from typing import Any, Sequence, cast
import numpy as np
from ._typing import _T
assert sys.version_info[0] == 3, "Python 2 is no longer supported."
def py_str(x: bytes | None) -> str:
"""convert c string back to python string"""
assert x is not None # ctypes might return None
return x.decode("utf-8") # type: ignore
def lazy_isinstance(instance: Any, module: str, name: str) -> bool:
"""Use string representation to identify a type."""
# Notice, we use .__class__ as opposed to type() in order
# to support object proxies such as weakref.proxy
cls = instance.__class__
is_same_module = cls.__module__ == module
has_same_name = cls.__name__ == name
return is_same_module and has_same_name
# pandas
try:
from pandas import DataFrame, Series
PANDAS_INSTALLED = True
except ImportError:
DataFrame = object
Series = object
PANDAS_INSTALLED = False
# sklearn
try:
from sklearn import __version__ as _sklearn_version
from sklearn.base import BaseEstimator as XGBModelBase
from sklearn.base import ClassifierMixin as XGBClassifierBase
from sklearn.base import RegressorMixin as XGBRegressorBase
try:
from sklearn.model_selection import StratifiedKFold as XGBStratifiedKFold
except ImportError:
from sklearn.cross_validation import StratifiedKFold as XGBStratifiedKFold
# sklearn.utils Tags types can be imported unconditionally once
# xgboost's minimum scikit-learn version is 1.6 or higher
try:
from sklearn.utils import Tags as _sklearn_Tags
except ImportError:
_sklearn_Tags = object
SKLEARN_INSTALLED = True
except ImportError:
SKLEARN_INSTALLED = False
# used for compatibility without sklearn
class XGBModelBase: # type: ignore[no-redef]
"""Dummy class for sklearn.base.BaseEstimator."""
class XGBClassifierBase: # type: ignore[no-redef]
"""Dummy class for sklearn.base.ClassifierMixin."""
class XGBRegressorBase: # type: ignore[no-redef]
"""Dummy class for sklearn.base.RegressorMixin."""
XGBStratifiedKFold = None
_sklearn_Tags = object
_sklearn_version = object
_logger = logging.getLogger(__name__)
def is_cudf_available() -> bool:
"""Check cuDF package available or not"""
if importlib.util.find_spec("cudf") is None:
return False
try:
import cudf
return True
except ImportError:
_logger.exception("Importing cuDF failed, use DMatrix instead of QDM")
return False
def is_cupy_available() -> bool:
"""Check cupy package available or not"""
if importlib.util.find_spec("cupy") is None:
return False
try:
import cupy
return True
except ImportError:
return False
def import_cupy() -> types.ModuleType:
"""Import cupy."""
if not is_cupy_available():
raise ImportError("`cupy` is required for handling CUDA buffer.")
import cupy
return cupy
try:
import scipy.sparse as scipy_sparse
from scipy.sparse import csr_matrix as scipy_csr
except ImportError:
scipy_sparse = False
scipy_csr = object
def concat(value: Sequence[_T]) -> _T: # pylint: disable=too-many-return-statements
"""Concatenate row-wise."""
if isinstance(value[0], np.ndarray):
value_arr = cast(Sequence[np.ndarray], value)
return np.concatenate(value_arr, axis=0)
if scipy_sparse and isinstance(value[0], scipy_sparse.csr_matrix):
return scipy_sparse.vstack(value, format="csr")
if scipy_sparse and isinstance(value[0], scipy_sparse.csc_matrix):
return scipy_sparse.vstack(value, format="csc")
if scipy_sparse and isinstance(value[0], scipy_sparse.spmatrix):
# other sparse format will be converted to CSR.
return scipy_sparse.vstack(value, format="csr")
if PANDAS_INSTALLED and isinstance(value[0], (DataFrame, Series)):
from pandas import concat as pd_concat
return pd_concat(value, axis=0)
if lazy_isinstance(value[0], "cudf.core.dataframe", "DataFrame") or lazy_isinstance(
value[0], "cudf.core.series", "Series"
):
from cudf import concat as CUDF_concat
return CUDF_concat(value, axis=0)
from .data import _is_cupy_alike
if _is_cupy_alike(value[0]):
import cupy
# pylint: disable=c-extension-no-member,no-member
d = cupy.cuda.runtime.getDevice()
for v in value:
arr = cast(cupy.ndarray, v)
d_v = arr.device.id
assert d_v == d, "Concatenating arrays on different devices."
return cupy.concatenate(value, axis=0)
raise TypeError(f"Unknown type: {type(value[0])}")
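
# Hedged usage sketch (not part of the original module): the numpy branch of
# `concat` above stacks row-wise via np.concatenate.
if __name__ == "__main__":
    parts = [np.arange(6).reshape(2, 3), np.arange(6, 12).reshape(2, 3)]
    assert concat(parts).shape == (4, 3)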
|
# pylint: disable=invalid-name,unused-import
"""For compatibility and optional dependencies."""
import importlib.util
import logging
import sys
import types
from typing import Any, Sequence, cast
import numpy as np
from ._typing import _T
assert sys.version_info[0] == 3, "Python 2 is no longer supported."
def py_str(x: bytes | None) -> str:
"""convert c string back to python string"""
assert x is not None # ctypes might return None
return x.decode("utf-8") # type: ignore
def lazy_isinstance(instance: Any, module: str, name: str) -> bool:
"""Use string representation to identify a type."""
# Notice, we use .__class__ as opposed to type() in order
# to support object proxies such as weakref.proxy
cls = instance.__class__
is_same_module = cls.__module__ == module
has_same_name = cls.__name__ == name
return is_same_module and has_same_name
# pandas
try:
from pandas import DataFrame, Series
PANDAS_INSTALLED = True
except ImportError:
DataFrame = object
Series = object
PANDAS_INSTALLED = False
# sklearn
try:
from sklearn import __version__ as _sklearn_version
from sklearn.base import BaseEstimator as XGBModelBase
from sklearn.base import ClassifierMixin as XGBClassifierBase
from sklearn.base import RegressorMixin as XGBRegressorBase
try:
from sklearn.model_selection import StratifiedKFold as XGBStratifiedKFold
except ImportError:
from sklearn.cross_validation import StratifiedKFold as XGBStratifiedKFold
# sklearn.utils Tags types can be imported unconditionally once
# xgboost's minimum scikit-learn version is 1.6 or higher
try:
from sklearn.utils import Tags as _sklearn_Tags
except ImportError:
_sklearn_Tags = object
SKLEARN_INSTALLED = True
except ImportError:
SKLEARN_INSTALLED = False
# used for compatibility without sklearn
class XGBModelBase: # type: ignore[no-redef]
"""Dummy class for sklearn.base.BaseEstimator."""
class XGBClassifierBase: # type: ignore[no-redef]
"""Dummy class for sklearn.base.ClassifierMixin."""
class XGBRegressorBase: # type: ignore[no-redef]
"""Dummy class for sklearn.base.RegressorMixin."""
XGBStratifiedKFold = None
_sklearn_Tags = object
_sklearn_version = object
_logger = logging.getLogger(__name__)
def is_cudf_available() -> bool:
"""Check cuDF package available or not"""
if importlib.util.find_spec("cudf") is None:
return False
try:
import cudf
return True
except ImportError:
_logger.exception("Importing cuDF failed, use DMatrix instead of QDM")
return False
def is_cupy_available() -> bool:
"""Check cupy package available or not"""
if importlib.util.find_spec("cupy") is None:
return False
try:
import cupy
return True
except ImportError:
return False
def import_cupy() -> types.ModuleType:
"""Import cupy."""
if not is_cupy_available():
raise ImportError("`cupy` is required for handling CUDA buffer.")
import cupy
return cupy
try:
import scipy.sparse as scipy_sparse
from scipy.sparse import csr_matrix as scipy_csr
except ImportError:
scipy_sparse = False
scipy_csr = object
def concat(value: Sequence[_T]) -> _T: # pylint: disable=too-many-return-statements
"""Concatenate row-wise."""
if isinstance(value[0], np.ndarray):
value_arr = cast(Sequence[np.ndarray], value)
return np.concatenate(value_arr, axis=0)
if scipy_sparse and isinstance(value[0], scipy_sparse.csr_matrix):
return scipy_sparse.vstack(value, format="csr")
if scipy_sparse and isinstance(value[0], scipy_sparse.csc_matrix):
return scipy_sparse.vstack(value, format="csc")
if scipy_sparse and isinstance(value[0], scipy_sparse.spmatrix):
# other sparse format will be converted to CSR.
return scipy_sparse.vstack(value, format="csr")
if PANDAS_INSTALLED and isinstance(value[0], (DataFrame, Series)):
from pandas import concat as pd_concat
return pd_concat(value, axis=0)
if lazy_isinstance(value[0], "cudf.core.dataframe", "DataFrame") or lazy_isinstance(
value[0], "cudf.core.series", "Series"
):
from cudf import concat as CUDF_concat
return CUDF_concat(value, axis=0)
from .data import _is_cupy_alike
if _is_cupy_alike(value[0]):
import cupy
# pylint: disable=c-extension-no-member,no-member
d = cupy.cuda.runtime.getDevice()
for v in value:
arr = cast(cupy.ndarray, v)
d_v = arr.device.id
assert d_v == d, "Concatenating arrays on different devices."
return cupy.concatenate(value, axis=0)
raise TypeError("Unknown type.")
|