input | output
---|---
import asyncio
import copy
from typing import Any, List
from jina.serve.gateway import BaseGateway
class CompositeGateway(BaseGateway):
"""GRPC Gateway implementation"""
def __init__(
self,
**kwargs,
):
"""Initialize the gateway
:param kwargs: keyword args
"""
super().__init__(**kwargs)
from jina.parsers.helper import _get_gateway_class
self.gateways: List[BaseGateway] = []
for port, protocol in zip(self.ports, self.protocols):
gateway_cls = _get_gateway_class(protocol)
# ignore monitoring and tracing args since they are not copyable
ignored_attrs = [
'metrics_registry',
'tracer_provider',
'grpc_tracing_server_interceptors',
'aio_tracing_client_interceptors',
'tracing_client_interceptor',
]
runtime_args = self._deepcopy_with_ignore_attrs(
self.runtime_args, ignored_attrs
)
runtime_args.port = [port]
runtime_args.protocol = [protocol]
gateway_kwargs = {k: v for k, v in kwargs.items() if k != 'runtime_args'}
gateway_kwargs['runtime_args'] = dict(vars(runtime_args))
gateway = gateway_cls(streamer=self.streamer, **gateway_kwargs)
gateway.streamer = self.streamer
self.gateways.append(gateway)
async def setup_server(self):
"""
setup GRPC server
"""
tasks = []
for gateway in self.gateways:
tasks.append(asyncio.create_task(gateway.setup_server()))
await asyncio.gather(*tasks)
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
shutdown_tasks = []
for gateway in self.gateways:
shutdown_tasks.append(asyncio.create_task(gateway.shutdown()))
await asyncio.gather(*shutdown_tasks)
async def run_server(self):
"""Run GRPC server forever"""
run_server_tasks = []
for gateway in self.gateways:
run_server_tasks.append(asyncio.create_task(gateway.run_server()))
await asyncio.gather(*run_server_tasks)
@staticmethod
def _deepcopy_with_ignore_attrs(obj: Any, ignore_attrs: List[str]) -> Any:
"""Deep copy an object and ignore some attributes
:param obj: the object to copy
:param ignore_attrs: the attributes to ignore
:return: the copied object
"""
memo = {}
for k in ignore_attrs:
if hasattr(obj, k):
                memo[id(getattr(obj, k))] = None  # map the ignored attribute to None in the copy
return copy.deepcopy(obj, memo)
@property
def _should_exit(self) -> bool:
should_exit_values = [
getattr(gateway.server, 'should_exit', True) for gateway in self.gateways
]
return all(should_exit_values)
|
import asyncio
import copy
from typing import Any, List
from jina.serve.gateway import BaseGateway
class CompositeGateway(BaseGateway):
"""GRPC Gateway implementation"""
def __init__(
self,
**kwargs,
):
"""Initialize the gateway
:param kwargs: keyword args
"""
super().__init__(**kwargs)
from jina.parsers.helper import _get_gateway_class
self.gateways: List[BaseGateway] = []
for port, protocol in zip(self.ports, self.protocols):
gateway_cls = _get_gateway_class(protocol)
# ignore metrics_registry since it is not copyable
runtime_args = self._deepcopy_with_ignore_attrs(
self.runtime_args, ['metrics_registry']
)
runtime_args.port = [port]
runtime_args.protocol = [protocol]
gateway_kwargs = {k: v for k, v in kwargs.items() if k != 'runtime_args'}
gateway_kwargs['runtime_args'] = dict(vars(runtime_args))
gateway = gateway_cls(**gateway_kwargs)
gateway.streamer = self.streamer
self.gateways.append(gateway)
async def setup_server(self):
"""
setup GRPC server
"""
tasks = []
for gateway in self.gateways:
tasks.append(asyncio.create_task(gateway.setup_server()))
await asyncio.gather(*tasks)
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
shutdown_tasks = []
for gateway in self.gateways:
shutdown_tasks.append(asyncio.create_task(gateway.shutdown()))
await asyncio.gather(*shutdown_tasks)
async def run_server(self):
"""Run GRPC server forever"""
run_server_tasks = []
for gateway in self.gateways:
run_server_tasks.append(asyncio.create_task(gateway.run_server()))
await asyncio.gather(*run_server_tasks)
@staticmethod
def _deepcopy_with_ignore_attrs(obj: Any, ignore_attrs: List[str]) -> Any:
"""Deep copy an object and ignore some attributes
:param obj: the object to copy
:param ignore_attrs: the attributes to ignore
:return: the copied object
"""
memo = {}
for k in ignore_attrs:
if hasattr(obj, k):
                memo[id(getattr(obj, k))] = None  # map the ignored attribute to None in the copy
return copy.deepcopy(obj, memo)
@property
def _should_exit(self) -> bool:
should_exit_values = [
getattr(gateway.server, 'should_exit', True) for gateway in self.gateways
]
return all(should_exit_values)
|
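The `_deepcopy_with_ignore_attrs` helper above works by pre-seeding `copy.deepcopy`'s memo dictionary, so the listed attributes come out as `None` in the copy instead of being duplicated. A minimal standalone sketch of the same trick (the `Args` class is a hypothetical stand-in for the runtime args object):
import copy

class Args:
    def __init__(self):
        self.port = 8080
        self.metrics_registry = object()  # stand-in for a non-copyable handle

def deepcopy_with_ignore_attrs(obj, ignore_attrs):
    # Pre-seed deepcopy's memo so the ignored attributes map to None
    # instead of being copied.
    memo = {id(getattr(obj, k)): None for k in ignore_attrs if hasattr(obj, k)}
    return copy.deepcopy(obj, memo)

args = Args()
clone = deepcopy_with_ignore_attrs(args, ['metrics_registry'])
assert clone.port == 8080 and clone.metrics_registry is None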
_base_ = './retinanet_r50-caffe_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './retinanet_r50_caffe_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class PAA(SingleStageDetector):
"""Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pdf>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of PAA. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of PAA. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class PAA(SingleStageDetector):
"""Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pdf>`_."""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
preprocess_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
preprocess_cfg=preprocess_cfg,
init_cfg=init_cfg)
|
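`@MODELS.register_module()` relies on a registry that maps names to classes, which is what lets detectors like PAA be built from config dicts. A toy sketch of that pattern, assuming nothing about mmdet internals beyond the decorator shape:
class Registry:
    def __init__(self):
        self._modules = {}

    def register_module(self):
        def _register(cls):
            # record the class under its own name so it can be built later
            self._modules[cls.__name__] = cls
            return cls
        return _register

    def build(self, name, **kwargs):
        return self._modules[name](**kwargs)

MODELS = Registry()

@MODELS.register_module()
class Dummy:
    def __init__(self, depth=50):
        self.depth = depth

model = MODELS.build('Dummy', depth=101)
assert model.depth == 101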
import re
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action:' after 'Thought:"
)
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action Input:' after 'Action:'"
)
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
"Parsing LLM output produced both a final answer and a parse-able action:"
)
class MRKLOutputParser(AgentOutputParser):
"""MRKL Output parser for the chat agent."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Parse the output from the agent into
an AgentAction or AgentFinish object.
Args:
text: The text to parse.
Returns:
An AgentAction or AgentFinish object.
Raises:
OutputParserException: If the output could not be parsed.
"""
includes_answer = FINAL_ANSWER_ACTION in text
regex = (
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
)
action_match = re.search(regex, text, re.DOTALL)
if action_match and includes_answer:
if text.find(FINAL_ANSWER_ACTION) < text.find(action_match.group(0)):
# if final answer is before the hallucination, return final answer
start_index = text.find(FINAL_ANSWER_ACTION) + len(FINAL_ANSWER_ACTION)
end_index = text.find("\n\n", start_index)
return AgentFinish(
{"output": text[start_index:end_index].strip()}, text[:end_index]
)
msg = f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
raise OutputParserException(msg)
if action_match:
action = action_match.group(1).strip()
action_input = action_match.group(2)
tool_input = action_input.strip(" ")
            # if it's a well-formed SQL query, don't strip trailing double quotes
            if not tool_input.startswith("SELECT "):
                tool_input = tool_input.strip('"')
return AgentAction(action, tool_input, text)
if includes_answer:
return AgentFinish(
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
)
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(
msg,
observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
if not re.search(
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
):
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(
msg,
observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(msg)
@property
def _type(self) -> str:
return "mrkl"
|
import re
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action:' after 'Thought:"
)
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action Input:' after 'Action:'"
)
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
"Parsing LLM output produced both a final answer and a parse-able action:"
)
class MRKLOutputParser(AgentOutputParser):
"""MRKL Output parser for the chat agent."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Parse the output from the agent into
an AgentAction or AgentFinish object.
Args:
text: The text to parse.
Returns:
An AgentAction or AgentFinish object.
Raises:
OutputParserException: If the output could not be parsed.
"""
includes_answer = FINAL_ANSWER_ACTION in text
regex = (
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
)
action_match = re.search(regex, text, re.DOTALL)
if action_match and includes_answer:
if text.find(FINAL_ANSWER_ACTION) < text.find(action_match.group(0)):
# if final answer is before the hallucination, return final answer
start_index = text.find(FINAL_ANSWER_ACTION) + len(FINAL_ANSWER_ACTION)
end_index = text.find("\n\n", start_index)
return AgentFinish(
{"output": text[start_index:end_index].strip()}, text[:end_index]
)
else:
msg = f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
raise OutputParserException(msg)
if action_match:
action = action_match.group(1).strip()
action_input = action_match.group(2)
tool_input = action_input.strip(" ")
            # if it's a well-formed SQL query, don't strip trailing double quotes
            if not tool_input.startswith("SELECT "):
                tool_input = tool_input.strip('"')
return AgentAction(action, tool_input, text)
elif includes_answer:
return AgentFinish(
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
)
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(
msg,
observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
elif not re.search(
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
):
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(
msg,
observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
else:
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(msg)
@property
def _type(self) -> str:
return "mrkl"
|
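The heart of both variants is the `Action ... Action Input ...` regex. A self-contained check of how it splits a typical ReAct-style completion (the sample text is made up):
import re

ACTION_RE = r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
sample = (
    "Thought: I should look this up.\n"
    "Action: Search\n"
    "Action Input: population of Paris"
)
match = re.search(ACTION_RE, sample, re.DOTALL)
assert match.group(1).strip() == "Search"          # tool name
assert match.group(2).strip() == "population of Paris"  # tool input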
from docarray.typing.tensor.video.video_ndarray import VideoNdArray
__all__ = ['VideoNdArray']
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor # noqa
__all__.extend(['VideoTorchTensor'])
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.video.video_tensorflow_tensor import ( # noqa: F401
VideoTensorFlowTensor,
)
__all__.extend(['VideoTensorFlowTensor'])
|
from docarray.typing.tensor.video.video_ndarray import VideoNdArray
__all__ = ['VideoNdArray']
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor # noqa
__all__.extend(['VideoTorchTensor'])
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.video.video_tensorflow_tensor import ( # noqa: F401
VideoTensorFlowTensor,
)
__all__.extend(['VideoTensorFlowTensor'])
|
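Both versions of this module guard backend-specific exports behind availability checks so that `__all__` only advertises what can actually be imported. A minimal sketch of the pattern, using `importlib.util.find_spec` as a stand-in for docarray's availability helpers:
import importlib.util

__all__ = ['BaseTensor']

class BaseTensor:
    """Always-available base type."""

if importlib.util.find_spec('torch') is not None:
    class TorchBackedTensor(BaseTensor):
        """Only defined and exported when torch is importable."""
    __all__.extend(['TorchBackedTensor'])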
from typing import Any, Dict, Optional, Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.prototype.transforms import functional as F, Transform
class ConvertBoundingBoxFormat(Transform):
_transformed_types = (features.BoundingBox,)
def __init__(self, format: Union[str, features.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = features.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: features.BoundingBox, params: Dict[str, Any]) -> features.BoundingBox:
# We need to unwrap here to avoid unnecessary `__torch_function__` calls,
# since `convert_format_bounding_box` does not have a dispatcher function that would do that for us
output = F.convert_format_bounding_box(
inpt.as_subclass(torch.Tensor), old_format=inpt.format, new_format=params["format"]
)
return features.BoundingBox.wrap_like(inpt, output, format=params["format"])
class ConvertImageDtype(Transform):
_transformed_types = (features.is_simple_tensor, features.Image, features.Video)
def __init__(self, dtype: torch.dtype = torch.float32) -> None:
super().__init__()
self.dtype = dtype
def _transform(
self, inpt: Union[features.TensorImageType, features.TensorVideoType], params: Dict[str, Any]
) -> Union[features.TensorImageType, features.TensorVideoType]:
# TODO: the `inpt.as_subclass(torch.Tensor)` call can be removed as soon as we have a proper dispatcher that
# handles this. See https://github.com/pytorch/vision/pull/6783 for details.
output = F.convert_image_dtype(inpt.as_subclass(torch.Tensor), dtype=self.dtype)
return (
output if features.is_simple_tensor(inpt) else type(inpt).wrap_like(inpt, output) # type: ignore[attr-defined]
)
class ConvertColorSpace(Transform):
_transformed_types = (features.is_simple_tensor, features.Image, PIL.Image.Image, features.Video)
def __init__(
self,
color_space: Union[str, features.ColorSpace],
old_color_space: Optional[Union[str, features.ColorSpace]] = None,
copy: bool = True,
) -> None:
super().__init__()
if isinstance(color_space, str):
color_space = features.ColorSpace.from_str(color_space)
self.color_space = color_space
if isinstance(old_color_space, str):
old_color_space = features.ColorSpace.from_str(old_color_space)
self.old_color_space = old_color_space
self.copy = copy
def _transform(
self, inpt: Union[features.ImageType, features.VideoType], params: Dict[str, Any]
) -> Union[features.ImageType, features.VideoType]:
return F.convert_color_space(
inpt, color_space=self.color_space, old_color_space=self.old_color_space, copy=self.copy
)
class ClampBoundingBoxes(Transform):
_transformed_types = (features.BoundingBox,)
def _transform(self, inpt: features.BoundingBox, params: Dict[str, Any]) -> features.BoundingBox:
# We need to unwrap here to avoid unnecessary `__torch_function__` calls,
# since `clamp_bounding_box` does not have a dispatcher function that would do that for us
output = F.clamp_bounding_box(
inpt.as_subclass(torch.Tensor), format=inpt.format, spatial_size=inpt.spatial_size
)
return features.BoundingBox.wrap_like(inpt, output)
|
from typing import Any, Dict, Optional, Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.prototype.transforms import functional as F, Transform
class ConvertBoundingBoxFormat(Transform):
_transformed_types = (features.BoundingBox,)
def __init__(self, format: Union[str, features.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = features.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: features.BoundingBox, params: Dict[str, Any]) -> features.BoundingBox:
output = F.convert_format_bounding_box(inpt, old_format=inpt.format, new_format=params["format"])
return features.BoundingBox.wrap_like(inpt, output, format=params["format"])
class ConvertImageDtype(Transform):
_transformed_types = (features.is_simple_tensor, features.Image, features.Video)
def __init__(self, dtype: torch.dtype = torch.float32) -> None:
super().__init__()
self.dtype = dtype
def _transform(
self, inpt: Union[features.TensorImageType, features.TensorVideoType], params: Dict[str, Any]
) -> Union[features.TensorImageType, features.TensorVideoType]:
output = F.convert_image_dtype(inpt, dtype=self.dtype)
return (
output if features.is_simple_tensor(inpt) else type(inpt).wrap_like(inpt, output) # type: ignore[attr-defined]
)
class ConvertColorSpace(Transform):
_transformed_types = (features.is_simple_tensor, features.Image, PIL.Image.Image, features.Video)
def __init__(
self,
color_space: Union[str, features.ColorSpace],
old_color_space: Optional[Union[str, features.ColorSpace]] = None,
copy: bool = True,
) -> None:
super().__init__()
if isinstance(color_space, str):
color_space = features.ColorSpace.from_str(color_space)
self.color_space = color_space
if isinstance(old_color_space, str):
old_color_space = features.ColorSpace.from_str(old_color_space)
self.old_color_space = old_color_space
self.copy = copy
def _transform(
self, inpt: Union[features.ImageType, features.VideoType], params: Dict[str, Any]
) -> Union[features.ImageType, features.VideoType]:
return F.convert_color_space(
inpt, color_space=self.color_space, old_color_space=self.old_color_space, copy=self.copy
)
class ClampBoundingBoxes(Transform):
_transformed_types = (features.BoundingBox,)
def _transform(self, inpt: features.BoundingBox, params: Dict[str, Any]) -> features.BoundingBox:
output = F.clamp_bounding_box(inpt, format=inpt.format, spatial_size=inpt.spatial_size)
return features.BoundingBox.wrap_like(inpt, output)
|
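The `inpt.as_subclass(torch.Tensor)` calls in the newer version drop the feature wrapper before invoking the kernel, avoiding `__torch_function__` dispatch overhead. A tiny demonstration of the unwrap trick, assuming only stock PyTorch:
import torch

class Tagged(torch.Tensor):
    """Do-nothing Tensor subclass standing in for features.BoundingBox."""

t = torch.zeros(2).as_subclass(Tagged)
plain = t.as_subclass(torch.Tensor)  # plain is a vanilla Tensor again, no copy
assert type(t) is Tagged and type(plain) is torch.Tensor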
from langchain.chains.structured_output.base import (
create_openai_fn_runnable,
create_structured_output_runnable,
)
__all__ = ["create_openai_fn_runnable", "create_structured_output_runnable"]
|
from langchain.chains.structured_output.base import (
create_openai_fn_runnable,
create_structured_output_runnable,
)
__all__ = ["create_structured_output_runnable", "create_openai_fn_runnable"]
|
import asyncio
import copy
from typing import Any, List
from jina.serve.runtimes.servers import BaseServer
class CompositeServer(BaseServer):
"""Composite Server implementation"""
def __init__(
self,
**kwargs,
):
"""Initialize the gateway
:param kwargs: keyword args
"""
super().__init__(**kwargs)
from jina.parsers.helper import _get_gateway_class
self.servers: List[BaseServer] = []
for port, protocol in zip(self.ports, self.protocols):
server_cls = _get_gateway_class(protocol, works_as_load_balancer=self.works_as_load_balancer)
# ignore monitoring and tracing args since they are not copyable
ignored_attrs = [
'metrics_registry',
'tracer_provider',
'grpc_tracing_server_interceptors',
'aio_tracing_client_interceptors',
'tracing_client_interceptor',
]
runtime_args = self._deepcopy_with_ignore_attrs(
self.runtime_args, ignored_attrs
)
runtime_args.port = [port]
runtime_args.protocol = [protocol]
server_kwargs = {k: v for k, v in kwargs.items() if k != 'runtime_args'}
server_kwargs['runtime_args'] = dict(vars(runtime_args))
server_kwargs['req_handler'] = self._request_handler
server = server_cls(**server_kwargs)
self.servers.append(server)
self.gateways = self.servers # for backwards compatibility
async def setup_server(self):
"""
setup servers inside CompositeServer
"""
tasks = []
for server in self.servers:
tasks.append(asyncio.create_task(server.setup_server()))
await asyncio.gather(*tasks)
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
await super().shutdown()
shutdown_tasks = []
for server in self.servers:
shutdown_tasks.append(asyncio.create_task(server.shutdown()))
await asyncio.gather(*shutdown_tasks)
async def run_server(self):
"""Run servers inside CompositeServer forever"""
run_server_tasks = []
for server in self.servers:
run_server_tasks.append(asyncio.create_task(server.run_server()))
await asyncio.gather(*run_server_tasks)
@staticmethod
def _deepcopy_with_ignore_attrs(obj: Any, ignore_attrs: List[str]) -> Any:
"""Deep copy an object and ignore some attributes
:param obj: the object to copy
:param ignore_attrs: the attributes to ignore
:return: the copied object
"""
memo = {}
for k in ignore_attrs:
if hasattr(obj, k):
                memo[id(getattr(obj, k))] = None  # map the ignored attribute to None in the copy
return copy.deepcopy(obj, memo)
@property
def _should_exit(self) -> bool:
should_exit_values = [
getattr(server, 'should_exit', True) for server in self.servers
]
return all(should_exit_values)
|
import asyncio
import copy
from typing import Any, List
from jina.serve.runtimes.servers import BaseServer
class CompositeServer(BaseServer):
"""Composite Server implementation"""
def __init__(
self,
**kwargs,
):
"""Initialize the gateway
:param kwargs: keyword args
"""
super().__init__(**kwargs)
from jina.parsers.helper import _get_gateway_class
self.servers: List[BaseServer] = []
for port, protocol in zip(self.ports, self.protocols):
server_cls = _get_gateway_class(protocol)
# ignore monitoring and tracing args since they are not copyable
ignored_attrs = [
'metrics_registry',
'tracer_provider',
'grpc_tracing_server_interceptors',
'aio_tracing_client_interceptors',
'tracing_client_interceptor',
]
runtime_args = self._deepcopy_with_ignore_attrs(
self.runtime_args, ignored_attrs
)
runtime_args.port = [port]
runtime_args.protocol = [protocol]
server_kwargs = {k: v for k, v in kwargs.items() if k != 'runtime_args'}
server_kwargs['runtime_args'] = dict(vars(runtime_args))
server_kwargs['req_handler'] = self._request_handler
server = server_cls(**server_kwargs)
self.servers.append(server)
self.gateways = self.servers # for backwards compatibility
async def setup_server(self):
"""
setup GRPC server
"""
tasks = []
for server in self.servers:
tasks.append(asyncio.create_task(server.setup_server()))
await asyncio.gather(*tasks)
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
await super().shutdown()
shutdown_tasks = []
for server in self.servers:
shutdown_tasks.append(asyncio.create_task(server.shutdown()))
await asyncio.gather(*shutdown_tasks)
async def run_server(self):
"""Run GRPC server forever"""
run_server_tasks = []
for server in self.servers:
run_server_tasks.append(asyncio.create_task(server.run_server()))
await asyncio.gather(*run_server_tasks)
@staticmethod
def _deepcopy_with_ignore_attrs(obj: Any, ignore_attrs: List[str]) -> Any:
"""Deep copy an object and ignore some attributes
:param obj: the object to copy
:param ignore_attrs: the attributes to ignore
:return: the copied object
"""
memo = {}
for k in ignore_attrs:
if hasattr(obj, k):
                memo[id(getattr(obj, k))] = None  # map the ignored attribute to None in the copy
return copy.deepcopy(obj, memo)
@property
def _should_exit(self) -> bool:
should_exit_values = [
getattr(server.server, 'should_exit', True) for server in self.servers
]
return all(should_exit_values)
|
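`setup_server`, `run_server`, and `shutdown` all share the same fan-out shape: one asyncio task per sub-server, awaited together with `gather`. A toy, self-contained sketch of that shape (`ToyServer` is hypothetical):
import asyncio

class ToyServer:
    def __init__(self, name):
        self.name = name

    async def run_server(self):
        await asyncio.sleep(0.01)  # stand-in for serving traffic
        print(f"{self.name} done")

async def main():
    servers = [ToyServer('grpc'), ToyServer('http'), ToyServer('websocket')]
    # start one task per server, then wait for all of them concurrently
    tasks = [asyncio.create_task(s.run_server()) for s in servers]
    await asyncio.gather(*tasks)

asyncio.run(main())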
from enum import Enum
from fsspec import AbstractFileSystem
from pathlib import Path
from typing import Any, Dict, Iterable, Optional, Protocol, runtime_checkable
import json
import uuid
from docling.document_converter import DocumentConverter
from docling_core.types import DoclingDocument as DLDocument
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core import Document as LIDocument
from pydantic import Field
class DoclingReader(BasePydanticReader):
"""
Docling Reader.
Extracts PDF, DOCX, and other document formats into LlamaIndex Documents as either Markdown or JSON-serialized Docling native format.
Args:
export_type (Literal["markdown", "json"], optional): The type to export to. Defaults to "markdown".
doc_converter (DocumentConverter, optional): The Docling converter to use. Default factory: `DocumentConverter`.
md_export_kwargs (Dict[str, Any], optional): Kwargs to use in case of markdown export. Defaults to `{"image_placeholder": ""}`.
        id_func (DocIDGenCallable, optional): Doc ID generation function to use. Defaults to `_uuid4_doc_id_gen`.
"""
class ExportType(str, Enum):
MARKDOWN = "markdown"
JSON = "json"
@runtime_checkable
class DocIDGenCallable(Protocol):
def __call__(self, doc: DLDocument, file_path: str | Path) -> str: ...
@staticmethod
def _uuid4_doc_id_gen(doc: DLDocument, file_path: str | Path) -> str:
return str(uuid.uuid4())
export_type: ExportType = ExportType.MARKDOWN
doc_converter: DocumentConverter = Field(default_factory=DocumentConverter)
md_export_kwargs: Dict[str, Any] = {"image_placeholder": ""}
id_func: DocIDGenCallable = _uuid4_doc_id_gen
def lazy_load_data(
self,
file_path: str | Path | Iterable[str] | Iterable[Path],
extra_info: dict | None = None,
fs: Optional[AbstractFileSystem] = None,
) -> Iterable[LIDocument]:
"""
        Lazily load from the given source.
Args:
            file_path (str | Path | Iterable[str] | Iterable[Path]): Document file source as a single str (URL or local file) or pathlib.Path, or an iterable thereof.
extra_info (dict | None, optional): Any pre-existing metadata to include. Defaults to None.
Returns:
Iterable[LIDocument]: Iterable over the created LlamaIndex documents.
"""
file_paths = (
file_path
if isinstance(file_path, Iterable) and not isinstance(file_path, str)
else [file_path]
)
for source in file_paths:
dl_doc = self.doc_converter.convert(source).document
text: str
if self.export_type == self.ExportType.MARKDOWN:
text = dl_doc.export_to_markdown(**self.md_export_kwargs)
elif self.export_type == self.ExportType.JSON:
text = json.dumps(dl_doc.export_to_dict())
else:
raise ValueError(f"Unexpected export type: {self.export_type}")
li_doc = LIDocument(
doc_id=self.id_func(doc=dl_doc, file_path=source),
text=text,
)
li_doc.metadata = extra_info or {}
yield li_doc
|
from enum import Enum
from fsspec import AbstractFileSystem
from pathlib import Path
from typing import Any, Dict, Iterable, Optional, Protocol, runtime_checkable
import json
import uuid
from docling.document_converter import DocumentConverter
from docling_core.types import DoclingDocument as DLDocument
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core import Document as LIDocument
from pydantic import Field
class DoclingReader(BasePydanticReader):
"""
Docling Reader.
Extracts PDF, DOCX, and other document formats into LlamaIndex Documents as either Markdown or JSON-serialized Docling native format.
Args:
export_type (Literal["markdown", "json"], optional): The type to export to. Defaults to "markdown".
doc_converter (DocumentConverter, optional): The Docling converter to use. Default factory: `DocumentConverter`.
md_export_kwargs (Dict[str, Any], optional): Kwargs to use in case of markdown export. Defaults to `{"image_placeholder": ""}`.
        id_func (DocIDGenCallable, optional): Doc ID generation function to use. Defaults to `_uuid4_doc_id_gen`.
"""
class ExportType(str, Enum):
MARKDOWN = "markdown"
JSON = "json"
@runtime_checkable
class DocIDGenCallable(Protocol):
def __call__(self, doc: DLDocument, file_path: str | Path) -> str:
...
@staticmethod
def _uuid4_doc_id_gen(doc: DLDocument, file_path: str | Path) -> str:
return str(uuid.uuid4())
export_type: ExportType = ExportType.MARKDOWN
doc_converter: DocumentConverter = Field(default_factory=DocumentConverter)
md_export_kwargs: Dict[str, Any] = {"image_placeholder": ""}
id_func: DocIDGenCallable = _uuid4_doc_id_gen
def lazy_load_data(
self,
file_path: str | Path | Iterable[str] | Iterable[Path],
extra_info: dict | None = None,
fs: Optional[AbstractFileSystem] = None,
) -> Iterable[LIDocument]:
"""
        Lazily load from the given source.
Args:
            file_path (str | Path | Iterable[str] | Iterable[Path]): Document file source as a single str (URL or local file) or pathlib.Path, or an iterable thereof.
extra_info (dict | None, optional): Any pre-existing metadata to include. Defaults to None.
Returns:
Iterable[LIDocument]: Iterable over the created LlamaIndex documents.
"""
file_paths = (
file_path
if isinstance(file_path, Iterable) and not isinstance(file_path, str)
else [file_path]
)
for source in file_paths:
dl_doc = self.doc_converter.convert(source).document
text: str
if self.export_type == self.ExportType.MARKDOWN:
text = dl_doc.export_to_markdown(**self.md_export_kwargs)
elif self.export_type == self.ExportType.JSON:
text = json.dumps(dl_doc.export_to_dict())
else:
raise ValueError(f"Unexpected export type: {self.export_type}")
li_doc = LIDocument(
doc_id=self.id_func(doc=dl_doc, file_path=source),
text=text,
)
li_doc.metadata = extra_info or {}
yield li_doc
|
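`lazy_load_data` normalizes its `file_path` argument with a small but easy-to-get-wrong idiom: strings are themselves iterable, so they must be excluded before the generic `Iterable` check. A standalone sketch:
from collections.abc import Iterable

def as_sources(file_path):
    # Special-case str before the Iterable check, otherwise "doc.pdf"
    # would be iterated character by character.
    if isinstance(file_path, Iterable) and not isinstance(file_path, str):
        return list(file_path)
    return [file_path]

assert as_sources("doc.pdf") == ["doc.pdf"]
assert as_sources(["a.pdf", "b.docx"]) == ["a.pdf", "b.docx"]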
from typing import Optional, Union
import torch
from torch import nn, Tensor
def _cat(tensors: list[Tensor], dim: int = 0) -> Tensor:
"""
Efficient version of torch.cat that avoids a copy if there is only a single element in a list
"""
# TODO add back the assert
# assert isinstance(tensors, (list, tuple))
if len(tensors) == 1:
return tensors[0]
return torch.cat(tensors, dim)
def convert_boxes_to_roi_format(boxes: list[Tensor]) -> Tensor:
concat_boxes = _cat([b for b in boxes], dim=0)
temp = []
for i, b in enumerate(boxes):
temp.append(torch.full_like(b[:, :1], i))
ids = _cat(temp, dim=0)
rois = torch.cat([ids, concat_boxes], dim=1)
return rois
def check_roi_boxes_shape(boxes: Union[Tensor, list[Tensor]]):
if isinstance(boxes, (list, tuple)):
for _tensor in boxes:
torch._assert(
_tensor.size(1) == 4, "The shape of the tensor in the boxes list is not correct as List[Tensor[L, 4]]"
)
elif isinstance(boxes, torch.Tensor):
torch._assert(boxes.size(1) == 5, "The boxes tensor shape is not correct as Tensor[K, 5]")
else:
torch._assert(False, "boxes is expected to be a Tensor[L, 5] or a List[Tensor[K, 4]]")
return
def split_normalization_params(
model: nn.Module, norm_classes: Optional[list[type]] = None
) -> tuple[list[Tensor], list[Tensor]]:
# Adapted from https://github.com/facebookresearch/ClassyVision/blob/659d7f78/classy_vision/generic/util.py#L501
if not norm_classes:
norm_classes = [
nn.modules.batchnorm._BatchNorm,
nn.LayerNorm,
nn.GroupNorm,
nn.modules.instancenorm._InstanceNorm,
nn.LocalResponseNorm,
]
for t in norm_classes:
if not issubclass(t, nn.Module):
raise ValueError(f"Class {t} is not a subclass of nn.Module.")
classes = tuple(norm_classes)
norm_params = []
other_params = []
for module in model.modules():
if next(module.children(), None):
other_params.extend(p for p in module.parameters(recurse=False) if p.requires_grad)
elif isinstance(module, classes):
norm_params.extend(p for p in module.parameters() if p.requires_grad)
else:
other_params.extend(p for p in module.parameters() if p.requires_grad)
return norm_params, other_params
def _upcast(t: Tensor) -> Tensor:
# Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
if t.is_floating_point():
return t if t.dtype in (torch.float32, torch.float64) else t.float()
else:
return t if t.dtype in (torch.int32, torch.int64) else t.int()
def _upcast_non_float(t: Tensor) -> Tensor:
# Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
if t.dtype not in (torch.float32, torch.float64):
return t.float()
return t
def _loss_inter_union(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
# Intersection keypoints
xkis1 = torch.max(x1, x1g)
ykis1 = torch.max(y1, y1g)
xkis2 = torch.min(x2, x2g)
ykis2 = torch.min(y2, y2g)
intsctk = torch.zeros_like(x1)
mask = (ykis2 > ykis1) & (xkis2 > xkis1)
intsctk[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
unionk = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsctk
return intsctk, unionk
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn, Tensor
def _cat(tensors: List[Tensor], dim: int = 0) -> Tensor:
"""
Efficient version of torch.cat that avoids a copy if there is only a single element in a list
"""
# TODO add back the assert
# assert isinstance(tensors, (list, tuple))
if len(tensors) == 1:
return tensors[0]
return torch.cat(tensors, dim)
def convert_boxes_to_roi_format(boxes: List[Tensor]) -> Tensor:
concat_boxes = _cat([b for b in boxes], dim=0)
temp = []
for i, b in enumerate(boxes):
temp.append(torch.full_like(b[:, :1], i))
ids = _cat(temp, dim=0)
rois = torch.cat([ids, concat_boxes], dim=1)
return rois
def check_roi_boxes_shape(boxes: Union[Tensor, List[Tensor]]):
if isinstance(boxes, (list, tuple)):
for _tensor in boxes:
torch._assert(
_tensor.size(1) == 4, "The shape of the tensor in the boxes list is not correct as List[Tensor[L, 4]]"
)
elif isinstance(boxes, torch.Tensor):
torch._assert(boxes.size(1) == 5, "The boxes tensor shape is not correct as Tensor[K, 5]")
else:
torch._assert(False, "boxes is expected to be a Tensor[L, 5] or a List[Tensor[K, 4]]")
return
def split_normalization_params(
model: nn.Module, norm_classes: Optional[List[type]] = None
) -> Tuple[List[Tensor], List[Tensor]]:
# Adapted from https://github.com/facebookresearch/ClassyVision/blob/659d7f78/classy_vision/generic/util.py#L501
if not norm_classes:
norm_classes = [
nn.modules.batchnorm._BatchNorm,
nn.LayerNorm,
nn.GroupNorm,
nn.modules.instancenorm._InstanceNorm,
nn.LocalResponseNorm,
]
for t in norm_classes:
if not issubclass(t, nn.Module):
raise ValueError(f"Class {t} is not a subclass of nn.Module.")
classes = tuple(norm_classes)
norm_params = []
other_params = []
for module in model.modules():
if next(module.children(), None):
other_params.extend(p for p in module.parameters(recurse=False) if p.requires_grad)
elif isinstance(module, classes):
norm_params.extend(p for p in module.parameters() if p.requires_grad)
else:
other_params.extend(p for p in module.parameters() if p.requires_grad)
return norm_params, other_params
def _upcast(t: Tensor) -> Tensor:
# Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
if t.is_floating_point():
return t if t.dtype in (torch.float32, torch.float64) else t.float()
else:
return t if t.dtype in (torch.int32, torch.int64) else t.int()
def _upcast_non_float(t: Tensor) -> Tensor:
# Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
if t.dtype not in (torch.float32, torch.float64):
return t.float()
return t
def _loss_inter_union(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
# Intersection keypoints
xkis1 = torch.max(x1, x1g)
ykis1 = torch.max(y1, y1g)
xkis2 = torch.min(x2, x2g)
ykis2 = torch.min(y2, y2g)
intsctk = torch.zeros_like(x1)
mask = (ykis2 > ykis1) & (xkis2 > xkis1)
intsctk[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
unionk = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsctk
return intsctk, unionk
|
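A small worked check of `_loss_inter_union`, assuming the function defined above is in scope and torch is installed: two 2x2 boxes overlapping in a unit square should give intersection 1 and union 4 + 4 - 1 = 7.
import torch

boxes1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])
boxes2 = torch.tensor([[1.0, 1.0, 3.0, 3.0]])
inter, union = _loss_inter_union(boxes1, boxes2)
# Intersection is the 1x1 square [1,2]x[1,2]; union is 4 + 4 - 1 = 7.
assert inter.item() == 1.0 and union.item() == 7.0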
# Copyright (c) OpenMMLab. All rights reserved.
import random
from typing import Sequence
import numpy as np
import torch
DATA_BATCH = Sequence[dict]
def worker_init_fn(worker_id: int, num_workers: int, rank: int,
seed: int) -> None:
"""This function will be called on each worker subprocess after seeding and
before data loading.
Args:
worker_id (int): Worker id in [0, num_workers - 1].
num_workers (int): How many subprocesses to use for data loading.
rank (int): Rank of process in distributed environment. If in
non-distributed environment, it is a constant number `0`.
seed (int): Random seed.
"""
    # The seed of each worker equals
    # num_workers * rank + worker_id + user_seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
torch.manual_seed(worker_seed)
def pseudo_collate(data_batch: DATA_BATCH) -> DATA_BATCH:
"""The default behavior of dataloader is to merge a list of samples to form
    a mini-batch of Tensor(s). However, in MMEngine, ``pseudo_collate`` does
    nothing but return ``data_batch``.
Args:
data_batch (Sequence[dict]): Batch of data from
dataloader.
Returns:
Sequence[dict]: Return input ``data_batch``.
"""
return data_batch
|
# Copyright (c) OpenMMLab. All rights reserved.
import random
from typing import Any, Sequence, Tuple
import numpy as np
import torch
from .base_data_element import BaseDataElement
DATA_BATCH = Sequence[Tuple[Any, BaseDataElement]]
def worker_init_fn(worker_id: int, num_workers: int, rank: int,
seed: int) -> None:
"""This function will be called on each worker subprocess after seeding and
before data loading.
Args:
worker_id (int): Worker id in [0, num_workers - 1].
num_workers (int): How many subprocesses to use for data loading.
rank (int): Rank of process in distributed environment. If in
non-distributed environment, it is a constant number `0`.
seed (int): Random seed.
"""
    # The seed of each worker equals
    # num_workers * rank + worker_id + user_seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
torch.manual_seed(worker_seed)
def pseudo_collate(data_batch: DATA_BATCH) -> DATA_BATCH:
"""The default behavior of dataloader is to merge a list of samples to form
    a mini-batch of Tensor(s). However, in MMEngine, ``pseudo_collate`` does
    nothing but return ``data_batch``.
Args:
data_batch (Sequence[Tuple[Any, BaseDataElement]]): Batch of data from
dataloader.
Returns:
Sequence[Tuple[Any, BaseDataElement]]: Return input ``data_batch``.
"""
return data_batch
|
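The seed formula guarantees distinct worker seeds across ranks as long as `worker_id < num_workers`. A quick arithmetic check:
# With num_workers=4 and two ranks, the formula yields eight distinct
# worker seeds (1000..1007).
num_workers, seed = 4, 1000
seeds = {
    num_workers * rank + worker_id + seed
    for rank in range(2)
    for worker_id in range(num_workers)
}
assert len(seeds) == 2 * num_workers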
#!/usr/bin/env python
"""Script to sync libraries from various repositories into the main langchain repository."""
import os
import shutil
import yaml
from pathlib import Path
from typing import Dict, Any
def load_packages_yaml() -> Dict[str, Any]:
"""Load and parse the packages.yml file."""
with open("langchain/libs/packages.yml", "r") as f:
return yaml.safe_load(f)
def get_target_dir(package_name: str) -> Path:
"""Get the target directory for a given package."""
package_name_short = package_name.replace("langchain-", "")
base_path = Path("langchain/libs")
if package_name_short == "experimental":
return base_path / "experimental"
return base_path / "partners" / package_name_short
def clean_target_directories(packages: list) -> None:
"""Remove old directories that will be replaced."""
for package in packages:
target_dir = get_target_dir(package["name"])
if target_dir.exists():
print(f"Removing {target_dir}")
shutil.rmtree(target_dir)
def move_libraries(packages: list) -> None:
"""Move libraries from their source locations to the target directories."""
for package in packages:
repo_name = package["repo"].split("/")[1]
source_path = package["path"]
target_dir = get_target_dir(package["name"])
# Handle root path case
if source_path == ".":
source_dir = repo_name
else:
source_dir = f"{repo_name}/{source_path}"
print(f"Moving {source_dir} to {target_dir}")
# Ensure target directory exists
os.makedirs(os.path.dirname(target_dir), exist_ok=True)
try:
# Move the directory
shutil.move(source_dir, target_dir)
except Exception as e:
print(f"Error moving {source_dir} to {target_dir}: {e}")
def main():
"""Main function to orchestrate the library sync process."""
try:
# Load packages configuration
package_yaml = load_packages_yaml()
# Clean target directories
clean_target_directories([
p
for p in package_yaml["packages"]
if (p["repo"].startswith("langchain-ai/") or p.get("include_in_api_ref"))
and p["repo"] != "langchain-ai/langchain"
])
# Move libraries to their new locations
move_libraries([
p
for p in package_yaml["packages"]
if not p.get("disabled", False)
and (p["repo"].startswith("langchain-ai/") or p.get("include_in_api_ref"))
and p["repo"] != "langchain-ai/langchain"
])
# Delete ones without a pyproject.toml
for partner in Path("langchain/libs/partners").iterdir():
if partner.is_dir() and not (partner / "pyproject.toml").exists():
print(f"Removing {partner} as it does not have a pyproject.toml")
shutil.rmtree(partner)
print("Library sync completed successfully!")
except Exception as e:
print(f"Error during library sync: {e}")
raise
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
"""Script to sync libraries from various repositories into the main langchain repository."""
import os
import shutil
import yaml
from pathlib import Path
from typing import Dict, Any
def load_packages_yaml() -> Dict[str, Any]:
"""Load and parse the packages.yml file."""
with open("langchain/libs/packages.yml", "r") as f:
return yaml.safe_load(f)
def get_target_dir(package_name: str) -> Path:
"""Get the target directory for a given package."""
package_name_short = package_name.replace("langchain-", "")
base_path = Path("langchain/libs")
if package_name_short == "experimental":
return base_path / "experimental"
return base_path / "partners" / package_name_short
def clean_target_directories(packages: list) -> None:
"""Remove old directories that will be replaced."""
for package in packages:
target_dir = get_target_dir(package["name"])
if target_dir.exists():
print(f"Removing {target_dir}")
shutil.rmtree(target_dir)
def move_libraries(packages: list) -> None:
"""Move libraries from their source locations to the target directories."""
for package in packages:
repo_name = package["repo"].split("/")[1]
source_path = package["path"]
target_dir = get_target_dir(package["name"])
# Handle root path case
if source_path == ".":
source_dir = repo_name
else:
source_dir = f"{repo_name}/{source_path}"
print(f"Moving {source_dir} to {target_dir}")
# Ensure target directory exists
os.makedirs(os.path.dirname(target_dir), exist_ok=True)
try:
# Move the directory
shutil.move(source_dir, target_dir)
except Exception as e:
print(f"Error moving {source_dir} to {target_dir}: {e}")
def main():
"""Main function to orchestrate the library sync process."""
try:
# Load packages configuration
package_yaml = load_packages_yaml()
# Clean target directories
clean_target_directories([
p
for p in package_yaml["packages"]
if p["repo"].startswith("langchain-ai/")
and p["repo"] != "langchain-ai/langchain"
])
# Move libraries to their new locations
move_libraries([
p
for p in package_yaml["packages"]
if not p.get("disabled", False)
and p["repo"].startswith("langchain-ai/")
and p["repo"] != "langchain-ai/langchain"
])
# Delete ones without a pyproject.toml
for partner in Path("langchain/libs/partners").iterdir():
if partner.is_dir() and not (partner / "pyproject.toml").exists():
print(f"Removing {partner} as it does not have a pyproject.toml")
shutil.rmtree(partner)
print("Library sync completed successfully!")
except Exception as e:
print(f"Error during library sync: {e}")
raise
if __name__ == "__main__":
main()
|
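The behavioral difference between the two versions is the filter predicate, which the newer one widens with `include_in_api_ref`. A self-contained check of that predicate against made-up package entries (not the real packages.yml contents):
packages = [
    {"name": "langchain-openai", "repo": "langchain-ai/langchain-openai", "path": "."},
    {"name": "langchain", "repo": "langchain-ai/langchain", "path": "libs/langchain"},
    {"name": "langchain-foo", "repo": "other-org/foo", "path": ".", "include_in_api_ref": True},
]
selected = [
    p for p in packages
    if (p["repo"].startswith("langchain-ai/") or p.get("include_in_api_ref"))
    and p["repo"] != "langchain-ai/langchain"
]
assert [p["name"] for p in selected] == ["langchain-openai", "langchain-foo"]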
from functools import partial
from torchaudio.models import emformer_rnnt_base
from torchaudio.pipelines import RNNTBundle
EMFORMER_RNNT_BASE_MUSTC = RNNTBundle(
_rnnt_path="models/emformer_rnnt_base_mustc.pt",
_rnnt_factory_func=partial(emformer_rnnt_base, num_symbols=501),
_global_stats_path="pipeline-assets/global_stats_rnnt_mustc.json",
_sp_model_path="pipeline-assets/spm_bpe_500_mustc.model",
_right_padding=4,
_blank=500,
_sample_rate=16000,
_n_fft=400,
_n_mels=80,
_hop_length=160,
_segment_length=16,
_right_context_length=4,
)
EMFORMER_RNNT_BASE_MUSTC.__doc__ = """Pre-trained Emformer-RNNT-based ASR pipeline capable of performing both streaming and non-streaming inference.
The underlying model is constructed by :py:func:`torchaudio.models.emformer_rnnt_base`
and utilizes weights trained on MuST-C release v2.0 dataset using training script ``train.py``
`here <https://github.com/pytorch/audio/tree/main/examples/asr/emformer_rnnt>`__ with ``num_symbols=501``.
Please refer to :py:class:`torchaudio.pipelines.RNNTBundle` for usage instructions.
"""
EMFORMER_RNNT_BASE_TEDLIUM3 = RNNTBundle(
_rnnt_path="models/emformer_rnnt_base_tedlium3.pt",
_rnnt_factory_func=partial(emformer_rnnt_base, num_symbols=501),
_global_stats_path="pipeline-assets/global_stats_rnnt_tedlium3.json",
_sp_model_path="pipeline-assets/spm_bpe_500_tedlium3.model",
_right_padding=4,
_blank=500,
_sample_rate=16000,
_n_fft=400,
_n_mels=80,
_hop_length=160,
_segment_length=16,
_right_context_length=4,
)
EMFORMER_RNNT_BASE_TEDLIUM3.__doc__ = """Pre-trained Emformer-RNNT-based ASR pipeline capable of performing both streaming and non-streaming inference.
The underlying model is constructed by :py:func:`torchaudio.models.emformer_rnnt_base`
and utilizes weights trained on TED-LIUM Release 3 dataset using training script ``train.py``
`here <https://github.com/pytorch/audio/tree/main/examples/asr/emformer_rnnt>`__ with ``num_symbols=501``.
Please refer to :py:class:`torchaudio.pipelines.RNNTBundle` for usage instructions.
"""
|
from functools import partial
from torchaudio.models import emformer_rnnt_base
from torchaudio.pipelines import RNNTBundle
EMFORMER_RNNT_BASE_MUSTC = RNNTBundle(
_rnnt_path="emformer_rnnt_base_mustc.pt",
_rnnt_factory_func=partial(emformer_rnnt_base, num_symbols=501),
_global_stats_path="global_stats_rnnt_mustc.json",
_sp_model_path="spm_bpe_500_mustc.model",
_right_padding=4,
_blank=500,
_sample_rate=16000,
_n_fft=400,
_n_mels=80,
_hop_length=160,
_segment_length=16,
_right_context_length=4,
)
EMFORMER_RNNT_BASE_MUSTC.__doc__ = """Pre-trained Emformer-RNNT-based ASR pipeline capable of performing both streaming and non-streaming inference.
The underlying model is constructed by :py:func:`torchaudio.models.emformer_rnnt_base`
and utilizes weights trained on MuST-C release v2.0 dataset using training script ``train.py``
`here <https://github.com/pytorch/audio/tree/main/examples/asr/emformer_rnnt>`__ with ``num_symbols=501``.
Please refer to :py:class:`torchaudio.pipelines.RNNTBundle` for usage instructions.
"""
EMFORMER_RNNT_BASE_TEDLIUM3 = RNNTBundle(
_rnnt_path="emformer_rnnt_base_tedlium3.pt",
_rnnt_factory_func=partial(emformer_rnnt_base, num_symbols=501),
_global_stats_path="global_stats_rnnt_tedlium3.json",
_sp_model_path="spm_bpe_500_tedlium3.model",
_right_padding=4,
_blank=500,
_sample_rate=16000,
_n_fft=400,
_n_mels=80,
_hop_length=160,
_segment_length=16,
_right_context_length=4,
)
EMFORMER_RNNT_BASE_TEDLIUM3.__doc__ = """Pre-trained Emformer-RNNT-based ASR pipeline capable of performing both streaming and non-streaming inference.
The underlying model is constructed by :py:func:`torchaudio.models.emformer_rnnt_base`
and utilizes weights trained on TED-LIUM Release 3 dataset using training script ``train.py``
`here <https://github.com/pytorch/audio/tree/main/examples/asr/emformer_rnnt>`__ with ``num_symbols=501``.
Please refer to :py:class:`torchaudio.pipelines.RNNTBundle` for usage instructions.
"""
|
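A hedged usage sketch for these bundles, following the pattern torchaudio documents for `RNNTBundle` (`get_feature_extractor` / `get_decoder` / `get_token_processor`); the silent waveform is a stand-in for real audio, and the sketch assumes the bundle's asset paths resolve:
import torch

bundle = EMFORMER_RNNT_BASE_TEDLIUM3
feature_extractor = bundle.get_feature_extractor()
decoder = bundle.get_decoder()
token_processor = bundle.get_token_processor()

waveform = torch.zeros(1, bundle.sample_rate)  # one second of silence
features, length = feature_extractor(waveform.squeeze())
hypotheses = decoder(features, length, 10)  # beam width 10
print(token_processor(hypotheses[0][0]))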
"""
Demo for using and defining callback functions
==============================================
.. versionadded:: 1.3.0
"""
import argparse
import os
import tempfile
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
import xgboost as xgb
class Plotting(xgb.callback.TrainingCallback):
"""Plot evaluation result during training. Only for demonstration purpose as it's quite
slow to draw.
"""
def __init__(self, rounds):
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.rounds = rounds
self.lines = {}
self.fig.show()
self.x = np.linspace(0, self.rounds, self.rounds)
plt.ion()
def _get_key(self, data, metric):
return f"{data}-{metric}"
def after_iteration(self, model, epoch, evals_log):
"""Update the plot."""
if not self.lines:
for data, metric in evals_log.items():
for metric_name, log in metric.items():
key = self._get_key(data, metric_name)
expanded = log + [0] * (self.rounds - len(log))
(self.lines[key],) = self.ax.plot(self.x, expanded, label=key)
self.ax.legend()
else:
# https://pythonspot.com/matplotlib-update-plot/
for data, metric in evals_log.items():
for metric_name, log in metric.items():
key = self._get_key(data, metric_name)
expanded = log + [0] * (self.rounds - len(log))
self.lines[key].set_ydata(expanded)
self.fig.canvas.draw()
# False to indicate training should not stop.
return False
def custom_callback():
"""Demo for defining a custom callback function that plots evaluation result during
training."""
X, y = load_breast_cancer(return_X_y=True)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
D_train = xgb.DMatrix(X_train, y_train)
D_valid = xgb.DMatrix(X_valid, y_valid)
num_boost_round = 100
plotting = Plotting(num_boost_round)
# Pass it to the `callbacks` parameter as a list.
xgb.train(
{
"objective": "binary:logistic",
"eval_metric": ["error", "rmse"],
"tree_method": "hist",
"device": "cuda",
},
D_train,
evals=[(D_train, "Train"), (D_valid, "Valid")],
num_boost_round=num_boost_round,
callbacks=[plotting],
)
def check_point_callback():
    # Only for demo; set a larger value (like 100) in practice, as
    # checkpointing is quite slow.
rounds = 2
def check(as_pickle):
for i in range(0, 10, rounds):
if i == 0:
continue
if as_pickle:
path = os.path.join(tmpdir, "model_" + str(i) + ".pkl")
else:
path = os.path.join(tmpdir, "model_" + str(i) + ".json")
assert os.path.exists(path)
X, y = load_breast_cancer(return_X_y=True)
m = xgb.DMatrix(X, y)
# Check point to a temporary directory for demo
with tempfile.TemporaryDirectory() as tmpdir:
# Use callback class from xgboost.callback
# Feel free to subclass/customize it to suit your need.
check_point = xgb.callback.TrainingCheckPoint(
directory=tmpdir, interval=rounds, name="model"
)
xgb.train(
{"objective": "binary:logistic"},
m,
num_boost_round=10,
verbose_eval=False,
callbacks=[check_point],
)
check(False)
# This version of checkpoint saves everything including parameters and
# model. See: doc/tutorials/saving_model.rst
check_point = xgb.callback.TrainingCheckPoint(
directory=tmpdir, interval=rounds, as_pickle=True, name="model"
)
xgb.train(
{"objective": "binary:logistic"},
m,
num_boost_round=10,
verbose_eval=False,
callbacks=[check_point],
)
check(True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--plot", default=1, type=int)
args = parser.parse_args()
check_point_callback()
if args.plot:
custom_callback()
|
"""
Demo for using and defining callback functions
==============================================
.. versionadded:: 1.3.0
"""
import argparse
import os
import tempfile
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
import xgboost as xgb
class Plotting(xgb.callback.TrainingCallback):
"""Plot evaluation result during training. Only for demonstration purpose as it's quite
slow to draw.
"""
def __init__(self, rounds):
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.rounds = rounds
self.lines = {}
self.fig.show()
self.x = np.linspace(0, self.rounds, self.rounds)
plt.ion()
def _get_key(self, data, metric):
return f"{data}-{metric}"
def after_iteration(self, model, epoch, evals_log):
"""Update the plot."""
if not self.lines:
for data, metric in evals_log.items():
for metric_name, log in metric.items():
key = self._get_key(data, metric_name)
expanded = log + [0] * (self.rounds - len(log))
(self.lines[key],) = self.ax.plot(self.x, expanded, label=key)
self.ax.legend()
else:
# https://pythonspot.com/matplotlib-update-plot/
for data, metric in evals_log.items():
for metric_name, log in metric.items():
key = self._get_key(data, metric_name)
expanded = log + [0] * (self.rounds - len(log))
self.lines[key].set_ydata(expanded)
self.fig.canvas.draw()
# False to indicate training should not stop.
return False
def custom_callback():
"""Demo for defining a custom callback function that plots evaluation result during
training."""
X, y = load_breast_cancer(return_X_y=True)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
D_train = xgb.DMatrix(X_train, y_train)
D_valid = xgb.DMatrix(X_valid, y_valid)
num_boost_round = 100
plotting = Plotting(num_boost_round)
# Pass it to the `callbacks` parameter as a list.
xgb.train(
{
"objective": "binary:logistic",
"eval_metric": ["error", "rmse"],
"tree_method": "hist",
"device": "cuda",
},
D_train,
evals=[(D_train, "Train"), (D_valid, "Valid")],
num_boost_round=num_boost_round,
callbacks=[plotting],
)
def check_point_callback():
    # Only for demo; set a larger value (like 100) in practice, as
    # checkpointing is quite slow.
rounds = 2
def check(as_pickle):
for i in range(0, 10, rounds):
if i == 0:
continue
if as_pickle:
path = os.path.join(tmpdir, "model_" + str(i) + ".pkl")
else:
path = os.path.join(tmpdir, "model_" + str(i) + ".json")
assert os.path.exists(path)
X, y = load_breast_cancer(return_X_y=True)
m = xgb.DMatrix(X, y)
# Check point to a temporary directory for demo
with tempfile.TemporaryDirectory() as tmpdir:
# Use callback class from xgboost.callback
# Feel free to subclass/customize it to suit your need.
check_point = xgb.callback.TrainingCheckPoint(
directory=tmpdir, iterations=rounds, name="model"
)
xgb.train(
{"objective": "binary:logistic"},
m,
num_boost_round=10,
verbose_eval=False,
callbacks=[check_point],
)
check(False)
# This version of checkpoint saves everything including parameters and
# model. See: doc/tutorials/saving_model.rst
check_point = xgb.callback.TrainingCheckPoint(
directory=tmpdir, iterations=rounds, as_pickle=True, name="model"
)
xgb.train(
{"objective": "binary:logistic"},
m,
num_boost_round=10,
verbose_eval=False,
callbacks=[check_point],
)
check(True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--plot", default=1, type=int)
args = parser.parse_args()
check_point_callback()
if args.plot:
custom_callback()
|
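The `TrainingCallback` interface shown above also supports custom stopping logic: returning True from `after_iteration` ends training. A minimal hedged sketch (the `StopAfter` class is hypothetical, not part of xgboost):
import xgboost as xgb

class StopAfter(xgb.callback.TrainingCallback):
    """Stop training once `limit` boosting rounds have run."""
    def __init__(self, limit):
        self.limit = limit

    def after_iteration(self, model, epoch, evals_log):
        # Returning True tells xgboost to stop training early.
        return epoch + 1 >= self.limit

# Usage: xgb.train(params, dtrain, num_boost_round=100, callbacks=[StopAfter(5)])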
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.tree.tree_api import assert_same_paths
from keras.src.tree.tree_api import assert_same_structure
from keras.src.tree.tree_api import flatten
from keras.src.tree.tree_api import flatten_with_path
from keras.src.tree.tree_api import is_nested
from keras.src.tree.tree_api import lists_to_tuples
from keras.src.tree.tree_api import map_shape_structure
from keras.src.tree.tree_api import map_structure
from keras.src.tree.tree_api import map_structure_up_to
from keras.src.tree.tree_api import pack_sequence_as
from keras.src.tree.tree_api import traverse
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.tree.tree_api import assert_same_structure
from keras.src.tree.tree_api import flatten
from keras.src.tree.tree_api import is_nested
from keras.src.tree.tree_api import lists_to_tuples
from keras.src.tree.tree_api import map_shape_structure
from keras.src.tree.tree_api import map_structure
from keras.src.tree.tree_api import map_structure_up_to
from keras.src.tree.tree_api import pack_sequence_as
from keras.src.tree.tree_api import traverse
|
from tempfile import NamedTemporaryFile
import pytest
import requests
from datasets.utils.file_utils import fsspec_get, fsspec_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline, require_not_windows
@pytest.mark.integration
@require_not_windows # fsspec get keeps a file handle on windows that raises PermissionError
def test_offline_with_timeout():
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
with pytest.raises(RequestWouldHangIndefinitelyError):
requests.request("GET", "https://huggingface.co")
with pytest.raises(requests.exceptions.Timeout):
requests.request("GET", "https://huggingface.co", timeout=1.0)
with pytest.raises(requests.exceptions.Timeout), NamedTemporaryFile() as temp_file:
fsspec_get("hf://dummy", temp_file=temp_file)
@pytest.mark.integration
@require_not_windows # fsspec get keeps a file handle on windows that raises PermissionError
def test_offline_with_connection_error():
with offline(OfflineSimulationMode.CONNECTION_FAILS):
with pytest.raises(requests.exceptions.ConnectionError):
requests.request("GET", "https://huggingface.co")
with pytest.raises(requests.exceptions.ConnectionError), NamedTemporaryFile() as temp_file:
fsspec_get("hf://dummy", temp_file=temp_file)
def test_offline_with_datasets_offline_mode_enabled():
with offline(OfflineSimulationMode.HF_HUB_OFFLINE_SET_TO_1):
with pytest.raises(ConnectionError):
fsspec_head("hf://dummy")
with pytest.raises(ConnectionError), NamedTemporaryFile() as temp_file:
fsspec_get("hf://dummy", temp_file=temp_file)
|
from tempfile import NamedTemporaryFile
import huggingface_hub
import pytest
import requests
from packaging import version
from datasets.utils.file_utils import fsspec_get, fsspec_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline, require_not_windows
@pytest.mark.integration
@require_not_windows # fsspec get keeps a file handle on windows that raises PermissionError
def test_offline_with_timeout():
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
with pytest.raises(RequestWouldHangIndefinitelyError):
requests.request("GET", "https://huggingface.co")
with pytest.raises(requests.exceptions.Timeout):
requests.request("GET", "https://huggingface.co", timeout=1.0)
        # old versions of `huggingface_hub` don't have timeouts by default and don't allow setting timeouts in HfFileSystem
if version.parse(huggingface_hub.__version__) >= version.parse("0.23.0"):
with pytest.raises(requests.exceptions.Timeout), NamedTemporaryFile() as temp_file:
fsspec_get("hf://dummy", temp_file=temp_file)
@pytest.mark.integration
@require_not_windows # fsspec get keeps a file handle on windows that raises PermissionError
def test_offline_with_connection_error():
with offline(OfflineSimulationMode.CONNECTION_FAILS):
with pytest.raises(requests.exceptions.ConnectionError):
requests.request("GET", "https://huggingface.co")
with pytest.raises(requests.exceptions.ConnectionError), NamedTemporaryFile() as temp_file:
fsspec_get("hf://dummy", temp_file=temp_file)
def test_offline_with_datasets_offline_mode_enabled():
with offline(OfflineSimulationMode.HF_HUB_OFFLINE_SET_TO_1):
with pytest.raises(ConnectionError):
fsspec_head("hf://dummy")
with pytest.raises(ConnectionError), NamedTemporaryFile() as temp_file:
fsspec_get("hf://dummy", temp_file=temp_file)
|
from typing import List, Sequence
from llama_index.core.agent.workflow.base_agent import BaseWorkflowAgent
from llama_index.core.agent.workflow.single_agent_workflow import SingleAgentRunnerMixin
from llama_index.core.agent.workflow.workflow_events import (
AgentInput,
AgentOutput,
AgentStream,
ToolCallResult,
)
from llama_index.core.base.llms.types import ChatResponse
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.llms import ChatMessage
from llama_index.core.memory import BaseMemory
from llama_index.core.tools import AsyncBaseTool
from llama_index.core.workflow import Context
class FunctionAgent(SingleAgentRunnerMixin, BaseWorkflowAgent):
"""Function calling agent implementation."""
scratchpad_key: str = "scratchpad"
async def take_step(
self,
ctx: Context,
llm_input: List[ChatMessage],
tools: Sequence[AsyncBaseTool],
memory: BaseMemory,
) -> AgentOutput:
"""Take a single step with the function calling agent."""
if not self.llm.metadata.is_function_calling_model:
raise ValueError("LLM must be a FunctionCallingLLM")
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
current_llm_input = [*llm_input, *scratchpad]
ctx.write_event_to_stream(
AgentInput(input=current_llm_input, current_agent_name=self.name)
)
response = await self.llm.astream_chat_with_tools( # type: ignore
tools, chat_history=current_llm_input, allow_parallel_tool_calls=True
)
# last_chat_response will be used later, after the loop.
# We initialize it so it's valid even when 'response' is empty
last_chat_response = ChatResponse(message=ChatMessage())
async for last_chat_response in response:
tool_calls = self.llm.get_tool_calls_from_response( # type: ignore
last_chat_response, error_on_no_tool_call=False
)
raw = (
last_chat_response.raw.model_dump()
if isinstance(last_chat_response.raw, BaseModel)
else last_chat_response.raw
)
ctx.write_event_to_stream(
AgentStream(
delta=last_chat_response.delta or "",
response=last_chat_response.message.content or "",
tool_calls=tool_calls or [],
raw=raw,
current_agent_name=self.name,
)
)
tool_calls = self.llm.get_tool_calls_from_response( # type: ignore
last_chat_response, error_on_no_tool_call=False
)
# only add to scratchpad if we didn't select the handoff tool
scratchpad.append(last_chat_response.message)
await ctx.set(self.scratchpad_key, scratchpad)
raw = (
last_chat_response.raw.model_dump()
if isinstance(last_chat_response.raw, BaseModel)
else last_chat_response.raw
)
return AgentOutput(
response=last_chat_response.message,
tool_calls=tool_calls or [],
raw=raw,
current_agent_name=self.name,
)
async def handle_tool_call_results(
self, ctx: Context, results: List[ToolCallResult], memory: BaseMemory
) -> None:
"""Handle tool call results for function calling agent."""
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
for tool_call_result in results:
scratchpad.append(
ChatMessage(
role="tool",
content=str(tool_call_result.tool_output.content),
additional_kwargs={"tool_call_id": tool_call_result.tool_id},
)
)
if (
tool_call_result.return_direct
and tool_call_result.tool_name != "handoff"
):
scratchpad.append(
ChatMessage(
role="assistant",
content=str(tool_call_result.tool_output.content),
additional_kwargs={"tool_call_id": tool_call_result.tool_id},
)
)
break
await ctx.set(self.scratchpad_key, scratchpad)
async def finalize(
self, ctx: Context, output: AgentOutput, memory: BaseMemory
) -> AgentOutput:
"""Finalize the function calling agent.
Adds all in-progress messages to memory.
"""
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
await memory.aput_messages(scratchpad)
# reset scratchpad
await ctx.set(self.scratchpad_key, [])
return output
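# --- Usage sketch (illustrative, not part of the original module): assumes an
# installed function-calling LLM integration; the tool, model name, and entry
# point below are hypothetical.
import asyncio

from llama_index.llms.openai import OpenAI

def multiply(a: float, b: float) -> float:
    """Multiply two numbers."""
    return a * b

async def _demo() -> None:
    agent = FunctionAgent(
        name="calculator",
        description="Does simple arithmetic.",
        tools=[multiply],
        llm=OpenAI(model="gpt-4o-mini"),
    )
    output = await agent.run(user_msg="What is 6 times 7?")
    print(output)

if __name__ == "__main__":
    asyncio.run(_demo())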
|
from typing import List, Sequence
from llama_index.core.agent.workflow.base_agent import BaseWorkflowAgent
from llama_index.core.agent.workflow.single_agent_workflow import SingleAgentRunnerMixin
from llama_index.core.agent.workflow.workflow_events import (
AgentInput,
AgentOutput,
AgentStream,
ToolCallResult,
)
from llama_index.core.base.llms.types import ChatResponse
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.llms import ChatMessage
from llama_index.core.memory import BaseMemory
from llama_index.core.tools import AsyncBaseTool
from llama_index.core.workflow import Context
class FunctionAgent(SingleAgentRunnerMixin, BaseWorkflowAgent):
"""Function calling agent implementation."""
scratchpad_key: str = "scratchpad"
async def take_step(
self,
ctx: Context,
llm_input: List[ChatMessage],
tools: Sequence[AsyncBaseTool],
memory: BaseMemory,
) -> AgentOutput:
"""Take a single step with the function calling agent."""
if not self.llm.metadata.is_function_calling_model:
raise ValueError("LLM must be a FunctionCallingLLM")
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
current_llm_input = [*llm_input, *scratchpad]
ctx.write_event_to_stream(
AgentInput(input=current_llm_input, current_agent_name=self.name)
)
response = await self.llm.astream_chat_with_tools( # type: ignore
tools, chat_history=current_llm_input, allow_parallel_tool_calls=True
)
# last_chat_response will be used later, after the loop.
# We initialize it so it's valid even when 'response' is empty
last_chat_response = ChatResponse(message=ChatMessage())
async for last_chat_response in response:
tool_calls = self.llm.get_tool_calls_from_response( # type: ignore
last_chat_response, error_on_no_tool_call=False
)
raw = (
last_chat_response.raw.model_dump()
if isinstance(last_chat_response.raw, BaseModel)
else last_chat_response.raw
)
ctx.write_event_to_stream(
AgentStream(
delta=last_chat_response.delta or "",
response=last_chat_response.message.content or "",
tool_calls=tool_calls or [],
raw=raw,
current_agent_name=self.name,
)
)
tool_calls = self.llm.get_tool_calls_from_response( # type: ignore
last_chat_response, error_on_no_tool_call=False
)
# only add to scratchpad if we didn't select the handoff tool
scratchpad.append(last_chat_response.message)
await ctx.set(self.scratchpad_key, scratchpad)
raw = (
last_chat_response.raw.model_dump()
if isinstance(last_chat_response.raw, BaseModel)
else last_chat_response.raw
)
return AgentOutput(
response=last_chat_response.message,
tool_calls=tool_calls or [],
raw=raw,
current_agent_name=self.name,
)
async def handle_tool_call_results(
self, ctx: Context, results: List[ToolCallResult], memory: BaseMemory
) -> None:
"""Handle tool call results for function calling agent."""
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
for tool_call_result in results:
scratchpad.append(
ChatMessage(
role="tool",
content=str(tool_call_result.tool_output.content),
additional_kwargs={"tool_call_id": tool_call_result.tool_id},
)
)
if (
tool_call_result.return_direct
and tool_call_result.tool_name != "handoff"
):
scratchpad.append(
ChatMessage(
role="assistant",
content=str(tool_call_result.tool_output.content),
additional_kwargs={"tool_call_id": tool_call_result.tool_id},
)
)
break
await ctx.set(self.scratchpad_key, scratchpad)
async def finalize(
self, ctx: Context, output: AgentOutput, memory: BaseMemory
) -> AgentOutput:
"""Finalize the function calling agent.
Adds all in-progress messages to memory.
"""
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
for msg in scratchpad:
await memory.aput(msg)
# reset scratchpad
await ctx.set(self.scratchpad_key, [])
return output
|
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from ...custom_image_torch_encoder import CustomImageTorchEncoder
@pytest.fixture
def encoder():
model_dir = Path(__file__).parents[1] / 'model'
return CustomImageTorchEncoder(
model_definition_file=str(model_dir / 'external_model.py'),
model_state_dict_path=str(model_dir / 'model_state_dict.pth'),
layer_name='conv1',
model_class_name='ExternalModel',
)
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.layer_name == 'conv1'
def test_encoder(encoder):
output_dim = 10
input_dim = 224
test_img = np.random.rand(3, input_dim, input_dim)
docs = DocumentArray([Document(blob=test_img), Document(blob=test_img)])
encoder.encode(docs, {})
assert len(docs) == 2
for doc in docs:
assert doc.embedding.shape == (output_dim,)
def test_encoder_traversal_paths(encoder):
output_dim = 10
input_dim = 224
test_img = np.random.rand(3, input_dim, input_dim)
docs = DocumentArray(
[
Document(chunks=[Document(blob=test_img), Document(blob=test_img)]),
Document(chunks=[Document(blob=test_img), Document(blob=test_img)]),
]
)
encoder.encode(docs, {'traversal_paths': ['c']})
assert len(docs) == 2
assert len(docs.traverse_flat(['c'])) == 4
for chunk in docs.traverse_flat(['c']):
assert chunk.embedding.shape == (output_dim,)
|
import os
import numpy as np
import pytest
from jina import Document, DocumentArray
from ...custom_image_torch_encoder import CustomImageTorchEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def encoder(tmpdir):
model_state_dict_path = os.path.join(cur_dir, '../model/model_state_dict.pth')
return CustomImageTorchEncoder(model_definition_file=os.path.join(cur_dir, '../model/external_model.py'),
model_state_dict_path=model_state_dict_path, layer_name='conv1',
model_class_name='ExternalModel')
def test_encoder(encoder):
output_dim = 10
input_dim = 224
test_img = np.random.rand(3, input_dim, input_dim)
docs = DocumentArray([Document(blob=test_img), Document(blob=test_img)])
encoder.encode(docs, {})
assert len(docs) == 2
for doc in docs:
assert doc.embedding.shape == (output_dim,)
def test_encoder_traversal_paths(encoder):
output_dim = 10
input_dim = 224
test_img = np.random.rand(3, input_dim, input_dim)
docs = DocumentArray([Document(chunks=[Document(blob=test_img), Document(blob=test_img)]),
Document(chunks=[Document(blob=test_img), Document(blob=test_img)])])
encoder.encode(docs, {'traversal_paths': ['c']})
assert len(docs) == 2
assert len(docs.traverse_flat(['c'])) == 4
for chunk in docs.traverse_flat(['c']):
assert chunk.embedding.shape == (output_dim,)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .coco_metric import CocoMetric
__all__ = ['CocoMetric']
|
# Copyright (c) OpenMMLab. All rights reserved.
|
"""Various utilities to help with development."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ..exceptions import DataConversionWarning
from . import metadata_routing
from ._bunch import Bunch
from ._chunking import gen_batches, gen_even_slices
from ._estimator_html_repr import estimator_html_repr
# Make _safe_indexing importable from here for backward compat as this particular
# helper is considered semi-private and typically very useful for third-party
# libraries that want to comply with scikit-learn's estimator API. In particular,
# _safe_indexing was included in our public API documentation despite the leading
# `_` in its name.
from ._indexing import (
_safe_indexing, # noqa: F401
resample,
shuffle,
)
from ._mask import safe_mask
from ._tags import (
ClassifierTags,
InputTags,
RegressorTags,
Tags,
TargetTags,
TransformerTags,
get_tags,
)
from .class_weight import compute_class_weight, compute_sample_weight
from .deprecation import deprecated
from .discovery import all_estimators
from .extmath import safe_sqr
from .murmurhash import murmurhash3_32
from .validation import (
as_float_array,
assert_all_finite,
check_array,
check_consistent_length,
check_random_state,
check_scalar,
check_symmetric,
check_X_y,
column_or_1d,
indexable,
)
__all__ = [
"Bunch",
"ClassifierTags",
"DataConversionWarning",
"InputTags",
"RegressorTags",
"Tags",
"TargetTags",
"TransformerTags",
"all_estimators",
"as_float_array",
"assert_all_finite",
"check_X_y",
"check_array",
"check_consistent_length",
"check_random_state",
"check_scalar",
"check_symmetric",
"column_or_1d",
"compute_class_weight",
"compute_sample_weight",
"deprecated",
"estimator_html_repr",
"gen_batches",
"gen_even_slices",
"get_tags",
"indexable",
"metadata_routing",
"murmurhash3_32",
"resample",
"safe_mask",
"safe_sqr",
"shuffle",
]
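# --- Usage sketch (illustrative, not part of the original module): two of the
# most commonly used helpers re-exported here are `check_array` and
# `check_random_state`.
from sklearn.utils import check_array, check_random_state

X = check_array([[1, 2], [3, 4]])  # validated 2D ndarray
rng = check_random_state(0)        # numpy RandomState seeded with 0
print(X.shape, rng.rand())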
|
"""Various utilities to help with development."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ..exceptions import DataConversionWarning
from . import metadata_routing
from ._bunch import Bunch
from ._chunking import gen_batches, gen_even_slices
from ._estimator_html_repr import estimator_html_repr
# Make _safe_indexing importable from here for backward compat as this particular
# helper is considered semi-private and typically very useful for third-party
# libraries that want to comply with scikit-learn's estimator API. In particular,
# _safe_indexing was included in our public API documentation despite the leading
# `_` in its name.
from ._indexing import (
_safe_indexing, # noqa
resample,
shuffle,
)
from ._mask import safe_mask
from ._tags import (
ClassifierTags,
InputTags,
RegressorTags,
Tags,
TargetTags,
TransformerTags,
get_tags,
)
from .class_weight import compute_class_weight, compute_sample_weight
from .deprecation import deprecated
from .discovery import all_estimators
from .extmath import safe_sqr
from .murmurhash import murmurhash3_32
from .validation import (
as_float_array,
assert_all_finite,
check_array,
check_consistent_length,
check_random_state,
check_scalar,
check_symmetric,
check_X_y,
column_or_1d,
indexable,
)
__all__ = [
"Bunch",
"ClassifierTags",
"DataConversionWarning",
"InputTags",
"RegressorTags",
"Tags",
"TargetTags",
"TransformerTags",
"all_estimators",
"as_float_array",
"assert_all_finite",
"check_X_y",
"check_array",
"check_consistent_length",
"check_random_state",
"check_scalar",
"check_symmetric",
"column_or_1d",
"compute_class_weight",
"compute_sample_weight",
"deprecated",
"estimator_html_repr",
"gen_batches",
"gen_even_slices",
"get_tags",
"indexable",
"metadata_routing",
"murmurhash3_32",
"resample",
"safe_mask",
"safe_sqr",
"shuffle",
]
|
__version__ = '0.34.1'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
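# --- Migration note (illustrative, not part of the original file): code that
# previously used the removed names, e.g.
#     from docarray import Document, DocumentArray
# should switch to the replacements exported above:
#     from docarray import BaseDoc, DocList
# or pin `docarray==0.21.0`, as the error message suggests.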
|
__version__ = '0.34.0'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
import sys
from dataclasses import dataclass
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class SqlConfig(datasets.BuilderConfig):
"""BuilderConfig for SQL."""
sql: Union[str, "sqlalchemy.sql.Selectable"] = None
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None
index_col: Optional[Union[str, List[str]]] = None
coerce_float: bool = True
params: Optional[Union[List, Tuple, Dict]] = None
parse_dates: Optional[Union[List, Dict]] = None
columns: Optional[List[str]] = None
chunksize: Optional[int] = 10_000
features: Optional[datasets.Features] = None
def __post_init__(self):
super().__post_init__()
if self.sql is None:
raise ValueError("sql must be specified")
if self.con is None:
raise ValueError("con must be specified")
def create_config_id(
self,
config_kwargs: dict,
custom_features: Optional[datasets.Features] = None,
) -> str:
config_kwargs = config_kwargs.copy()
# We need to stringify the Selectable object to make its hash deterministic
# The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html
sql = config_kwargs["sql"]
if not isinstance(sql, str):
if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules:
import sqlalchemy
if isinstance(sql, sqlalchemy.sql.Selectable):
engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://")
sql_str = str(sql.compile(dialect=engine.dialect))
config_kwargs["sql"] = sql_str
else:
raise TypeError(
f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
)
else:
raise TypeError(
f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
)
con = config_kwargs["con"]
if not isinstance(con, str):
config_kwargs["con"] = id(con)
logger.info(
f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead."
)
return super().create_config_id(config_kwargs, custom_features=custom_features)
@property
def pd_read_sql_kwargs(self):
pd_read_sql_kwargs = {
"index_col": self.index_col,
"columns": self.columns,
"params": self.params,
"coerce_float": self.coerce_float,
"parse_dates": self.parse_dates,
}
return pd_read_sql_kwargs
class Sql(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = SqlConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})]
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
schema = self.config.features.arrow_schema
if all(not require_storage_cast(feature) for feature in self.config.features.values()):
# cheaper cast
pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, schema)
return pa_table
def _generate_tables(self):
chunksize = self.config.chunksize
sql_reader = pd.read_sql(
self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs
)
sql_reader = [sql_reader] if chunksize is None else sql_reader
for chunk_idx, df in enumerate(sql_reader):
pa_table = pa.Table.from_pandas(df)
yield chunk_idx, self._cast_table(pa_table)
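# --- Usage sketch (illustrative, not part of the original module): the builder
# above backs `Dataset.from_sql`; the SQLite file and table here are
# hypothetical.
from datasets import Dataset

ds = Dataset.from_sql("SELECT text, label FROM reviews", "sqlite:///reviews.db")
print(ds.features)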
|
import sys
from dataclasses import dataclass
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class SqlConfig(datasets.BuilderConfig):
"""BuilderConfig for SQL."""
sql: Union[str, "sqlalchemy.sql.Selectable"] = None
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None
index_col: Optional[Union[str, List[str]]] = None
coerce_float: bool = True
params: Optional[Union[List, Tuple, Dict]] = None
parse_dates: Optional[Union[List, Dict]] = None
columns: Optional[List[str]] = None
chunksize: Optional[int] = 10_000
features: Optional[datasets.Features] = None
def __post_init__(self):
if self.sql is None:
raise ValueError("sql must be specified")
if self.con is None:
raise ValueError("con must be specified")
def create_config_id(
self,
config_kwargs: dict,
custom_features: Optional[datasets.Features] = None,
) -> str:
config_kwargs = config_kwargs.copy()
# We need to stringify the Selectable object to make its hash deterministic
# The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html
sql = config_kwargs["sql"]
if not isinstance(sql, str):
if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules:
import sqlalchemy
if isinstance(sql, sqlalchemy.sql.Selectable):
engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://")
sql_str = str(sql.compile(dialect=engine.dialect))
config_kwargs["sql"] = sql_str
else:
raise TypeError(
f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
)
else:
raise TypeError(
f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
)
con = config_kwargs["con"]
if not isinstance(con, str):
config_kwargs["con"] = id(con)
logger.info(
f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead."
)
return super().create_config_id(config_kwargs, custom_features=custom_features)
@property
def pd_read_sql_kwargs(self):
pd_read_sql_kwargs = {
"index_col": self.index_col,
"columns": self.columns,
"params": self.params,
"coerce_float": self.coerce_float,
"parse_dates": self.parse_dates,
}
return pd_read_sql_kwargs
class Sql(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = SqlConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})]
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
schema = self.config.features.arrow_schema
if all(not require_storage_cast(feature) for feature in self.config.features.values()):
# cheaper cast
pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, schema)
return pa_table
def _generate_tables(self):
chunksize = self.config.chunksize
sql_reader = pd.read_sql(
self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs
)
sql_reader = [sql_reader] if chunksize is None else sql_reader
for chunk_idx, df in enumerate(sql_reader):
pa_table = pa.Table.from_pandas(df)
yield chunk_idx, self._cast_table(pa_table)
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model import AudioCLIP
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
"""
TARGET_SAMPLE_RATE = 44100 # derived from ESResNeXt
def __init__(
self,
model_path: str = 'assets/AudioCLIP-Full-Training.pt',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
download_model: bool = True,
*args,
**kwargs
):
"""
:param model_path: path of the pre-trained AudioCLIP model
:param traversal_paths: default traversal path
:param device: Torch device string (e.g. 'cpu', 'cuda', 'cuda:2')
"""
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.traversal_paths = traversal_paths
self.batch_size = batch_size
if download_model:
import os
import subprocess
root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
subprocess.call(['sh', 'scripts/download_model.sh'], cwd=root_path)
try:
self.model = AudioCLIP(pretrained=model_path).to(device).eval()
except FileNotFoundError:
raise FileNotFoundError(
'Please download AudioCLIP model and set the `model_path` argument.'
)
@requests
def encode(
self,
docs: Optional[DocumentArray] = None,
parameters: dict = {},
*args,
**kwargs
) -> Any:
"""
Encode all Documents with audio data (stored in the ``blob`` attribute) and store the
embeddings in the ``embedding`` attribute of the Documents.
        :param docs: a `DocumentArray` containing `Document`s with `blob` of size (n,) or (2, n).
            The `blob` contains audio time-series data. Additionally, the
            `tags` of each `Document` must contain a `sample_rate` field
            holding the sample rate of the audio data. The `sample_rate` must
            be a positive scalar value.
        :param parameters: dictionary that defines the `traversal_paths`.
"""
if not docs:
return
traversal_paths = parameters.get('traversal_paths', self.traversal_paths)
batch_size = parameters.get('batch_size', self.batch_size)
with torch.inference_mode():
for batch in docs.traverse_flat(traversal_paths).batch(batch_size):
self._create_embeddings(batch)
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
d.blob, d.tags['sample_rate'] = self._resample(
d.blob, d.tags.get('sample_rate', None)
)
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.model.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
    def _resample(self, blob: np.ndarray, orig_sr: Optional[int]):
if orig_sr is None:
raise BadDocType(
'sample rate is not given, please provide a valid sample rate'
)
if orig_sr == AudioCLIPEncoder.TARGET_SAMPLE_RATE:
return blob, orig_sr
return (
lr.resample(blob, orig_sr, AudioCLIPEncoder.TARGET_SAMPLE_RATE),
AudioCLIPEncoder.TARGET_SAMPLE_RATE,
)
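# --- Usage sketch (illustrative, not part of the original module): the
# waveform below is random noise at the target sample rate, just to show the
# expected `blob` / `tags['sample_rate']` contract.
import numpy as np
from jina import Document, DocumentArray

def _demo(encoder: AudioCLIPEncoder) -> None:
    docs = DocumentArray(
        [
            Document(
                blob=np.random.random(44100).astype(np.float32),
                tags={'sample_rate': 44100},
            )
        ]
    )
    encoder.encode(docs, parameters={})
    print(docs[0].embedding.shape)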
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model import AudioCLIP
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
"""
TARGET_SAMPLE_RATE = 44100 # derived from ESResNeXt
def __init__(
self,
model_path: str = 'assets/AudioCLIP-Full-Training.pt',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
download_model: bool = True,
*args,
**kwargs
):
"""
:param model_path: path of the pre-trained AudioCLIP model
:param traversal_paths: default traversal path
:param device: Torch device string (e.g. 'cpu', 'cuda', 'cuda:2')
"""
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.traversal_paths = traversal_paths
self.batch_size = batch_size
if download_model:
import os
import subprocess
root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
subprocess.call(['sh', 'scripts/download_model.sh'], cwd=root_path)
try:
self.model = AudioCLIP(pretrained=model_path).to(device).eval()
except FileNotFoundError:
raise FileNotFoundError(
'Please download AudioCLIP model and set the `model_path` argument.'
)
@requests
def encode(
self,
docs: Optional[DocumentArray] = None,
parameters: dict = {},
*args,
**kwargs
) -> Any:
"""
Encode all Documents with audio data (stored in the ``blob`` attribute) and store the
embeddings in the ``embedding`` attribute of the Documents.
        :param docs: a `DocumentArray` containing `Document`s with `blob` of size (n,) or (2, n).
            The `blob` contains audio time-series data. Additionally, the
            `tags` of each `Document` must contain a `sample_rate` field
            holding the sample rate of the audio data. The `sample_rate` must
            be a positive scalar value.
        :param parameters: dictionary that defines the `traversal_paths`.
"""
if not docs:
return
traversal_paths = parameters.get('traversal_paths', self.traversal_paths)
batch_size = parameters.get('batch_size', self.batch_size)
with torch.inference_mode():
for batch in docs.batch(batch_size, traversal_paths):
self._create_embeddings(batch)
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
d.blob, d.tags['sample_rate'] = self._resample(
d.blob, d.tags.get('sample_rate', None)
)
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.model.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
    def _resample(self, blob: np.ndarray, orig_sr: Optional[int]):
if orig_sr is None:
raise BadDocType(
'sample rate is not given, please provide a valid sample rate'
)
if orig_sr == AudioCLIPEncoder.TARGET_SAMPLE_RATE:
return blob, orig_sr
return (
lr.resample(blob, orig_sr, AudioCLIPEncoder.TARGET_SAMPLE_RATE),
AudioCLIPEncoder.TARGET_SAMPLE_RATE,
)
|
import json
import os
import pytest
from hubble.executor import HubExecutor
from hubble.executor.hubio import HubIO
from jina import __version__
from jina.orchestrate.deployments.config.helper import (
get_base_executor_version,
get_image_name,
to_compatible_name,
)
@pytest.mark.parametrize('is_master', (True, False))
def test_version(is_master, requests_mock):
if is_master:
count = 0
else:
# current version is published already
count = 3
requests_mock.get(
'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags',
text=json.dumps(
{
'count': count,
'next': 'abc',
'previous': 'def',
'results': [{'a': 'b', 'c': 'd'}],
}
),
)
v = get_base_executor_version()
if is_master:
assert v == 'master'
else:
assert v == __version__
def test_to_compatible_name():
assert to_compatible_name('executor/hey-ha_HO') == 'executor-hey-ha-ho'
@pytest.mark.parametrize(
'uses', ['jinaai://jina-ai/DummyExecutor']
)
def test_get_image_name(mocker, monkeypatch, uses):
mock = mocker.Mock()
def _mock_fetch(
name,
tag,
image_required=True,
rebuild_image=True,
*,
secret=None,
force=False,
):
mock(name=name, rebuild_image=rebuild_image)
return (
HubExecutor(
uuid='hello',
name=name,
tag='v0',
image_name=f'jinahub/{name}',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
image_name = get_image_name(uses)
assert image_name in {'jinahub/DummyExecutor', 'jinahub/jina-ai/DummyExecutor'}
_, mock_kwargs = mock.call_args_list[0]
assert mock_kwargs['rebuild_image'] is True # default value must be True
os.environ['JINA_HUB_NO_IMAGE_REBUILD'] = '1'
get_image_name(uses)
del os.environ['JINA_HUB_NO_IMAGE_REBUILD']
_, mock_kwargs = mock.call_args_list[1]
assert mock_kwargs['rebuild_image'] is False # env var is set, so it must be False
|
import json
import os
import pytest
from hubble.executor import HubExecutor
from hubble.executor.hubio import HubIO
from jina import __version__
from jina.orchestrate.deployments.config.helper import (
get_base_executor_version,
get_image_name,
to_compatible_name,
)
@pytest.mark.parametrize('is_master', (True, False))
def test_version(is_master, requests_mock):
if is_master:
count = 0
else:
# current version is published already
count = 3
requests_mock.get(
'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags',
text=json.dumps(
{
'count': count,
'next': 'abc',
'previous': 'def',
'results': [{'a': 'b', 'c': 'd'}],
}
),
)
v = get_base_executor_version()
if is_master:
assert v == 'master'
else:
assert v == __version__
def test_to_compatible_name():
assert to_compatible_name('executor/hey-ha_HO') == 'executor-hey-ha-ho'
def test_get_image_name(mocker, monkeypatch):
mock = mocker.Mock()
def _mock_fetch(
name,
tag,
image_required=True,
rebuild_image=True,
*,
secret=None,
force=False,
):
mock(name=name, rebuild_image=rebuild_image)
return (
HubExecutor(
uuid='hello',
name=name,
tag='v0',
image_name=f'jinahub/{name}',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
uses = 'jinahub://DummyExecutor'
image_name = get_image_name(uses)
assert image_name == 'jinahub/DummyExecutor'
_, mock_kwargs = mock.call_args_list[0]
assert mock_kwargs['rebuild_image'] is True # default value must be True
os.environ['JINA_HUB_NO_IMAGE_REBUILD'] = '1'
get_image_name(uses)
del os.environ['JINA_HUB_NO_IMAGE_REBUILD']
_, mock_kwargs = mock.call_args_list[1]
assert mock_kwargs['rebuild_image'] is False # env var is set, so it must be False
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import warnings
from mmcv import Config, DictAction
from mmdet.utils import update_data_root
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into the config file (deprecated); '
        'use --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
print(f'Config:\n{cfg.pretty_text}')
if __name__ == '__main__':
main()
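# --- Example invocation (illustrative; the config path and override key are
# hypothetical):
#   python tools/misc/print_config.py configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py \
#       --cfg-options data.samples_per_gpu=2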
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import warnings
from mmcv import Config, DictAction
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into the config file (deprecated); '
        'use --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
print(f'Config:\n{cfg.pretty_text}')
if __name__ == '__main__':
main()
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_panoptic.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
model = dict(
type='PanopticFPN',
img_norm_cfg=img_norm_cfg,
semantic_head=dict(
type='PanopticFPNHead',
num_things_classes=80,
num_stuff_classes=53,
in_channels=256,
inner_channels=128,
start_level=0,
end_level=4,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
conv_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.5)),
panoptic_fusion_head=dict(
type='HeuristicFusionHead',
num_things_classes=80,
num_stuff_classes=53),
test_cfg=dict(
panoptic=dict(
score_thr=0.6,
max_per_img=100,
mask_thr_binary=0.5,
mask_overlap=0.5,
nms=dict(type='nms', iou_threshold=0.5, class_agnostic=True),
stuff_area_limit=4096)))
# Forced to remove NumClassCheckHook
custom_hooks = []
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_panoptic.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='PanopticFPN',
semantic_head=dict(
type='PanopticFPNHead',
num_things_classes=80,
num_stuff_classes=53,
in_channels=256,
inner_channels=128,
start_level=0,
end_level=4,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
conv_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.5)),
panoptic_fusion_head=dict(
type='HeuristicFusionHead',
num_things_classes=80,
num_stuff_classes=53),
test_cfg=dict(
panoptic=dict(
score_thr=0.6,
max_per_img=100,
mask_thr_binary=0.5,
mask_overlap=0.5,
nms=dict(type='nms', iou_threshold=0.5, class_agnostic=True),
stuff_area_limit=4096)))
custom_hooks = []
|
import os
import random
import time
from typing import Dict, OrderedDict
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor, Flow, requests
from jina_commons.indexers.dump import dump_docs
from jinahub.indexers.compound.FaissLMDBSearcher.faiss_lmdb import FaissLMDBSearcher
from jinahub.indexers.storage.LMDBStorage.lmdb_storage import LMDBStorage
random.seed(0)
np.random.seed(0)
cur_dir = os.path.dirname(os.path.abspath(__file__))
ORIGIN_TAG = 'origin'
TOP_K = 100
class TagMatchMerger(Executor):
@requests(on='/tag_search')
def merge(self, docs_matrix, parameters: Dict, **kwargs):
if docs_matrix:
# noinspection PyTypeHints
results = OrderedDict()
for docs in docs_matrix:
for doc in docs:
if doc.id in results:
results[doc.id].matches.extend(doc.matches)
else:
results[doc.id] = doc
top_k = parameters.get('top_k')
if top_k:
top_k = int(top_k)
for doc in results.values():
doc.matches = sorted(
doc.matches,
key=lambda m: m.scores['euclidean'].value,
reverse=True,
)[:top_k]
docs = DocumentArray(list(results.values()))
return docs
class TaggingFileSearcher(LMDBStorage):
def __init__(
self,
**kwargs,
):
super().__init__(**kwargs)
def search(self, docs: DocumentArray, parameters: Dict = None, **kwargs) -> None:
# TODO shouldn't be necessary
parameters = {'traversal_paths': ['m']}
LMDBStorage.search(self, docs, parameters=parameters, **kwargs)
for doc in docs:
for match in doc.matches:
match.tags[ORIGIN_TAG] = self.runtime_args.pea_id
class FaissTaggingFileSearcher(FaissLMDBSearcher):
def __init__(
self,
dump_path=None,
**kwargs,
):
super().__init__(**kwargs)
self._kv_indexer = TaggingFileSearcher(dump_path=dump_path, **kwargs)
@requests(on='/tag_search')
def search(self, docs: 'DocumentArray', parameters: Dict = None, **kwargs):
super().search(docs, parameters, **kwargs)
def random_docs(start, end, embed_dim=10):
for j in range(start, end):
d = Document()
d.content = f'hello world from {j}'
d.embedding = np.random.random([embed_dim]).astype(dtype=np.float32)
yield d
def validate_diff_sources(results, num_shards, docs_before: DocumentArray):
distinct_shards = {}
for doc in results[0].docs:
for match in doc.matches:
if match.tags[ORIGIN_TAG] not in distinct_shards:
distinct_shards[match.tags[ORIGIN_TAG]] = 0
distinct_shards[match.tags[ORIGIN_TAG]] += 1
np.testing.assert_equal(len(distinct_shards.keys()), num_shards)
np.testing.assert_equal(sum(distinct_shards.values()), TOP_K)
# TODO we do not support shards=1 for replicas>1
def assert_folder(dump_path, num_shards):
assert os.path.exists(dump_path)
for i in range(num_shards):
assert os.path.exists(os.path.join(dump_path, str(i)))
assert os.path.exists(os.path.join(dump_path, str(i), 'ids'))
assert os.path.exists(os.path.join(dump_path, str(i), 'vectors'))
assert os.path.exists(os.path.join(dump_path, str(i), 'metas'))
@pytest.mark.parametrize('num_shards', (2, 3, 7))
def test_shards_numpy_filequery(tmpdir, num_shards):
pod_name = 'index'
os.environ['WORKSPACE'] = str(tmpdir)
os.environ['SHARDS'] = str(num_shards)
docs_indexed = list(random_docs(0, 201))
dump_path = os.path.join(tmpdir, 'dump_path')
dump_docs(docs_indexed, dump_path, num_shards)
assert_folder(dump_path, num_shards)
inputs = list(random_docs(0, 1))
# TODO workspace is wrongly saved to curdir
with Flow.load_config('flow.yml') as flow:
flow.rolling_update(pod_name=pod_name, dump_path=dump_path)
time.sleep(2)
results = flow.post(
on='/tag_search',
inputs=inputs,
parameters={'top_k': TOP_K},
return_results=True,
)
validate_diff_sources(results, num_shards, docs_indexed)
|
import os
import random
import time
from typing import Dict, OrderedDict
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor, Flow, requests
from jina_commons.indexers.dump import dump_docs
from jinahub.indexers.compound.FaissLMDBSearcher.faiss_lmdb import FaissLMDBSearcher
from jinahub.indexers.storage.LMDBStorage.lmdb_storage import LMDBStorage
random.seed(0)
np.random.seed(0)
cur_dir = os.path.dirname(os.path.abspath(__file__))
ORIGIN_TAG = 'origin'
TOP_K = 100
class TagMatchMerger(Executor):
@requests(on='/tag_search')
def merge(self, docs_matrix, parameters: Dict, **kwargs):
if docs_matrix:
# noinspection PyTypeHints
results = OrderedDict()
for docs in docs_matrix:
for doc in docs:
if doc.id in results:
results[doc.id].matches.extend(doc.matches)
else:
results[doc.id] = doc
top_k = parameters.get('top_k')
if top_k:
top_k = int(top_k)
for doc in results.values():
doc.matches = sorted(
doc.matches,
key=lambda m: m.scores['l2'].value,
reverse=True,
)[:top_k]
docs = DocumentArray(list(results.values()))
return docs
class TaggingFileSearcher(LMDBStorage):
def __init__(
self,
**kwargs,
):
super().__init__(**kwargs)
def search(self, docs: DocumentArray, parameters: Dict = None, **kwargs) -> None:
# TODO shouldn't be necessary
parameters = {'traversal_paths': ['m']}
LMDBStorage.search(self, docs, parameters=parameters, **kwargs)
for doc in docs:
for match in doc.matches:
match.tags[ORIGIN_TAG] = self.runtime_args.pea_id
class FaissTaggingFileSearcher(FaissLMDBSearcher):
def __init__(
self,
dump_path=None,
**kwargs,
):
super().__init__(**kwargs)
self._kv_indexer = TaggingFileSearcher(dump_path=dump_path, **kwargs)
@requests(on='/tag_search')
def search(self, docs: 'DocumentArray', parameters: Dict = None, **kwargs):
super().search(docs, parameters, **kwargs)
def random_docs(start, end, embed_dim=10):
for j in range(start, end):
d = Document()
d.content = f'hello world from {j}'
d.embedding = np.random.random([embed_dim]).astype(dtype=np.float32)
yield d
def validate_diff_sources(results, num_shards, docs_before: DocumentArray):
distinct_shards = {}
for doc in results[0].docs:
for match in doc.matches:
if match.tags[ORIGIN_TAG] not in distinct_shards:
distinct_shards[match.tags[ORIGIN_TAG]] = 0
distinct_shards[match.tags[ORIGIN_TAG]] += 1
np.testing.assert_equal(len(distinct_shards.keys()), num_shards)
np.testing.assert_equal(sum(distinct_shards.values()), TOP_K)
# TODO we do not support shards=1 for replicas>1
def assert_folder(dump_path, num_shards):
assert os.path.exists(dump_path)
for i in range(num_shards):
assert os.path.exists(os.path.join(dump_path, str(i)))
assert os.path.exists(os.path.join(dump_path, str(i), 'ids'))
assert os.path.exists(os.path.join(dump_path, str(i), 'vectors'))
assert os.path.exists(os.path.join(dump_path, str(i), 'metas'))
@pytest.mark.parametrize('num_shards', (2, 3, 7))
def test_shards_numpy_filequery(tmpdir, num_shards):
pod_name = 'index'
os.environ['WORKSPACE'] = str(tmpdir)
os.environ['SHARDS'] = str(num_shards)
docs_indexed = list(random_docs(0, 201))
dump_path = os.path.join(tmpdir, 'dump_path')
dump_docs(docs_indexed, dump_path, num_shards)
assert_folder(dump_path, num_shards)
inputs = list(random_docs(0, 1))
# TODO workspace is wrongly saved to curdir
with Flow.load_config('flow.yml') as flow:
flow.rolling_update(pod_name=pod_name, dump_path=dump_path)
time.sleep(2)
results = flow.post(
on='/tag_search',
inputs=inputs,
parameters={'top_k': TOP_K},
return_results=True,
)
validate_diff_sources(results, num_shards, docs_indexed)
|
"""Configuration for unit tests."""
from collections.abc import Sequence
from importlib import util
import pytest
from pytest import Config, Function, Parser
def pytest_addoption(parser: Parser) -> None:
"""Add custom command line options to pytest."""
parser.addoption(
"--only-extended",
action="store_true",
help="Only run extended tests. Does not allow skipping any extended tests.",
)
parser.addoption(
"--only-core",
action="store_true",
help="Only run core tests. Never runs any extended tests.",
)
def pytest_collection_modifyitems(config: Config, items: Sequence[Function]) -> None:
"""Add implementations for handling custom markers.
At the moment, this adds support for a custom `requires` marker.
The `requires` marker is used to denote tests that require one or more packages
to be installed to run. If the package is not installed, the test is skipped.
The `requires` marker syntax is:
.. code-block:: python
@pytest.mark.requires("package1", "package2")
def test_something():
...
"""
# Mapping from the name of a package to whether it is installed or not.
# Used to avoid repeated calls to `util.find_spec`
required_pkgs_info: dict[str, bool] = {}
only_extended = config.getoption("--only-extended") or False
only_core = config.getoption("--only-core") or False
if only_extended and only_core:
msg = "Cannot specify both `--only-extended` and `--only-core`."
raise ValueError(msg)
for item in items:
requires_marker = item.get_closest_marker("requires")
if requires_marker is not None:
if only_core:
item.add_marker(pytest.mark.skip(reason="Skipping not a core test."))
continue
# Iterate through the list of required packages
required_pkgs = requires_marker.args
for pkg in required_pkgs:
# If we haven't yet checked whether the pkg is installed
# let's check it and store the result.
if pkg not in required_pkgs_info:
try:
installed = util.find_spec(pkg) is not None
except Exception:
installed = False
required_pkgs_info[pkg] = installed
if not required_pkgs_info[pkg]:
if only_extended:
pytest.fail(
f"Package `{pkg}` is not installed but is required for "
f"extended tests. Please install the given package and "
f"try again.",
)
else:
# If the package is not installed, we immediately break
# and mark the test as skipped.
item.add_marker(
pytest.mark.skip(reason=f"Requires pkg: `{pkg}`")
)
break
elif only_extended:
item.add_marker(pytest.mark.skip(reason="Skipping not an extended test."))
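# --- Example invocations (illustrative):
#   pytest --only-core        # skip every test that carries a `requires` marker
#   pytest --only-extended    # run marked tests, failing if a dependency is missing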
|
"""Configuration for unit tests."""
from collections.abc import Sequence
from importlib import util
import pytest
from pytest import Config, Function, Parser
def pytest_addoption(parser: Parser) -> None:
"""Add custom command line options to pytest."""
parser.addoption(
"--only-extended",
action="store_true",
help="Only run extended tests. Does not allow skipping any extended tests.",
)
parser.addoption(
"--only-core",
action="store_true",
help="Only run core tests. Never runs any extended tests.",
)
def pytest_collection_modifyitems(config: Config, items: Sequence[Function]) -> None:
"""Add implementations for handling custom markers.
At the moment, this adds support for a custom `requires` marker.
The `requires` marker is used to denote tests that require one or more packages
to be installed to run. If the package is not installed, the test is skipped.
The `requires` marker syntax is:
.. code-block:: python
@pytest.mark.requires("package1", "package2")
def test_something():
...
"""
# Mapping from the name of a package to whether it is installed or not.
# Used to avoid repeated calls to `util.find_spec`
required_pkgs_info: dict[str, bool] = {}
only_extended = config.getoption("--only-extended") or False
only_core = config.getoption("--only-core") or False
if only_extended and only_core:
raise ValueError("Cannot specify both `--only-extended` and `--only-core`.")
for item in items:
requires_marker = item.get_closest_marker("requires")
if requires_marker is not None:
if only_core:
item.add_marker(pytest.mark.skip(reason="Skipping not a core test."))
continue
# Iterate through the list of required packages
required_pkgs = requires_marker.args
for pkg in required_pkgs:
# If we haven't yet checked whether the pkg is installed
# let's check it and store the result.
if pkg not in required_pkgs_info:
try:
installed = util.find_spec(pkg) is not None
except Exception:
installed = False
required_pkgs_info[pkg] = installed
if not required_pkgs_info[pkg]:
if only_extended:
pytest.fail(
f"Package `{pkg}` is not installed but is required for "
f"extended tests. Please install the given package and "
f"try again.",
)
else:
# If the package is not installed, we immediately break
# and mark the test as skipped.
item.add_marker(
pytest.mark.skip(reason=f"Requires pkg: `{pkg}`")
)
break
else:
if only_extended:
item.add_marker(
pytest.mark.skip(reason="Skipping not an extended test.")
)
|
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=[(2048, 800), (2048, 1024)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_train.json',
data_prefix=dict(img='leftImg8bit/train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = [
dict(
type='CocoMetric',
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
metric=['bbox', 'segm']),
dict(
type='CityScapesMetric',
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
seg_prefix=data_root + '/gtFine/val',
outfile_prefix='./work_dirs/cityscapes_metric/instance')
]
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/instancesonly_filtered_gtFine_test.json',
# data_prefix=dict(img='leftImg8bit/test/'),
# test_mode=True,
# filter_cfg=dict(filter_empty_gt=True, min_size=32),
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CityScapesMetric',
# format_only=True,
# outfile_prefix='./work_dirs/cityscapes_metric/test')
|
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomResize', scale=[(2048, 800), (2048, 1024)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_train.json',
data_prefix=dict(img='leftImg8bit/train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = [
dict(
type='CocoMetric',
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
metric=['bbox', 'segm']),
dict(
type='CityScapesMetric',
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
seg_prefix=data_root + '/gtFine/val',
outfile_prefix='./work_dirs/cityscapes_metric/instance')
]
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/instancesonly_filtered_gtFine_test.json',
# data_prefix=dict(img='leftImg8bit/test/'),
# test_mode=True,
# filter_cfg=dict(filter_empty_gt=True, min_size=32),
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CityScapesMetric',
# format_only=True,
# outfile_prefix='./work_dirs/cityscapes_metric/test')
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
@slow
def test_small_integration_test(self):
"""
For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_mt5_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_mt5_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_mt5_spm_model_path)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
input_ids = tokenizer("Hello there", return_tensors="np").input_ids
labels = tokenizer("Hi I am", return_tensors="np").input_ids
decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
@slow
def test_small_integration_test(self):
"""
        For comparison run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_mt5_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_mt5_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_mt5_spm_model_path)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
input_ids = tokenizer("Hello there", return_tensors="np").input_ids
labels = tokenizer("Hi I am", return_tensors="np").input_ids
decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
|
_base_ = './retinanet_r50_fpn_ghm-1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = './retinanet_ghm_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
"""Test EdenAi's image moderation Tool .
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and will return dummy results)
You'll then need to set the EDENAI_API_KEY environment variable to your API key.
"""
from langchain_community.tools.edenai import EdenAiExplicitImageTool
def test_edenai_call() -> None:
"""Test simple call to edenai's image moderation endpoint."""
image_moderation = EdenAiExplicitImageTool(providers=["amazon"])
output = image_moderation.invoke("https://static.javatpoint.com/images/objects.jpg")
assert image_moderation.name == "edenai_image_explicit_content_detection"
assert image_moderation.feature == "image"
assert image_moderation.subfeature == "explicit_content"
assert isinstance(output, str)
|
"""Test EdenAi's image moderation Tool .
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and will return dummy results)
You'll then need to set the EDENAI_API_KEY environment variable to your API key.
"""
from langchain_community.tools.edenai import EdenAiExplicitImageTool
def test_edenai_call() -> None:
"""Test simple call to edenai's image moderation endpoint."""
image_moderation = EdenAiExplicitImageTool(providers=["amazon"]) # type: ignore[call-arg]
output = image_moderation.invoke("https://static.javatpoint.com/images/objects.jpg")
assert image_moderation.name == "edenai_image_explicit_content_detection"
assert image_moderation.feature == "image"
assert image_moderation.subfeature == "explicit_content"
assert isinstance(output, str)
|
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
from argparse import ArgumentParser
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector, show_result_pyplot)
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument('--out-file', default=None, help='Path to output file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--palette',
default='coco',
choices=['coco', 'voc', 'citys', 'random'],
help='Color palette used for visualization')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
parser.add_argument(
'--async-test',
action='store_true',
help='whether to set async options for async inference.')
args = parser.parse_args()
return args
def main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
result = inference_detector(model, args.img)
# show the results
show_result_pyplot(
model,
args.img,
result,
palette=args.palette,
score_thr=args.score_thr,
out_file=args.out_file)
async def async_main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
tasks = asyncio.create_task(async_inference_detector(model, args.img))
result = await asyncio.gather(tasks)
# show the results
show_result_pyplot(
model,
args.img,
result[0],
palette=args.palette,
score_thr=args.score_thr,
out_file=args.out_file)
if __name__ == '__main__':
args = parse_args()
if args.async_test:
asyncio.run(async_main(args))
else:
main(args)
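# Example invocation (assuming this script is saved as image_demo.py; the
# config and checkpoint paths below are illustrative, not shipped files):
#
#   python image_demo.py demo.jpg faster_rcnn_r50_fpn_1x_coco.py \
#       faster_rcnn_r50_fpn_1x_coco.pth --device cuda:0 --palette coco \
#       --score-thr 0.3 --out-file result.jpg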
|
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
from argparse import ArgumentParser
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector, show_result_pyplot)
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--palette',
default='coco',
choices=['coco', 'voc', 'citys', 'random'],
help='Color palette used for visualization')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
parser.add_argument(
'--async-test',
action='store_true',
help='whether to set async options for async inference.')
args = parser.parse_args()
return args
def main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
result = inference_detector(model, args.img)
# show the results
show_result_pyplot(
model,
args.img,
result,
palette=args.palette,
score_thr=args.score_thr)
async def async_main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
tasks = asyncio.create_task(async_inference_detector(model, args.img))
result = await asyncio.gather(tasks)
# show the results
show_result_pyplot(
model,
args.img,
result[0],
palette=args.palette,
score_thr=args.score_thr)
if __name__ == '__main__':
args = parse_args()
if args.async_test:
asyncio.run(async_main(args))
else:
main(args)
|
from functools import partial
from torchaudio.models import emformer_rnnt_base
from torchaudio.pipelines import RNNTBundle
EMFORMER_RNNT_BASE_MUSTC = RNNTBundle(
_rnnt_path="models/emformer_rnnt_base_mustc.pt",
_rnnt_factory_func=partial(emformer_rnnt_base, num_symbols=501),
_global_stats_path="pipeline-assets/global_stats_rnnt_mustc.json",
_sp_model_path="pipeline-assets/spm_bpe_500_mustc.model",
_right_padding=4,
_blank=500,
_sample_rate=16000,
_n_fft=400,
_n_mels=80,
_hop_length=160,
_segment_length=16,
_right_context_length=4,
)
EMFORMER_RNNT_BASE_MUSTC.__doc__ = """Pre-trained Emformer-RNNT-based ASR pipeline capable of performing both streaming and non-streaming inference.
The underlying model is constructed by :py:func:`torchaudio.models.emformer_rnnt_base` and utilizes weights
trained on *MuST-C release v2.0* :cite:`CATTONI2021101155` dataset using training script ``train.py``
`here <https://github.com/pytorch/audio/tree/main/examples/asr/emformer_rnnt>`__ with ``num_symbols=501``.
Please refer to :py:class:`torchaudio.pipelines.RNNTBundle` for usage instructions.
"""
EMFORMER_RNNT_BASE_TEDLIUM3 = RNNTBundle(
_rnnt_path="models/emformer_rnnt_base_tedlium3.pt",
_rnnt_factory_func=partial(emformer_rnnt_base, num_symbols=501),
_global_stats_path="pipeline-assets/global_stats_rnnt_tedlium3.json",
_sp_model_path="pipeline-assets/spm_bpe_500_tedlium3.model",
_right_padding=4,
_blank=500,
_sample_rate=16000,
_n_fft=400,
_n_mels=80,
_hop_length=160,
_segment_length=16,
_right_context_length=4,
)
EMFORMER_RNNT_BASE_TEDLIUM3.__doc__ = """Pre-trained Emformer-RNNT-based ASR pipeline capable of performing both streaming and non-streaming inference.
The underlying model is constructed by :py:func:`torchaudio.models.emformer_rnnt_base`
and utilizes weights trained on TED-LIUM Release 3 dataset using training script ``train.py``
`here <https://github.com/pytorch/audio/tree/main/examples/asr/emformer_rnnt>`__ with ``num_symbols=501``.
Please refer to :py:class:`torchaudio.pipelines.RNNTBundle` for usage instructions.
"""
|
from functools import partial
from torchaudio.models import emformer_rnnt_base
from torchaudio.pipelines import RNNTBundle
EMFORMER_RNNT_BASE_MUSTC = RNNTBundle(
_rnnt_path="models/emformer_rnnt_base_mustc.pt",
_rnnt_factory_func=partial(emformer_rnnt_base, num_symbols=501),
_global_stats_path="pipeline-assets/global_stats_rnnt_mustc.json",
_sp_model_path="pipeline-assets/spm_bpe_500_mustc.model",
_right_padding=4,
_blank=500,
_sample_rate=16000,
_n_fft=400,
_n_mels=80,
_hop_length=160,
_segment_length=16,
_right_context_length=4,
)
EMFORMER_RNNT_BASE_MUSTC.__doc__ = """Pre-trained Emformer-RNNT-based ASR pipeline capable of performing both streaming and non-streaming inference.
The underlying model is constructed by :py:func:`torchaudio.models.emformer_rnnt_base`
and utilizes weights trained on MuST-C release v2.0 dataset using training script ``train.py``
`here <https://github.com/pytorch/audio/tree/main/examples/asr/emformer_rnnt>`__ with ``num_symbols=501``.
Please refer to :py:class:`torchaudio.pipelines.RNNTBundle` for usage instructions.
"""
EMFORMER_RNNT_BASE_TEDLIUM3 = RNNTBundle(
_rnnt_path="models/emformer_rnnt_base_tedlium3.pt",
_rnnt_factory_func=partial(emformer_rnnt_base, num_symbols=501),
_global_stats_path="pipeline-assets/global_stats_rnnt_tedlium3.json",
_sp_model_path="pipeline-assets/spm_bpe_500_tedlium3.model",
_right_padding=4,
_blank=500,
_sample_rate=16000,
_n_fft=400,
_n_mels=80,
_hop_length=160,
_segment_length=16,
_right_context_length=4,
)
EMFORMER_RNNT_BASE_TEDLIUM3.__doc__ = """Pre-trained Emformer-RNNT-based ASR pipeline capable of performing both streaming and non-streaming inference.
The underlying model is constructed by :py:func:`torchaudio.models.emformer_rnnt_base`
and utilizes weights trained on TED-LIUM Release 3 dataset using training script ``train.py``
`here <https://github.com/pytorch/audio/tree/main/examples/asr/emformer_rnnt>`__ with ``num_symbols=501``.
Please refer to :py:class:`torchaudio.pipelines.RNNTBundle` for usage instructions.
"""
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMultipleNegativesRankingLoss(MultipleNegativesRankingLoss):
def __init__(self, model: SparseEncoder, scale: float = 1.0, similarity_fct=util.dot_score) -> None:
"""
Given a list of (anchor, positive) pairs or (anchor, positive, negative) triplets, this loss optimizes the following:
1. Given an anchor (e.g. a question), assign the highest similarity to the corresponding positive (i.e. answer)
out of every single positive and negative (e.g. all answers) in the batch.
If you provide the optional negatives, they will all be used as extra options from which the model must pick the
correct positive. Within reason, the harder this "picking" is, the stronger the model will become. Because of
this, a higher batch size results in more in-batch negatives, which then increases performance (to a point).
        This loss function works well for training embeddings in retrieval setups where you have positive pairs
        (e.g. (query, answer)), as it randomly samples ``n-1`` negative docs within each batch.
This loss is also known as InfoNCE loss, SimCSE loss, Cross-Entropy Loss with in-batch negatives, or simply
in-batch negatives loss.
Args:
model: SparseEncoder model
scale: Output of similarity function is multiplied by scale
value
similarity_fct: similarity function between sentence
embeddings. By default, dot product. Can also be set to cosine
similarity (and then set scale to 20)
Requirements:
            1. Needs to be used in SpladeLoss or CSRLoss as a loss function.
2. (anchor, positive) pairs or (anchor, positive, negative) triplets
Inputs:
+-------------------------------------------------+--------+
| Texts | Labels |
+=================================================+========+
| (anchor, positive) pairs | none |
+-------------------------------------------------+--------+
| (anchor, positive, negative) triplets | none |
+-------------------------------------------------+--------+
| (anchor, positive, negative_1, ..., negative_n) | none |
+-------------------------------------------------+--------+
Recommendations:
- Use ``BatchSamplers.NO_DUPLICATES`` (:class:`docs <sentence_transformers.training_args.BatchSamplers>`) to
ensure that no in-batch negatives are duplicates of the anchor or positive samples.
Relations:
- :class:`SparseCachedMultipleNegativesRankingLoss` is equivalent to this loss, but it uses caching that allows for
much higher batch sizes (and thus better performance) without extra memory usage. However, it is slightly
slower.
- :class:`SparseGISTEmbedLoss` is equivalent to this loss, but uses a guide model to guide the in-batch negative
sample selection. `SparseGISTEmbedLoss` yields a stronger training signal at the cost of some training overhead.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseMultipleNegativesRankingLoss(model), corpus_regularizer_weight=3e-5, query_regularizer_weight=5e-5
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
        super().__init__(model, scale=scale, similarity_fct=similarity_fct)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError(
"SparseMultipleNegativesRankingLoss should not be used alone. Use it with SpladeLoss or CSRLoss."
)
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMultipleNegativesRankingLoss(MultipleNegativesRankingLoss):
def __init__(self, model: SparseEncoder, scale: float = 1.0, similarity_fct=util.dot_score) -> None:
"""
Given a list of (anchor, positive) pairs or (anchor, positive, negative) triplets, this loss optimizes the following:
1. Given an anchor (e.g. a question), assign the highest similarity to the corresponding positive (i.e. answer)
out of every single positive and negative (e.g. all answers) in the batch.
If you provide the optional negatives, they will all be used as extra options from which the model must pick the
correct positive. Within reason, the harder this "picking" is, the stronger the model will become. Because of
this, a higher batch size results in more in-batch negatives, which then increases performance (to a point).
        This loss function works well for training embeddings in retrieval setups where you have positive pairs
        (e.g. (query, answer)), as it randomly samples ``n-1`` negative docs within each batch.
This loss is also known as InfoNCE loss, SimCSE loss, Cross-Entropy Loss with in-batch negatives, or simply
in-batch negatives loss.
Args:
model: SparseEncoder model
scale: Output of similarity function is multiplied by scale
value
similarity_fct: similarity function between sentence
embeddings. By default, dot product. Can also be set to cosine
similarity (and then set scale to 20)
Requirements:
            1. Needs to be used in SpladeLoss or CSRLoss as a loss function.
2. (anchor, positive) pairs or (anchor, positive, negative) triplets
Inputs:
+-------------------------------------------------+--------+
| Texts | Labels |
+=================================================+========+
| (anchor, positive) pairs | none |
+-------------------------------------------------+--------+
| (anchor, positive, negative) triplets | none |
+-------------------------------------------------+--------+
| (anchor, positive, negative_1, ..., negative_n) | none |
+-------------------------------------------------+--------+
Recommendations:
- Use ``BatchSamplers.NO_DUPLICATES`` (:class:`docs <sentence_transformers.training_args.BatchSamplers>`) to
ensure that no in-batch negatives are duplicates of the anchor or positive samples.
Relations:
- :class:`SparseCachedMultipleNegativesRankingLoss` is equivalent to this loss, but it uses caching that allows for
much higher batch sizes (and thus better performance) without extra memory usage. However, it is slightly
slower.
- :class:`SparseGISTEmbedLoss` is equivalent to this loss, but uses a guide model to guide the in-batch negative
sample selection. `SparseGISTEmbedLoss` yields a stronger training signal at the cost of some training overhead.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseMultipleNegativesRankingLoss(model), lambda_corpus=3e-5, lambda_query=5e-5
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
        super().__init__(model, scale=scale, similarity_fct=similarity_fct)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError(
"SparseMultipleNegativesRankingLoss should not be used alone. Use it with SpladeLoss or CSRLoss."
)
|
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
from .functional import (
add_noise,
amplitude_to_DB,
apply_beamforming,
apply_codec,
compute_deltas,
convolve,
create_dct,
DB_to_amplitude,
deemphasis,
detect_pitch_frequency,
edit_distance,
fftconvolve,
forced_align,
griffinlim,
inverse_spectrogram,
linear_fbanks,
loudness,
mask_along_axis,
mask_along_axis_iid,
melscale_fbanks,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
pitch_shift,
preemphasis,
psd,
resample,
rnnt_loss,
rtf_evd,
rtf_power,
sliding_window_cmn,
spectral_centroid,
spectrogram,
speed,
)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"create_dct",
"melscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"loudness",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"forced_align",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
"fftconvolve",
"convolve",
"add_noise",
"speed",
"preemphasis",
"deemphasis",
]
|
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
from .functional import (
add_noise,
amplitude_to_DB,
apply_beamforming,
apply_codec,
compute_deltas,
compute_kaldi_pitch,
convolve,
create_dct,
DB_to_amplitude,
deemphasis,
detect_pitch_frequency,
edit_distance,
fftconvolve,
forced_align,
griffinlim,
inverse_spectrogram,
linear_fbanks,
loudness,
mask_along_axis,
mask_along_axis_iid,
melscale_fbanks,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
pitch_shift,
preemphasis,
psd,
resample,
rnnt_loss,
rtf_evd,
rtf_power,
sliding_window_cmn,
spectral_centroid,
spectrogram,
speed,
)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"compute_kaldi_pitch",
"create_dct",
"melscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"loudness",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"forced_align",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
"fftconvolve",
"convolve",
"add_noise",
"speed",
"preemphasis",
"deemphasis",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import random
import warnings
import torch
from mmcv.runner import get_dist_info
from mmcv.runner.hooks import HOOKS, Hook
from torch import distributed as dist
@HOOKS.register_module()
class SyncRandomSizeHook(Hook):
"""Change and synchronize the random image size across ranks.
    SyncRandomSizeHook is deprecated; please use the Resize pipeline to
    achieve similar functionality, e.g. `dict(type='Resize',
    img_scale=[(448, 448), (832, 832)], multiscale_mode='range',
    keep_ratio=True)`.
    Note: Due to the multi-process dataloader, this hook behaves differently
    from YOLOX's official implementation: the official implementation changes
    the size at a fixed iteration interval, whereas this hook uses a fixed
    epoch interval.
Args:
ratio_range (tuple[int]): Random ratio range. It will be multiplied
by 32, and then change the dataset output image size.
Default: (14, 26).
img_scale (tuple[int]): Size of input image. Default: (640, 640).
interval (int): The epoch interval of change image size. Default: 1.
device (torch.device | str): device for returned tensors.
Default: 'cuda'.
"""
def __init__(self,
ratio_range=(14, 26),
img_scale=(640, 640),
interval=1,
device='cuda'):
        warnings.warn('DeprecationWarning: SyncRandomSizeHook is deprecated. '
                      'Please use the Resize pipeline to achieve similar '
                      'functionality. Due to the multi-process dataloader, '
                      'its behavior differs from YOLOX\'s official '
                      'implementation: the official implementation changes '
                      'the size at a fixed iteration interval, whereas this '
                      'hook uses a fixed epoch interval.')
self.rank, world_size = get_dist_info()
self.is_distributed = world_size > 1
self.ratio_range = ratio_range
self.img_scale = img_scale
self.interval = interval
self.device = device
def after_train_epoch(self, runner):
"""Change the dataset output image size."""
if self.ratio_range is not None and (runner.epoch +
1) % self.interval == 0:
            # Because DDP and DP resolve devices inconsistently,
            # we do not get the device from runner.model.
tensor = torch.LongTensor(2).to(self.device)
if self.rank == 0:
size_factor = self.img_scale[1] * 1. / self.img_scale[0]
size = random.randint(*self.ratio_range)
size = (int(32 * size), 32 * int(size * size_factor))
tensor[0] = size[0]
tensor[1] = size[1]
if self.is_distributed:
dist.barrier()
dist.broadcast(tensor, 0)
runner.data_loader.dataset.update_dynamic_scale(
(tensor[0].item(), tensor[1].item()))
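# A minimal sketch of the replacement recommended in the deprecation notice
# above; only the Resize entry comes from the docstring, the surrounding
# pipeline steps are illustrative assumptions.
train_pipeline_example = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(448, 448), (832, 832)],
        multiscale_mode='range',
        keep_ratio=True),
]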
|
# Copyright (c) OpenMMLab. All rights reserved.
import random
import warnings
import torch
from mmcv.runner import get_dist_info
from mmcv.runner.hooks import HOOKS, Hook
from torch import distributed as dist
@HOOKS.register_module()
class SyncRandomSizeHook(Hook):
"""Change and synchronize the random image size across ranks.
    SyncRandomSizeHook is deprecated; please use the Resize pipeline to
    achieve similar functionality, e.g. `dict(type='Resize',
    img_scale=[(448, 448), (832, 832)], multiscale_mode='range',
    keep_ratio=True)`.
    Note: Due to the multi-process dataloader, this hook behaves differently
    from YOLOX's official implementation: the official implementation changes
    the size at a fixed iteration interval, whereas this hook uses a fixed
    epoch interval.
Args:
ratio_range (tuple[int]): Random ratio range. It will be multiplied
by 32, and then change the dataset output image size.
Default: (14, 26).
img_scale (tuple[int]): Size of input image. Default: (640, 640).
interval (int): The epoch interval of change image size. Default: 1.
device (torch.device | str): device for returned tensors.
Default: 'cuda'.
"""
def __init__(self,
ratio_range=(14, 26),
img_scale=(640, 640),
interval=1,
device='cuda'):
        warnings.warn('DeprecationWarning: SyncRandomSizeHook is deprecated. '
                      'Please use the Resize pipeline to achieve similar '
                      'functionality. Due to the multi-process dataloader, '
                      'its behavior differs from YOLOX\'s official '
                      'implementation: the official implementation changes '
                      'the size at a fixed iteration interval, whereas this '
                      'hook uses a fixed epoch interval.')
self.rank, world_size = get_dist_info()
self.is_distributed = world_size > 1
self.ratio_range = ratio_range
self.img_scale = img_scale
self.interval = interval
self.device = device
def after_train_epoch(self, runner):
"""Change the dataset output image size."""
if self.ratio_range is not None and (runner.epoch +
1) % self.interval == 0:
            # Because DDP and DP resolve devices inconsistently,
            # we do not get the device from runner.model.
tensor = torch.LongTensor(2).to(self.device)
if self.rank == 0:
size_factor = self.img_scale[1] * 1. / self.img_scale[0]
size = random.randint(*self.ratio_range)
size = (int(32 * size), 32 * int(size * size_factor))
tensor[0] = size[0]
tensor[1] = size[1]
if self.is_distributed:
dist.barrier()
dist.broadcast(tensor, 0)
runner.data_loader.dataset.update_dynamic_scale(
(tensor[0].item(), tensor[1].item()))
|
"""
This file contains some utility functions used to find parallel sentences
in two monolingual corpora.
Code in this file has been adapted from the LASER repository:
https://github.com/facebookresearch/LASER
"""
import gzip
import lzma
import time
import faiss
import numpy as np
######## Functions to find and score candidates
def score(x, y, fwd_mean, bwd_mean, margin):
return margin(x.dot(y), (fwd_mean + bwd_mean) / 2)
def score_candidates(x, y, candidate_inds, fwd_mean, bwd_mean, margin):
scores = np.zeros(candidate_inds.shape)
for i in range(scores.shape[0]):
for j in range(scores.shape[1]):
k = candidate_inds[i, j]
scores[i, j] = score(x[i], y[k], fwd_mean[i], bwd_mean[k], margin)
return scores
def kNN(x, y, k, use_ann_search=False, ann_num_clusters=32768, ann_num_cluster_probe=3):
start_time = time.time()
if use_ann_search:
print("Perform approx. kNN search")
n_cluster = min(ann_num_clusters, int(y.shape[0] / 1000))
quantizer = faiss.IndexFlatIP(y.shape[1])
index = faiss.IndexIVFFlat(quantizer, y.shape[1], n_cluster, faiss.METRIC_INNER_PRODUCT)
index.nprobe = ann_num_cluster_probe
index.train(y)
index.add(y)
sim, ind = index.search(x, k)
else:
print("Perform exact search")
idx = faiss.IndexFlatIP(y.shape[1])
idx.add(y)
sim, ind = idx.search(x, k)
print("Done: {:.2f} sec".format(time.time() - start_time))
return sim, ind
def file_open(filepath):
    # Helper for opening files based on file extension
if filepath.endswith(".gz"):
return gzip.open(filepath, "rt", encoding="utf8")
elif filepath.endswith("xz"):
return lzma.open(filepath, "rt", encoding="utf8")
else:
return open(filepath, "r", encoding="utf8")
|
"""
This file contains some utility functions used to find parallel sentences
in two monolingual corpora.
Code in this file has been adapted from the LASER repository:
https://github.com/facebookresearch/LASER
"""
import faiss
import numpy as np
import time
import gzip
import lzma
######## Functions to find and score candidates
def score(x, y, fwd_mean, bwd_mean, margin):
return margin(x.dot(y), (fwd_mean + bwd_mean) / 2)
def score_candidates(x, y, candidate_inds, fwd_mean, bwd_mean, margin):
scores = np.zeros(candidate_inds.shape)
for i in range(scores.shape[0]):
for j in range(scores.shape[1]):
k = candidate_inds[i, j]
scores[i, j] = score(x[i], y[k], fwd_mean[i], bwd_mean[k], margin)
return scores
def kNN(x, y, k, use_ann_search=False, ann_num_clusters=32768, ann_num_cluster_probe=3):
start_time = time.time()
if use_ann_search:
print("Perform approx. kNN search")
n_cluster = min(ann_num_clusters, int(y.shape[0] / 1000))
quantizer = faiss.IndexFlatIP(y.shape[1])
index = faiss.IndexIVFFlat(quantizer, y.shape[1], n_cluster, faiss.METRIC_INNER_PRODUCT)
index.nprobe = ann_num_cluster_probe
index.train(y)
index.add(y)
sim, ind = index.search(x, k)
else:
print("Perform exact search")
idx = faiss.IndexFlatIP(y.shape[1])
idx.add(y)
sim, ind = idx.search(x, k)
print("Done: {:.2f} sec".format(time.time() - start_time))
return sim, ind
def file_open(filepath):
    # Helper for opening files based on file extension
if filepath.endswith(".gz"):
return gzip.open(filepath, "rt", encoding="utf8")
elif filepath.endswith("xz"):
return lzma.open(filepath, "rt", encoding="utf8")
else:
return open(filepath, "r", encoding="utf8")
|
import numpy as np
import torch
from docarray import BaseDocument
from docarray.document import AnyDocument
from docarray.typing import (
AnyEmbedding,
AnyUrl,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchTensor,
)
def test_proto_all_types():
class Mymmdoc(BaseDocument):
tensor: NdArray
torch_tensor: TorchTensor
embedding: AnyEmbedding
any_url: AnyUrl
image_url: ImageUrl
text_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
doc = Mymmdoc(
tensor=np.zeros((3, 224, 224)),
torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((100, 1)),
any_url='http://jina.ai',
image_url='http://jina.ai/bla.jpg',
text_url='http://jina.ai',
mesh_url='http://jina.ai/mesh.obj',
point_cloud_url='http://jina.ai/mesh.obj',
)
new_doc = AnyDocument.from_protobuf(doc.to_protobuf())
for field, value in new_doc:
if field == 'embedding':
# embedding is a Union type, not supported by isinstance
assert isinstance(value, np.ndarray) or isinstance(value, torch.Tensor)
else:
assert isinstance(value, doc._get_field_type(field))
|
import numpy as np
import torch
from docarray import BaseDocument
from docarray.document import AnyDocument
from docarray.typing import (
AnyUrl,
Embedding,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchTensor,
)
def test_proto_all_types():
class Mymmdoc(BaseDocument):
tensor: NdArray
torch_tensor: TorchTensor
embedding: Embedding
any_url: AnyUrl
image_url: ImageUrl
text_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
doc = Mymmdoc(
tensor=np.zeros((3, 224, 224)),
torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((100, 1)),
any_url='http://jina.ai',
image_url='http://jina.ai/bla.jpg',
text_url='http://jina.ai',
mesh_url='http://jina.ai/mesh.obj',
point_cloud_url='http://jina.ai/mesh.obj',
)
new_doc = AnyDocument.from_protobuf(doc.to_protobuf())
for field, value in new_doc:
if field == 'embedding':
# embedding is a Union type, not supported by isinstance
assert isinstance(value, np.ndarray) or isinstance(value, torch.Tensor)
else:
assert isinstance(value, doc._get_field_type(field))
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Collecting some commonly used type hint in mmdetection."""
from typing import Dict, List, Optional, Tuple, Union
import torch
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from ..bbox.samplers import SamplingResult
from ..data_structures import DetDataSample
# Type hint of config data
ConfigType = Union[ConfigDict, dict]
OptConfigType = Optional[ConfigType]
# Type hint of one or more config data
MultiConfig = Union[ConfigType, List[ConfigType]]
OptMultiConfig = Optional[MultiConfig]
InstanceList = List[InstanceData]
OptInstanceList = Optional[InstanceList]
SampleList = List[DetDataSample]
OptSampleList = Optional[SampleList]
SamplingResultList = List[SamplingResult]
OptSamplingResultList = Optional[SamplingResultList]
ForwardResults = Union[Dict[str, torch.Tensor], List[DetDataSample],
Tuple[torch.Tensor], torch.Tensor]
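# A minimal sketch (hypothetical function, not part of mmdetection) showing
# how these aliases typically appear in signatures:
def _example_forward(batch_inputs: torch.Tensor,
                     batch_data_samples: OptSampleList = None,
                     train_cfg: OptConfigType = None) -> ForwardResults:
    """Illustrative only: takes an optional list of ``DetDataSample`` and an
    optional ConfigDict/dict, and may return tensors, a dict of tensors, or
    data samples."""
    return batch_inputs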
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Collecting some commonly used type hint in mmdetection."""
from typing import List, Optional, Union
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from ..bbox.samplers import SamplingResult
from ..data_structures import DetDataSample
# Type hint of config data
ConfigType = Union[ConfigDict, dict]
OptConfigType = Optional[ConfigType]
# Type hint of one or more config data
MultiConfig = Union[ConfigType, List[ConfigType]]
OptMultiConfig = Optional[MultiConfig]
InstanceList = List[InstanceData]
OptInstanceList = Optional[InstanceList]
SampleList = List[DetDataSample]
OptSampleList = Optional[SampleList]
SamplingResultList = List[SamplingResult]
OptSamplingResultList = Optional[SamplingResultList]
|
#!/usr/bin/env python3
import logging
import pathlib
from argparse import ArgumentParser
from common import MODEL_TYPE_LIBRISPEECH, MODEL_TYPE_MUSTC, MODEL_TYPE_TEDLIUM3
from librispeech.lightning import LibriSpeechRNNTModule
from mustc.lightning import MuSTCRNNTModule
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from tedlium3.lightning import TEDLIUM3RNNTModule
def get_trainer(args):
checkpoint_dir = args.exp_dir / "checkpoints"
checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/val_loss",
mode="min",
save_top_k=5,
save_weights_only=True,
verbose=True,
)
train_checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/train_loss",
mode="min",
save_top_k=5,
save_weights_only=True,
verbose=True,
)
callbacks = [
checkpoint,
train_checkpoint,
]
return Trainer(
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
num_nodes=args.num_nodes,
gpus=args.gpus,
accelerator="gpu",
strategy="ddp",
gradient_clip_val=args.gradient_clip_val,
callbacks=callbacks,
)
def get_lightning_module(args):
if args.model_type == MODEL_TYPE_LIBRISPEECH:
return LibriSpeechRNNTModule(
librispeech_path=str(args.dataset_path),
sp_model_path=str(args.sp_model_path),
global_stats_path=str(args.global_stats_path),
)
elif args.model_type == MODEL_TYPE_TEDLIUM3:
return TEDLIUM3RNNTModule(
tedlium_path=str(args.dataset_path),
sp_model_path=str(args.sp_model_path),
global_stats_path=str(args.global_stats_path),
)
elif args.model_type == MODEL_TYPE_MUSTC:
return MuSTCRNNTModule(
mustc_path=str(args.dataset_path),
sp_model_path=str(args.sp_model_path),
global_stats_path=str(args.global_stats_path),
)
else:
raise ValueError(f"Encountered unsupported model type {args.model_type}.")
def parse_args():
parser = ArgumentParser()
parser.add_argument(
"--model-type", type=str, choices=[MODEL_TYPE_LIBRISPEECH, MODEL_TYPE_TEDLIUM3, MODEL_TYPE_MUSTC], required=True
)
parser.add_argument(
"--global-stats-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="Path to JSON file containing feature means and stddevs.",
required=True,
)
parser.add_argument(
"--dataset-path",
type=pathlib.Path,
help="Path to datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=pathlib.Path,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--exp-dir",
default=pathlib.Path("./exp"),
type=pathlib.Path,
help="Directory to save checkpoints and logs to. (Default: './exp')",
)
parser.add_argument(
"--num-nodes",
default=4,
type=int,
help="Number of nodes to use for training. (Default: 4)",
)
parser.add_argument(
"--gpus",
default=8,
type=int,
help="Number of GPUs per node to use for training. (Default: 8)",
)
parser.add_argument(
"--epochs",
default=120,
type=int,
help="Number of epochs to train for. (Default: 120)",
)
parser.add_argument(
"--gradient-clip-val", default=10.0, type=float, help="Value to clip gradient values to. (Default: 10.0)"
)
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = parse_args()
init_logger(args.debug)
model = get_lightning_module(args)
trainer = get_trainer(args)
trainer.fit(model)
if __name__ == "__main__":
cli_main()
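# Example invocation (assuming this script is saved as train.py; paths are
# illustrative, and the --model-type value must be one of the MODEL_TYPE_*
# constants imported above):
#
#   python train.py \
#       --model-type <MODEL_TYPE_LIBRISPEECH|MODEL_TYPE_TEDLIUM3|MODEL_TYPE_MUSTC> \
#       --dataset-path /datasets/tedlium3 \
#       --sp-model-path spm_bpe_500.model \
#       --global-stats-path global_stats.json \
#       --exp-dir ./exp --num-nodes 4 --gpus 8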
|
#!/usr/bin/env python3
import logging
import pathlib
from argparse import ArgumentParser
from common import MODEL_TYPE_LIBRISPEECH, MODEL_TYPE_TEDLIUM3, MODEL_TYPE_MUSTC
from librispeech.lightning import LibriSpeechRNNTModule
from mustc.lightning import MuSTCRNNTModule
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from tedlium3.lightning import TEDLIUM3RNNTModule
def get_trainer(args):
checkpoint_dir = args.exp_dir / "checkpoints"
checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/val_loss",
mode="min",
save_top_k=5,
save_weights_only=True,
verbose=True,
)
train_checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/train_loss",
mode="min",
save_top_k=5,
save_weights_only=True,
verbose=True,
)
callbacks = [
checkpoint,
train_checkpoint,
]
return Trainer(
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
num_nodes=args.num_nodes,
gpus=args.gpus,
accelerator="gpu",
strategy="ddp",
gradient_clip_val=args.gradient_clip_val,
callbacks=callbacks,
)
def get_lightning_module(args):
if args.model_type == MODEL_TYPE_LIBRISPEECH:
return LibriSpeechRNNTModule(
librispeech_path=str(args.dataset_path),
sp_model_path=str(args.sp_model_path),
global_stats_path=str(args.global_stats_path),
)
elif args.model_type == MODEL_TYPE_TEDLIUM3:
return TEDLIUM3RNNTModule(
tedlium_path=str(args.dataset_path),
sp_model_path=str(args.sp_model_path),
global_stats_path=str(args.global_stats_path),
)
elif args.model_type == MODEL_TYPE_MUSTC:
return MuSTCRNNTModule(
mustc_path=str(args.dataset_path),
sp_model_path=str(args.sp_model_path),
global_stats_path=str(args.global_stats_path),
)
else:
raise ValueError(f"Encountered unsupported model type {args.model_type}.")
def parse_args():
parser = ArgumentParser()
parser.add_argument(
"--model-type", type=str, choices=[MODEL_TYPE_LIBRISPEECH, MODEL_TYPE_TEDLIUM3, MODEL_TYPE_MUSTC], required=True
)
parser.add_argument(
"--global-stats-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="Path to JSON file containing feature means and stddevs.",
required=True,
)
parser.add_argument(
"--dataset-path",
type=pathlib.Path,
help="Path to datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=pathlib.Path,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--exp-dir",
default=pathlib.Path("./exp"),
type=pathlib.Path,
help="Directory to save checkpoints and logs to. (Default: './exp')",
)
parser.add_argument(
"--num-nodes",
default=4,
type=int,
help="Number of nodes to use for training. (Default: 4)",
)
parser.add_argument(
"--gpus",
default=8,
type=int,
help="Number of GPUs per node to use for training. (Default: 8)",
)
parser.add_argument(
"--epochs",
default=120,
type=int,
help="Number of epochs to train for. (Default: 120)",
)
parser.add_argument(
"--gradient-clip-val", default=10.0, type=float, help="Value to clip gradient values to. (Default: 10.0)"
)
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = parse_args()
init_logger(args.debug)
model = get_lightning_module(args)
trainer = get_trainer(args)
trainer.fit(model)
if __name__ == "__main__":
cli_main()
|
import os
import time
import uuid
from contextlib import contextmanager
from typing import Optional
import pytest
import requests
from huggingface_hub.hf_api import HfApi, RepositoryNotFoundError
CI_HUB_USER = "DSUser"
CI_HUB_USER_FULL_NAME = "Dummy Datasets User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
)
@pytest.fixture
def ci_hub_config(monkeypatch):
monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config):
old_environ = dict(os.environ)
os.environ["HF_TOKEN"] = CI_HUB_USER_TOKEN
yield
os.environ.clear()
os.environ.update(old_environ)
@pytest.fixture(scope="session")
def hf_api():
return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token():
yield CI_HUB_USER_TOKEN
@pytest.fixture
def cleanup_repo(hf_api):
def _cleanup_repo(repo_id):
hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
@contextmanager
def _temporary_repo(repo_id: Optional[str] = None):
repo_id = repo_id or f"{CI_HUB_USER}/test-dataset-{uuid.uuid4().hex[:6]}-{int(time.time() * 10e3)}"
try:
yield repo_id
finally:
try:
cleanup_repo(repo_id)
except RepositoryNotFoundError:
pass
return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file_content):
repo_name = f"repo_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=text_file_content.encode(),
path_in_repo="data/text_data.txt",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_csv_with_dir_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
repo_name = f"repo_zipped_img_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_image_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_img_data_
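# A minimal usage sketch (hypothetical test, not part of the fixtures above)
# showing how ``temporary_repo`` composes with ``hf_api``: the context manager
# yields a fresh repo_id and deletes the repo on exit.
def test_temporary_repo_example(temporary_repo, hf_api, hf_token):
    with temporary_repo() as repo_id:
        hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
        assert repo_id.startswith(CI_HUB_USER)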
|
import os
import time
import uuid
from contextlib import contextmanager
from typing import Optional
import pytest
import requests
from huggingface_hub.hf_api import HfApi, RepositoryNotFoundError
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
)
@pytest.fixture
def ci_hub_config(monkeypatch):
monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config):
old_environ = dict(os.environ)
os.environ["HF_TOKEN"] = CI_HUB_USER_TOKEN
yield
os.environ.clear()
os.environ.update(old_environ)
@pytest.fixture(scope="session")
def hf_api():
return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token():
yield CI_HUB_USER_TOKEN
@pytest.fixture
def cleanup_repo(hf_api):
def _cleanup_repo(repo_id):
hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
@contextmanager
def _temporary_repo(repo_id: Optional[str] = None):
repo_id = repo_id or f"{CI_HUB_USER}/test-dataset-{uuid.uuid4().hex[:6]}-{int(time.time() * 10e3)}"
try:
yield repo_id
finally:
try:
cleanup_repo(repo_id)
except RepositoryNotFoundError:
pass
return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file_content):
repo_name = f"repo_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=text_file_content.encode(),
path_in_repo="data/text_data.txt",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_csv_with_dir_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
repo_name = f"repo_zipped_img_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_image_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_img_data_
|
from typing import Dict, List, Tuple
import pytest
from opentelemetry.metrics import Meter
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import (
HistogramDataPoint,
InMemoryMetricReader,
Metric,
)
from jina.serve.networking import _NetworkingHistograms
@pytest.fixture
def metrics_setup() -> Tuple[InMemoryMetricReader, Meter]:
metric_reader = InMemoryMetricReader()
meter_provider = MeterProvider(metric_readers=[metric_reader])
meter = meter_provider.get_meter('test')
yield metric_reader, meter
if hasattr(meter_provider, 'force_flush'):
metric_reader.force_flush()
if hasattr(meter_provider, 'shutdown'):
meter_provider.shutdown()
def test_get_labels():
a: _NetworkingHistograms = _NetworkingHistograms()
    assert a._get_labels() is None
HIST_LABELS = {
'a': 1,
'b': 2,
}
a.histogram_metric_labels = HIST_LABELS
assert a._get_labels() == HIST_LABELS
ADD_LABELS = {
'b': 3,
'c': 4,
}
assert a._get_labels(ADD_LABELS) == {**HIST_LABELS, **ADD_LABELS}
def test_recording_methods(metrics_setup: Tuple[InMemoryMetricReader, Meter]):
metric_reader, meter = metrics_setup
a: _NetworkingHistograms = _NetworkingHistograms(
sending_requests_time_metrics=meter.create_histogram("request_time"),
send_requests_bytes_metrics=meter.create_histogram("request_bytes"),
received_response_bytes=meter.create_histogram("response_bytes"),
histogram_metric_labels=None,
)
a.record_sending_requests_time_metrics(10)
a.record_send_requests_bytes_metrics(20)
a.record_received_response_bytes(30)
histogram_metrics: List[Metric] = (
metric_reader.get_metrics_data().resource_metrics[0].scope_metrics[0].metrics
)
    data_points_sums: Dict[str, float] = {
hist.name: next(iter(hist.data.data_points)).sum for hist in histogram_metrics
}
assert data_points_sums == {
'request_time': 10,
'request_bytes': 20,
'response_bytes': 30,
}
|
import pytest
from typing import Tuple, List, Dict
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import (
InMemoryMetricReader,
Metric,
HistogramDataPoint,
)
from opentelemetry.metrics import Meter
from jina.serve.networking import _NetworkingHistograms
@pytest.fixture
def metrics_setup() -> Tuple[InMemoryMetricReader, MeterProvider]:
metric_reader = InMemoryMetricReader()
meter_provider = MeterProvider(metric_readers=[metric_reader])
meter = meter_provider.get_meter('test')
yield metric_reader, meter
if hasattr(metric_reader, 'force_flush'):
metric_reader.force_flush()
if hasattr(meter_provider, 'shutdown'):
meter_provider.shutdown()
def test_get_labels():
a: _NetworkingHistograms = _NetworkingHistograms()
assert a._get_labels() is None
HIST_LABELS = {
'a': 1,
'b': 2,
}
a.histogram_metric_labels = HIST_LABELS
assert a._get_labels() == HIST_LABELS
ADD_LABELS = {
'b': 3,
'c': 4,
}
assert a._get_labels(ADD_LABELS) == {**HIST_LABELS, **ADD_LABELS}
def test_recording_methods(metrics_setup: Tuple[InMemoryMetricReader, Meter]):
metric_reader, meter = metrics_setup
a: _NetworkingHistograms = _NetworkingHistograms(
sending_requests_time_metrics=meter.create_histogram("request_time"),
send_requests_bytes_metrics=meter.create_histogram("request_bytes"),
received_response_bytes=meter.create_histogram("response_bytes"),
histogram_metric_labels=None,
)
a.record_sending_requests_time_metrics(10)
a.record_send_requests_bytes_metrics(20)
a.record_received_response_bytes(30)
histogram_metrics: List[Metric] = (
metric_reader.get_metrics_data().resource_metrics[0].scope_metrics[0].metrics
)
data_points_sums: Dict[str, HistogramDataPoint] = {
hist.name: next(iter(hist.data.data_points)).sum for hist in histogram_metrics
}
assert data_points_sums == {
'request_time': 10,
'request_bytes': 20,
'response_bytes': 30,
}
|
import numpy as np
from docarray import BaseDoc
from docarray.array import DocArrayStacked
from docarray.array.stacked.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_column_storage_init():
class InnerDoc(BaseDoc):
price: int
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
doc: InnerDoc
docs = [
MyDoc(tensor=np.zeros(10), name='hello', doc=InnerDoc(price=i))
for i in range(4)
]
storage = DocArrayStacked[MyDoc](docs)._storage
assert (storage.tensor_columns['tensor'] == np.zeros((4, 10))).all()
for name in storage.any_columns['name']:
assert name == 'hello'
inner_docs = storage.doc_columns['doc']
assert isinstance(inner_docs, DocArrayStacked[InnerDoc])
for i, doc in enumerate(inner_docs):
assert doc.price == i
def test_column_storage_view():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=i) for i in range(4)]
storage = DocArrayStacked[MyDoc](docs)._storage
view = ColumnStorageView(0, storage)
assert view['id'] == '0'
assert (view['tensor'] == np.zeros(10)).all()
assert view['name'] == 'hello'
view['id'] = 1
view['tensor'] = np.ones(10)
view['name'] = 'byebye'
assert storage.any_columns['id'][0] == 1
assert (storage.tensor_columns['tensor'][0] == np.ones(10)).all()
assert storage.any_columns['name'][0] == 'byebye'
|
import numpy as np
from docarray import BaseDocument
from docarray.array import DocumentArrayStacked
from docarray.array.stacked.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_column_storage_init():
class InnerDoc(BaseDocument):
price: int
class MyDoc(BaseDocument):
tensor: AnyTensor
name: str
doc: InnerDoc
docs = [
MyDoc(tensor=np.zeros(10), name='hello', doc=InnerDoc(price=i))
for i in range(4)
]
storage = DocumentArrayStacked[MyDoc](docs)._storage
assert (storage.tensor_columns['tensor'] == np.zeros((4, 10))).all()
for name in storage.any_columns['name']:
assert name == 'hello'
inner_docs = storage.doc_columns['doc']
assert isinstance(inner_docs, DocumentArrayStacked[InnerDoc])
for i, doc in enumerate(inner_docs):
assert doc.price == i
def test_column_storage_view():
class MyDoc(BaseDocument):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=i) for i in range(4)]
storage = DocumentArrayStacked[MyDoc](docs)._storage
view = ColumnStorageView(0, storage)
assert view['id'] == '0'
assert (view['tensor'] == np.zeros(10)).all()
assert view['name'] == 'hello'
view['id'] = 1
view['tensor'] = np.ones(10)
view['name'] = 'byebye'
assert storage.any_columns['id'][0] == 1
assert (storage.tensor_columns['tensor'][0] == np.ones(10)).all()
assert storage.any_columns['name'][0] == 'byebye'
|
from __future__ import annotations
from typing_extensions import deprecated
from sentence_transformers import InputExample
from sentence_transformers.cross_encoder.evaluation.CEClassificationEvaluator import CEClassificationEvaluator
@deprecated(
"This evaluator has been deprecated in favor of the more general CEClassificationEvaluator. "
"Please use CEClassificationEvaluator instead, which supports both binary and multi-class "
"evaluation. It accepts approximately the same inputs as this evaluator."
)
class CEBinaryAccuracyEvaluator(CEClassificationEvaluator):
"""
This evaluator has been deprecated in favor of the more general CEClassificationEvaluator.
"""
@classmethod
def from_input_examples(cls, examples: list[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
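For reference, a minimal usage sketch of the shim above; the sentence pairs and labels are illustrative, and it assumes CEClassificationEvaluator accepts the same positional arguments:
# Hypothetical example: build the deprecated evaluator from InputExamples
from sentence_transformers import InputExample
examples = [
    InputExample(texts=["A man is eating", "A person eats"], label=1),
    InputExample(texts=["A man is eating", "A girl is sleeping"], label=0),
]
evaluator = CEBinaryAccuracyEvaluator.from_input_examples(examples)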
|
from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CEBinaryAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
It is designed for CrossEncoders with 1 output. It measures the
accuracy of the predicted class vs. the gold labels. It uses a fixed threshold to determine the label (0 vs 1).
See CEBinaryClassificationEvaluator for an evaluator that automatically determines the optimal threshold.
"""
def __init__(
self,
sentence_pairs: list[list[str]],
labels: list[int],
name: str = "",
threshold: float = 0.5,
write_csv: bool = True,
):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.name = name
self.threshold = threshold
self.csv_file = "CEBinaryAccuracyEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Accuracy"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: list[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = f" after epoch {epoch}:"
else:
out_txt = f" in epoch {epoch} after {steps} steps:"
else:
out_txt = ":"
logger.info("CEBinaryAccuracyEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
pred_labels = pred_scores > self.threshold
assert len(pred_labels) == len(self.labels)
acc = np.sum(pred_labels == self.labels) / len(self.labels)
logger.info(f"Accuracy: {acc * 100:.2f}")
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc])
return acc
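A hedged usage sketch for the evaluator above; the sentence pairs are illustrative and the CrossEncoder checkpoint is an assumption, not taken from the source:
# Illustrative only: any CrossEncoder with a single output score would do
from sentence_transformers import CrossEncoder
pairs = [["A man is eating", "A person eats"], ["A man is eating", "A girl is sleeping"]]
labels = [1, 0]
evaluator = CEBinaryAccuracyEvaluator(pairs, labels, name="demo", threshold=0.5)
model = CrossEncoder("cross-encoder/stsb-distilroberta-base")  # hypothetical checkpoint choice
accuracy = evaluator(model)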
|
import ast
from langchain_community.utilities.steam import SteamWebAPIWrapper
def test_get_game_details() -> None:
"""Test for getting game details on Steam"""
steam = SteamWebAPIWrapper()
output = steam.run("get_game_details", "Terraria")
assert "id" in output
assert "link" in output
assert "detailed description" in output
assert "supported languages" in output
assert "price" in output
def test_get_recommended_games() -> None:
"""Test for getting recommended games on Steam"""
steam = SteamWebAPIWrapper()
output = steam.run("get_recommended_games", "76561198362745711")
output = ast.literal_eval(output)
assert len(output) == 5
|
import ast
from langchain_community.utilities.steam import SteamWebAPIWrapper
def test_get_game_details() -> None:
"""Test for getting game details on Steam"""
steam = SteamWebAPIWrapper() # type: ignore[call-arg]
output = steam.run("get_game_details", "Terraria")
assert "id" in output
assert "link" in output
assert "detailed description" in output
assert "supported languages" in output
assert "price" in output
def test_get_recommended_games() -> None:
"""Test for getting recommended games on Steam"""
steam = SteamWebAPIWrapper() # type: ignore[call-arg]
output = steam.run("get_recommended_games", "76561198362745711")
output = ast.literal_eval(output)
assert len(output) == 5
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x0e\x64ocarray.proto\x12\x08\x64ocarray\x1a\x1cgoogle/protobuf/struct.proto\"A\n\x11\x44\x65nseNdArrayProto\x12\x0e\n\x06\x62uffer\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\r\x12\r\n\x05\x64type\x18\x03 \x01(\t\"g\n\x0cNdArrayProto\x12*\n\x05\x64\x65nse\x18\x01 \x01(\x0b\x32\x1b.docarray.DenseNdArrayProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"\xe2\x02\n\tNodeProto\x12\x0e\n\x04\x62lob\x18\x01 \x01(\x0cH\x00\x12(\n\x06tensor\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x0e\n\x04text\x18\x03 \x01(\tH\x00\x12)\n\x06nested\x18\x04 \x01(\x0b\x32\x17.docarray.DocumentProtoH\x00\x12.\n\x06\x63hunks\x18\x05 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12+\n\tembedding\x18\x06 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x11\n\x07\x61ny_url\x18\x07 \x01(\tH\x00\x12\x13\n\timage_url\x18\x08 \x01(\tH\x00\x12\x12\n\x08text_url\x18\t \x01(\tH\x00\x12\x0c\n\x02id\x18\n \x01(\tH\x00\x12.\n\x0ctorch_tensor\x18\x0b \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x42\t\n\x07\x63ontent\"\x82\x01\n\rDocumentProto\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32!.docarray.DocumentProto.DataEntry\x1a@\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.docarray.NodeProto:\x02\x38\x01\";\n\x12\x44ocumentArrayProto\x12%\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\x17.docarray.DocumentProtob\x06proto3'
)
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'docarray_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DOCUMENTPROTO_DATAENTRY._options = None
_DOCUMENTPROTO_DATAENTRY._serialized_options = b'8\001'
_DENSENDARRAYPROTO._serialized_start = 58
_DENSENDARRAYPROTO._serialized_end = 123
_NDARRAYPROTO._serialized_start = 125
_NDARRAYPROTO._serialized_end = 228
_NODEPROTO._serialized_start = 231
_NODEPROTO._serialized_end = 585
_DOCUMENTPROTO._serialized_start = 588
_DOCUMENTPROTO._serialized_end = 718
_DOCUMENTPROTO_DATAENTRY._serialized_start = 654
_DOCUMENTPROTO_DATAENTRY._serialized_end = 718
_DOCUMENTARRAYPROTO._serialized_start = 720
_DOCUMENTARRAYPROTO._serialized_end = 779
# @@protoc_insertion_point(module_scope)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0e\x64ocarray.proto\x12\x08\x64ocarray\x1a\x1cgoogle/protobuf/struct.proto\"A\n\x11\x44\x65nseNdArrayProto\x12\x0e\n\x06\x62uffer\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\r\x12\r\n\x05\x64type\x18\x03 \x01(\t\"g\n\x0cNdArrayProto\x12*\n\x05\x64\x65nse\x18\x01 \x01(\x0b\x32\x1b.docarray.DenseNdArrayProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"\xce\x02\n\tNodeProto\x12\x0e\n\x04\x62lob\x18\x01 \x01(\x0cH\x00\x12(\n\x06tensor\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x0e\n\x04text\x18\x03 \x01(\tH\x00\x12)\n\x06nested\x18\x04 \x01(\x0b\x32\x17.docarray.DocumentProtoH\x00\x12.\n\x06\x63hunks\x18\x05 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12+\n\tembedding\x18\x06 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x11\n\x07\x61ny_url\x18\x07 \x01(\tH\x00\x12\x13\n\timage_url\x18\x08 \x01(\tH\x00\x12\x0c\n\x02id\x18\t \x01(\tH\x00\x12.\n\x0ctorch_tensor\x18\n \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x42\t\n\x07\x63ontent\"\x82\x01\n\rDocumentProto\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32!.docarray.DocumentProto.DataEntry\x1a@\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.docarray.NodeProto:\x02\x38\x01\";\n\x12\x44ocumentArrayProto\x12%\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\x17.docarray.DocumentProtob\x06proto3')
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'docarray_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DOCUMENTPROTO_DATAENTRY._options = None
_DOCUMENTPROTO_DATAENTRY._serialized_options = b'8\001'
_DENSENDARRAYPROTO._serialized_start=58
_DENSENDARRAYPROTO._serialized_end=123
_NDARRAYPROTO._serialized_start=125
_NDARRAYPROTO._serialized_end=228
_NODEPROTO._serialized_start=231
_NODEPROTO._serialized_end=565
_DOCUMENTPROTO._serialized_start=568
_DOCUMENTPROTO._serialized_end=698
_DOCUMENTPROTO_DATAENTRY._serialized_start=634
_DOCUMENTPROTO_DATAENTRY._serialized_end=698
_DOCUMENTARRAYPROTO._serialized_start=700
_DOCUMENTARRAYPROTO._serialized_end=759
# @@protoc_insertion_point(module_scope)
|
"""Tool for the Google Trends"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.google_jobs import GoogleJobsAPIWrapper
class GoogleJobsQueryRun(BaseTool):
"""Tool that queries the Google Jobs API."""
name: str = "google_jobs"
description: str = (
"A wrapper around Google Jobs Search. "
"Useful for when you need to get information about"
"google search Jobs from Google Jobs"
"Input should be a search query."
)
api_wrapper: GoogleJobsAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_wrapper.run(query)
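A hedged invocation sketch; that the wrapper reads a SerpAPI key from the environment is an assumption, and the query string is made up:
# Hypothetical usage of the tool defined above
import os
os.environ["SERPAPI_API_KEY"] = "<your-serpapi-key>"  # placeholder, assumed env var name
tool = GoogleJobsQueryRun(api_wrapper=GoogleJobsAPIWrapper())
# print(tool.run("entry level python developer, remote"))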
|
"""Tool for the Google Trends"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.google_jobs import GoogleJobsAPIWrapper
class GoogleJobsQueryRun(BaseTool): # type: ignore[override]
"""Tool that queries the Google Jobs API."""
name: str = "google_jobs"
description: str = (
"A wrapper around Google Jobs Search. "
"Useful for when you need to get information about"
"google search Jobs from Google Jobs"
"Input should be a search query."
)
api_wrapper: GoogleJobsAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_wrapper.run(query)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_poolformer import *
from .feature_extraction_poolformer import *
from .image_processing_poolformer import *
from .image_processing_poolformer_fast import *
from .modeling_poolformer import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_poolformer import *
from .feature_extraction_poolformer import *
from .image_processing_poolformer import *
from .modeling_poolformer import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
import json
import os
from typing import Dict
import torch
from torch import Tensor, nn
from sentence_transformers.util import fullname, import_from_string
class Dense(nn.Module):
"""
Feed-forward function with activation function.
This layer takes a fixed-sized sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networks (DAN).
Args:
in_features: Size of the input dimension
out_features: Output size
bias: Add a bias vector
activation_function: Pytorch activation function applied on
output
init_weight: Initial value for the matrix of the linear layer
init_bias: Initial value for the bias of the linear layer
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function=nn.Tanh(),
init_weight: Tensor = None,
init_bias: Tensor = None,
):
super(Dense, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: Dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def __repr__(self):
return "Dense({})".format(self.get_config_dict())
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
config["activation_function"] = import_from_string(config["activation_function"])()
model = Dense(**config)
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
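A minimal sketch of the Dense layer in isolation; the dimensions and batch size are arbitrary:
# Standalone forward pass through the Dense layer defined above
import torch
layer = Dense(in_features=768, out_features=128)
features = {"sentence_embedding": torch.randn(4, 768)}
out = layer(features)
print(out["sentence_embedding"].shape)  # torch.Size([4, 128])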
|
import torch
from torch import Tensor
from torch import nn
from typing import Dict
import os
import json
from ..util import fullname, import_from_string
class Dense(nn.Module):
"""Feed-forward function with activiation function.
This layer takes a fixed-sized sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networks (DAN).
:param in_features: Size of the input dimension
:param out_features: Output size
:param bias: Add a bias vector
:param activation_function: Pytorch activation function applied on output
:param init_weight: Initial value for the matrix of the linear layer
:param init_bias: Initial value for the bias of the linear layer
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function=nn.Tanh(),
init_weight: Tensor = None,
init_bias: Tensor = None,
):
super(Dense, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: Dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def __repr__(self):
return "Dense({})".format(self.get_config_dict())
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
config["activation_function"] = import_from_string(config["activation_function"])()
model = Dense(**config)
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert YOSO checkpoints from the original repository. URL: https://github.com/mlpen/YOSO"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
if "model" in orig_key:
orig_key = orig_key.replace("model.", "")
if "norm1" in orig_key:
orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
if "norm2" in orig_key:
orig_key = orig_key.replace("norm2", "output.LayerNorm")
if "norm" in orig_key:
orig_key = orig_key.replace("norm", "LayerNorm")
if "transformer" in orig_key:
layer_num = orig_key.split(".")[0].split("_")[-1]
orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
if "mha.attn" in orig_key:
orig_key = orig_key.replace("mha.attn", "attention.self")
if "mha" in orig_key:
orig_key = orig_key.replace("mha", "attention")
if "W_q" in orig_key:
orig_key = orig_key.replace("W_q", "self.query")
if "W_k" in orig_key:
orig_key = orig_key.replace("W_k", "self.key")
if "W_v" in orig_key:
orig_key = orig_key.replace("W_v", "self.value")
if "ff1" in orig_key:
orig_key = orig_key.replace("ff1", "intermediate.dense")
if "ff2" in orig_key:
orig_key = orig_key.replace("ff2", "output.dense")
if "ff" in orig_key:
orig_key = orig_key.replace("ff", "output.dense")
if "mlm_class" in orig_key:
orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
if "mlm" in orig_key:
orig_key = orig_key.replace("mlm", "cls.predictions.transform")
if "cls" not in orig_key:
orig_key = "yoso." + orig_key
return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key)
if ("pooler" in key) or ("sen_class" in key):
continue
else:
orig_state_dict[rename_key(key)] = val
orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
orig_state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True)["model_state_dict"]
config = YosoConfig.from_json_file(yoso_config_file)
model = YosoForMaskedLM(config)
new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
print(model.load_state_dict(new_state_dict))
model.eval()
model.save_pretrained(pytorch_dump_path)
print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
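To make the renaming rules concrete, a hedged example of rename_key on a representative key; the input key name is illustrative, not taken from a real checkpoint:
# Hypothetical key, chosen to exercise the transformer/mha/W_q rules above
print(rename_key("model.transformer_0.mha.W_q.weight"))
# -> yoso.encoder.layer.0.attention.self.query.weight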
|
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert YOSO checkpoints from the original repository. URL: https://github.com/mlpen/YOSO"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
if "model" in orig_key:
orig_key = orig_key.replace("model.", "")
if "norm1" in orig_key:
orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
if "norm2" in orig_key:
orig_key = orig_key.replace("norm2", "output.LayerNorm")
if "norm" in orig_key:
orig_key = orig_key.replace("norm", "LayerNorm")
if "transformer" in orig_key:
layer_num = orig_key.split(".")[0].split("_")[-1]
orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
if "mha.attn" in orig_key:
orig_key = orig_key.replace("mha.attn", "attention.self")
if "mha" in orig_key:
orig_key = orig_key.replace("mha", "attention")
if "W_q" in orig_key:
orig_key = orig_key.replace("W_q", "self.query")
if "W_k" in orig_key:
orig_key = orig_key.replace("W_k", "self.key")
if "W_v" in orig_key:
orig_key = orig_key.replace("W_v", "self.value")
if "ff1" in orig_key:
orig_key = orig_key.replace("ff1", "intermediate.dense")
if "ff2" in orig_key:
orig_key = orig_key.replace("ff2", "output.dense")
if "ff" in orig_key:
orig_key = orig_key.replace("ff", "output.dense")
if "mlm_class" in orig_key:
orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
if "mlm" in orig_key:
orig_key = orig_key.replace("mlm", "cls.predictions.transform")
if "cls" not in orig_key:
orig_key = "yoso." + orig_key
return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key)
if ("pooler" in key) or ("sen_class" in key):
continue
else:
orig_state_dict[rename_key(key)] = val
orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
orig_state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True)["model_state_dict"]
config = YosoConfig.from_json_file(yoso_config_file)
model = YosoForMaskedLM(config)
new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
print(model.load_state_dict(new_state_dict))
model.eval()
model.save_pretrained(pytorch_dump_path)
print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
|
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
ImageBlock,
LLMMetadata,
MessageRole,
TextBlock,
AudioBlock,
DocumentBlock,
CachePoint,
CacheControl,
)
from llama_index.core.llms.custom import CustomLLM
from llama_index.core.llms.llm import LLM
from llama_index.core.llms.mock import MockLLM
__all__ = [
"CustomLLM",
"LLM",
"ChatMessage",
"ChatResponse",
"ChatResponseAsyncGen",
"ChatResponseGen",
"CompletionResponse",
"CompletionResponseAsyncGen",
"CompletionResponseGen",
"LLMMetadata",
"MessageRole",
"MockLLM",
"ImageBlock",
"TextBlock",
"AudioBlock",
"DocumentBlock",
"CachePoint",
"CacheControl",
]
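A hedged smoke test of the re-exports above; MockLLM's constructor arguments and output behavior are assumptions:
# Hypothetical quick check using the names exported above
from llama_index.core.llms import ChatMessage, MessageRole, MockLLM
llm = MockLLM(max_tokens=8)  # assumed to accept max_tokens
response = llm.chat([ChatMessage(role=MessageRole.USER, content="Hello")])
print(response.message.content)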
|
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
ImageBlock,
LLMMetadata,
MessageRole,
TextBlock,
AudioBlock,
DocumentBlock,
)
from llama_index.core.llms.custom import CustomLLM
from llama_index.core.llms.llm import LLM
from llama_index.core.llms.mock import MockLLM
__all__ = [
"CustomLLM",
"LLM",
"ChatMessage",
"ChatResponse",
"ChatResponseAsyncGen",
"ChatResponseGen",
"CompletionResponse",
"CompletionResponseAsyncGen",
"CompletionResponseGen",
"LLMMetadata",
"MessageRole",
"MockLLM",
"ImageBlock",
"TextBlock",
"AudioBlock",
"DocumentBlock",
]
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for the tf.test.benchmark."""
import os
from google.protobuf import json_format
from tensorflow.core.util import test_log_pb2
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class BenchmarkTest(test.TestCase, benchmark.TensorFlowBenchmark):
def testReportBenchmark(self):
output_dir = self.get_temp_dir() + os.path.sep
os.environ['TEST_REPORT_FILE_PREFIX'] = output_dir
proto_file_path = os.path.join(output_dir,
'BenchmarkTest.testReportBenchmark')
if os.path.exists(proto_file_path):
os.remove(proto_file_path)
self.report_benchmark(
iters=2000,
wall_time=1000,
name='testReportBenchmark',
metrics=[{'name': 'metric_name_1', 'value': 0, 'min_value': 1},
{'name': 'metric_name_2', 'value': 90, 'min_value': 0,
'max_value': 95}])
with open(proto_file_path, 'rb') as f:
benchmark_entries = test_log_pb2.BenchmarkEntries()
benchmark_entries.ParseFromString(f.read())
actual_result = json_format.MessageToDict(
benchmark_entries, preserving_proto_field_name=True,
always_print_fields_with_no_presence=True)['entry'][0]
os.remove(proto_file_path)
expected_result = {
'name': 'BenchmarkTest.testReportBenchmark',
# google.protobuf.json_format.MessageToDict() will convert
# int64 field to string.
'iters': '2000',
'wall_time': 1000,
'cpu_time': 0,
'throughput': 0,
'extras': {},
'metrics': [
{
'name': 'metric_name_1',
'value': 0,
'min_value': 1
},
{
'name': 'metric_name_2',
'value': 90,
'min_value': 0,
'max_value': 95
}
]
}
self.assertEqual(2000, benchmark_entries.entry[0].iters)
self.assertDictEqual(expected_result, actual_result)
if __name__ == '__main__':
test.main()
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for the tf.test.benchmark."""
import os
from google.protobuf import json_format
from tensorflow.core.util import test_log_pb2
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class BenchmarkTest(test.TestCase, benchmark.TensorFlowBenchmark):
def testReportBenchmark(self):
output_dir = self.get_temp_dir() + os.path.sep
os.environ['TEST_REPORT_FILE_PREFIX'] = output_dir
proto_file_path = os.path.join(output_dir,
'BenchmarkTest.testReportBenchmark')
if os.path.exists(proto_file_path):
os.remove(proto_file_path)
self.report_benchmark(
iters=2000,
wall_time=1000,
name='testReportBenchmark',
metrics=[{'name': 'metric_name_1', 'value': 0, 'min_value': 1},
{'name': 'metric_name_2', 'value': 90, 'min_value': 0,
'max_value': 95}])
with open(proto_file_path, 'rb') as f:
benchmark_entries = test_log_pb2.BenchmarkEntries()
benchmark_entries.ParseFromString(f.read())
actual_result = json_format.MessageToDict(
benchmark_entries, preserving_proto_field_name=True,
including_default_value_fields=True)['entry'][0]
os.remove(proto_file_path)
expected_result = {
'name': 'BenchmarkTest.testReportBenchmark',
# google.protobuf.json_format.MessageToDict() will convert
# int64 field to string.
'iters': '2000',
'wall_time': 1000,
'cpu_time': 0,
'throughput': 0,
'extras': {},
'metrics': [
{
'name': 'metric_name_1',
'value': 0,
'min_value': 1
},
{
'name': 'metric_name_2',
'value': 90,
'min_value': 0,
'max_value': 95
}
]
}
self.assertEqual(2000, benchmark_entries.entry[0].iters)
self.assertDictEqual(expected_result, actual_result)
if __name__ == '__main__':
test.main()
|
"""Conftest."""
from typing import List
import pytest
from llama_index.core.schema import Document
@pytest.fixture()
def documents() -> List[Document]:
"""Get documents."""
# NOTE: one document for now
doc_text = (
"Hello world.\nThis is a test.\nThis is another test.\nThis is a test v2."
)
return [Document(text=doc_text)]
|
"""Conftest."""
from typing import List
import pytest
from llama_index.core.schema import Document
@pytest.fixture()
def documents() -> List[Document]:
"""Get documents."""
# NOTE: one document for now
doc_text = (
"Hello world.\n"
"This is a test.\n"
"This is another test.\n"
"This is a test v2."
)
return [Document(text=doc_text)]
|
"""Chroma Auto-retrieval Pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.indices.vector_store.retrievers import (
VectorIndexAutoRetriever,
)
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.schema import TextNode
from llama_index.core.storage.storage_context import StorageContext
from llama_index.core.vector_stores.types import VectorStoreInfo
from llama_index.vector_stores.chroma import ChromaVectorStore
class ChromaAutoretrievalPack(BaseLlamaPack):
"""Chroma auto-retrieval pack."""
def __init__(
self,
collection_name: str,
vector_store_info: VectorStoreInfo,
nodes: Optional[List[TextNode]] = None,
client: Optional[Any] = None,
**kwargs: Any,
) -> None:
"""Init params."""
import chromadb
chroma_client = client or chromadb.EphemeralClient()
chroma_collection = chroma_client.get_or_create_collection(collection_name)
self._vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
if nodes is not None:
self._storage_context = StorageContext.from_defaults(
vector_store=self._vector_store
)
self._index = VectorStoreIndex(
nodes, storage_context=self._storage_context, **kwargs
)
else:
self._index = VectorStoreIndex.from_vector_store(
self._vector_store, **kwargs
)
self._storage_context = self._index.storage_context
self.retriever = VectorIndexAutoRetriever(
self._index, vector_store_info=vector_store_info
)
self.query_engine = RetrieverQueryEngine(self.retriever)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"vector_store": self._vector_store,
"storage_context": self._storage_context,
"index": self._index,
"retriever": self.retriever,
"query_engine": self.query_engine,
}
def retrieve(self, query_str: str) -> Any:
"""Retrieve."""
return self.retriever.retrieve(query_str)
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
|
"""Chroma Auto-retrieval Pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.indices.vector_store.retrievers import (
VectorIndexAutoRetriever,
)
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.schema import TextNode
from llama_index.core.storage.storage_context import StorageContext
from llama_index.core.vector_stores.types import VectorStoreInfo
from llama_index.vector_stores.chroma import ChromaVectorStore
class ChromaAutoretrievalPack(BaseLlamaPack):
"""Chroma auto-retrieval pack."""
def __init__(
self,
collection_name: str,
vector_store_info: VectorStoreInfo,
nodes: Optional[List[TextNode]] = None,
client: Optional[Any] = None,
**kwargs: Any,
) -> None:
"""Init params."""
import chromadb
chroma_client = client or chromadb.EphemeralClient()
chroma_collection = chroma_client.get_or_create_collection(collection_name)
self._vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
if nodes is not None:
self._storage_context = StorageContext.from_defaults(
vector_store=self._vector_store
)
self._index = VectorStoreIndex(
nodes, storage_context=self._storage_context, **kwargs
)
else:
self._index = VectorStoreIndex.from_vector_store(
self._vector_store, **kwargs
)
self._storage_context = self._index.storage_context
self.retriever = VectorIndexAutoRetriever(
self._index, vector_store_info=vector_store_info
)
self.query_engine = RetrieverQueryEngine(self.retriever)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"vector_store": self._vector_store,
"storage_context": self._storage_context,
"index": self._index,
"retriever": self.retriever,
"query_engine": self.query_engine,
}
def retrieve(self, query_str: str) -> Any:
"""Retrieve."""
return self.retriever.retrieve(query_str)
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
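A hedged construction sketch for the pack; the collection name, schema, and node text are illustrative, and running a query additionally assumes an embedding model is configured:
# Illustrative setup only; requires chromadb and a configured embedding model
from llama_index.core.schema import TextNode
from llama_index.core.vector_stores.types import MetadataInfo, VectorStoreInfo
vector_store_info = VectorStoreInfo(
    content_info="Short bios of notable people",
    metadata_info=[
        MetadataInfo(name="category", type="str", description="Field the person works in"),
    ],
)
nodes = [TextNode(text="Ada Lovelace wrote the first program.", metadata={"category": "CS"})]
pack = ChromaAutoretrievalPack("demo_collection", vector_store_info, nodes=nodes)
# response = pack.run("Tell me about computer scientists")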
|
# Copyright 2025 Stability AI, The HuggingFace Team and The InstantX Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils import deprecate, logging
from .controlnets.controlnet_sd3 import SD3ControlNetModel, SD3ControlNetOutput, SD3MultiControlNetModel
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class SD3ControlNetOutput(SD3ControlNetOutput):
def __init__(self, *args, **kwargs):
deprecation_message = "Importing `SD3ControlNetOutput` from `diffusers.models.controlnet_sd3` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_sd3 import SD3ControlNetOutput`, instead."
deprecate("diffusers.models.controlnet_sd3.SD3ControlNetOutput", "0.34", deprecation_message)
super().__init__(*args, **kwargs)
class SD3ControlNetModel(SD3ControlNetModel):
def __init__(
self,
sample_size: int = 128,
patch_size: int = 2,
in_channels: int = 16,
num_layers: int = 18,
attention_head_dim: int = 64,
num_attention_heads: int = 18,
joint_attention_dim: int = 4096,
caption_projection_dim: int = 1152,
pooled_projection_dim: int = 2048,
out_channels: int = 16,
pos_embed_max_size: int = 96,
extra_conditioning_channels: int = 0,
):
deprecation_message = "Importing `SD3ControlNetModel` from `diffusers.models.controlnet_sd3` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_sd3 import SD3ControlNetModel`, instead."
deprecate("diffusers.models.controlnet_sd3.SD3ControlNetModel", "0.34", deprecation_message)
super().__init__(
sample_size=sample_size,
patch_size=patch_size,
in_channels=in_channels,
num_layers=num_layers,
attention_head_dim=attention_head_dim,
num_attention_heads=num_attention_heads,
joint_attention_dim=joint_attention_dim,
caption_projection_dim=caption_projection_dim,
pooled_projection_dim=pooled_projection_dim,
out_channels=out_channels,
pos_embed_max_size=pos_embed_max_size,
extra_conditioning_channels=extra_conditioning_channels,
)
class SD3MultiControlNetModel(SD3MultiControlNetModel):
def __init__(self, *args, **kwargs):
deprecation_message = "Importing `SD3MultiControlNetModel` from `diffusers.models.controlnet_sd3` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_sd3 import SD3MultiControlNetModel`, instead."
deprecate("diffusers.models.controlnet_sd3.SD3MultiControlNetModel", "0.34", deprecation_message)
super().__init__(*args, **kwargs)
|
# Copyright 2024 Stability AI, The HuggingFace Team and The InstantX Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils import deprecate, logging
from .controlnets.controlnet_sd3 import SD3ControlNetModel, SD3ControlNetOutput, SD3MultiControlNetModel
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class SD3ControlNetOutput(SD3ControlNetOutput):
def __init__(self, *args, **kwargs):
deprecation_message = "Importing `SD3ControlNetOutput` from `diffusers.models.controlnet_sd3` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_sd3 import SD3ControlNetOutput`, instead."
deprecate("diffusers.models.controlnet_sd3.SD3ControlNetOutput", "0.34", deprecation_message)
super().__init__(*args, **kwargs)
class SD3ControlNetModel(SD3ControlNetModel):
def __init__(
self,
sample_size: int = 128,
patch_size: int = 2,
in_channels: int = 16,
num_layers: int = 18,
attention_head_dim: int = 64,
num_attention_heads: int = 18,
joint_attention_dim: int = 4096,
caption_projection_dim: int = 1152,
pooled_projection_dim: int = 2048,
out_channels: int = 16,
pos_embed_max_size: int = 96,
extra_conditioning_channels: int = 0,
):
deprecation_message = "Importing `SD3ControlNetModel` from `diffusers.models.controlnet_sd3` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_sd3 import SD3ControlNetModel`, instead."
deprecate("diffusers.models.controlnet_sd3.SD3ControlNetModel", "0.34", deprecation_message)
super().__init__(
sample_size=sample_size,
patch_size=patch_size,
in_channels=in_channels,
num_layers=num_layers,
attention_head_dim=attention_head_dim,
num_attention_heads=num_attention_heads,
joint_attention_dim=joint_attention_dim,
caption_projection_dim=caption_projection_dim,
pooled_projection_dim=pooled_projection_dim,
out_channels=out_channels,
pos_embed_max_size=pos_embed_max_size,
extra_conditioning_channels=extra_conditioning_channels,
)
class SD3MultiControlNetModel(SD3MultiControlNetModel):
def __init__(self, *args, **kwargs):
deprecation_message = "Importing `SD3MultiControlNetModel` from `diffusers.models.controlnet_sd3` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_sd3 import SD3MultiControlNetModel`, instead."
deprecate("diffusers.models.controlnet_sd3.SD3MultiControlNetModel", "0.34", deprecation_message)
super().__init__(*args, **kwargs)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.arize_callback import ArizeCallbackHandler
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ArizeCallbackHandler": "langchain_community.callbacks.arize_callback",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ArizeCallbackHandler",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.arize_callback import ArizeCallbackHandler
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ArizeCallbackHandler": "langchain_community.callbacks.arize_callback"
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ArizeCallbackHandler",
]
|
from pathlib import Path
from typing import Any, Optional, TypedDict
from tomlkit import load
def get_package_root(cwd: Optional[Path] = None) -> Path:
# traverse path for routes to host (any directory holding a pyproject.toml file)
package_root = Path.cwd() if cwd is None else cwd
visited: set[Path] = set()
while package_root not in visited:
visited.add(package_root)
pyproject_path = package_root / "pyproject.toml"
if pyproject_path.exists():
return package_root
package_root = package_root.parent
raise FileNotFoundError("No pyproject.toml found")
class LangServeExport(TypedDict):
"""
Fields from pyproject.toml that are relevant to LangServe
Attributes:
module: The module to import from, tool.langserve.export_module
attr: The attribute to import from the module, tool.langserve.export_attr
package_name: The name of the package, tool.poetry.name
"""
module: str
attr: str
package_name: str
def get_langserve_export(filepath: Path) -> LangServeExport:
with open(filepath) as f:
data: dict[str, Any] = load(f)
try:
module = data["tool"]["langserve"]["export_module"]
attr = data["tool"]["langserve"]["export_attr"]
package_name = data["tool"]["poetry"]["name"]
except KeyError as e:
raise KeyError("Invalid LangServe PyProject.toml") from e
return LangServeExport(module=module, attr=attr, package_name=package_name)
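A self-contained sketch of the pyproject.toml shape that get_langserve_export reads; the module and package names are made up:
# Parse an in-memory pyproject.toml with the fields consumed above (values hypothetical)
from io import StringIO
from tomlkit import load
pyproject = StringIO(
    '[tool.langserve]\n'
    'export_module = "my_app.server"\n'
    'export_attr = "app"\n'
    '\n'
    '[tool.poetry]\n'
    'name = "my-app"\n'
)
data = load(pyproject)
print(data["tool"]["langserve"]["export_module"])  # my_app.server
print(data["tool"]["poetry"]["name"])  # my-app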
|
from pathlib import Path
from typing import Any, Dict, Optional, Set, TypedDict
from tomlkit import load
def get_package_root(cwd: Optional[Path] = None) -> Path:
# traverse path for routes to host (any directory holding a pyproject.toml file)
package_root = Path.cwd() if cwd is None else cwd
visited: Set[Path] = set()
while package_root not in visited:
visited.add(package_root)
pyproject_path = package_root / "pyproject.toml"
if pyproject_path.exists():
return package_root
package_root = package_root.parent
raise FileNotFoundError("No pyproject.toml found")
class LangServeExport(TypedDict):
"""
Fields from pyproject.toml that are relevant to LangServe
Attributes:
module: The module to import from, tool.langserve.export_module
attr: The attribute to import from the module, tool.langserve.export_attr
package_name: The name of the package, tool.poetry.name
"""
module: str
attr: str
package_name: str
def get_langserve_export(filepath: Path) -> LangServeExport:
with open(filepath) as f:
data: Dict[str, Any] = load(f)
try:
module = data["tool"]["langserve"]["export_module"]
attr = data["tool"]["langserve"]["export_attr"]
package_name = data["tool"]["poetry"]["name"]
except KeyError as e:
raise KeyError("Invalid LangServe PyProject.toml") from e
return LangServeExport(module=module, attr=attr, package_name=package_name)
|
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer the backend from the prefix (LMDB and Memcache are not supported yet)
# data_root = 's3://openmmlab/datasets/segmentation/cityscapes/'
# Method 2: use backend_args (named file_client_args in versions before 3.0.0rc6)
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/segmentation/',
# 'data/': 's3://openmmlab/datasets/segmentation/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=[(2048, 800), (2048, 1024)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
# If you don't have ground-truth annotations, remove this step from the pipeline
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_train.json',
data_prefix=dict(img='leftImg8bit/train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = [
dict(
type='CocoMetric',
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
metric=['bbox', 'segm'],
backend_args=backend_args),
dict(
type='CityScapesMetric',
seg_prefix=data_root + 'gtFine/val',
outfile_prefix='./work_dirs/cityscapes_metric/instance',
backend_args=backend_args)
]
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/instancesonly_filtered_gtFine_test.json',
# data_prefix=dict(img='leftImg8bit/test/'),
# test_mode=True,
# filter_cfg=dict(filter_empty_gt=True, min_size=32),
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CityScapesMetric',
# format_only=True,
# outfile_prefix='./work_dirs/cityscapes_metric/test')
|
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=[(2048, 800), (2048, 1024)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
# If you don't have ground-truth annotations, remove this step from the pipeline
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_train.json',
data_prefix=dict(img='leftImg8bit/train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = [
dict(
type='CocoMetric',
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
metric=['bbox', 'segm']),
dict(
type='CityScapesMetric',
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
seg_prefix=data_root + 'gtFine/val',
outfile_prefix='./work_dirs/cityscapes_metric/instance')
]
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/instancesonly_filtered_gtFine_test.json',
# data_prefix=dict(img='leftImg8bit/test/'),
# test_mode=True,
# filter_cfg=dict(filter_empty_gt=True, min_size=32),
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CityScapesMetric',
# format_only=True,
# outfile_prefix='./work_dirs/cityscapes_metric/test')
|
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `SentenceTransformerTrainer` class to train models.
See https://www.sbert.net/docs/sentence_transformer/training_overview.html for more information.
In particular, you can pass "no_duplicates" to `batch_sampler` in the `SentenceTransformerTrainingArguments` class.
"""
from __future__ import annotations
import math
import random
class NoDuplicatesDataLoader:
def __init__(self, train_examples, batch_size):
"""
A special data loader to be used with MultipleNegativesRankingLoss.
The data loader ensures that there are no duplicate sentences within the same batch
"""
self.batch_size = batch_size
self.data_pointer = 0
self.collate_fn = None
self.train_examples = train_examples
random.shuffle(self.train_examples)
def __iter__(self):
for _ in range(self.__len__()):
batch = []
texts_in_batch = set()
while len(batch) < self.batch_size:
example = self.train_examples[self.data_pointer]
valid_example = True
for text in example.texts:
if not isinstance(text, str):
text = str(text)
if text.strip().lower() in texts_in_batch:
valid_example = False
break
if valid_example:
batch.append(example)
for text in example.texts:
if not isinstance(text, str):
text = str(text)
texts_in_batch.add(text.strip().lower())
self.data_pointer += 1
if self.data_pointer >= len(self.train_examples):
self.data_pointer = 0
random.shuffle(self.train_examples)
yield self.collate_fn(batch) if self.collate_fn is not None else batch
def __len__(self):
return math.floor(len(self.train_examples) / self.batch_size)
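# Usage sketch (illustrative, not part of the original file): given
# InputExample-style objects whose `.texts` holds the sentences, e.g.
#   examples = [InputExample(texts=["a", "b"]), InputExample(texts=["c", "d"]),
#               InputExample(texts=["a", "e"])]
#   loader = NoDuplicatesDataLoader(examples, batch_size=2)
# each yielded batch contains batch_size examples whose texts are pairwise
# distinct; an example that would repeat a text already in the batch is
# skipped and picked up in a later batch.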
|
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `SentenceTransformerTrainer` class to train models.
See https://www.sbert.net/docs/sentence_transformer/training_overview.html for more information.
In particular, you can pass "no_duplicates" to `batch_sampler` in the `SentenceTransformerTrainingArguments` class.
"""
from __future__ import annotations
import math
import random
class NoDuplicatesDataLoader:
def __init__(self, train_examples, batch_size):
"""
A special data loader to be used with MultipleNegativesRankingLoss.
The data loader ensures that there are no duplicate sentences within the same batch
"""
self.batch_size = batch_size
self.data_pointer = 0
self.collate_fn = None
self.train_examples = train_examples
random.shuffle(self.train_examples)
def __iter__(self):
for _ in range(self.__len__()):
batch = []
texts_in_batch = set()
while len(batch) < self.batch_size:
example = self.train_examples[self.data_pointer]
valid_example = True
for text in example.texts:
if text.strip().lower() in texts_in_batch:
valid_example = False
break
if valid_example:
batch.append(example)
for text in example.texts:
texts_in_batch.add(text.strip().lower())
self.data_pointer += 1
if self.data_pointer >= len(self.train_examples):
self.data_pointer = 0
random.shuffle(self.train_examples)
yield self.collate_fn(batch) if self.collate_fn is not None else batch
def __len__(self):
return math.floor(len(self.train_examples) / self.batch_size)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.visualization.draw_bounding_boxes import (
draw_bounding_boxes as draw_bounding_boxes,
)
from keras.src.visualization.draw_segmentation_masks import (
draw_segmentation_masks as draw_segmentation_masks,
)
from keras.src.visualization.plot_bounding_box_gallery import (
plot_bounding_box_gallery as plot_bounding_box_gallery,
)
from keras.src.visualization.plot_image_gallery import (
plot_image_gallery as plot_image_gallery,
)
from keras.src.visualization.plot_segmentation_mask_gallery import (
plot_segmentation_mask_gallery as plot_segmentation_mask_gallery,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.visualization.draw_bounding_boxes import draw_bounding_boxes
from keras.src.visualization.draw_segmentation_masks import (
draw_segmentation_masks,
)
from keras.src.visualization.plot_bounding_box_gallery import (
plot_bounding_box_gallery,
)
from keras.src.visualization.plot_image_gallery import plot_image_gallery
from keras.src.visualization.plot_segmentation_mask_gallery import (
plot_segmentation_mask_gallery,
)
|
_base_ = './mask-rcnn_r50_fpn_gn-all_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_gn')))
|
_base_ = './mask_rcnn_r50_fpn_gn-all_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_gn')))
|
"""
This examples trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity of the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CECorrelationEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Define our Cross-Encoder
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_stsbenchmark-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base as base model and set num_labels=1, which predicts a continuous score between 0 and 1
model = CrossEncoder("distilroberta-base", num_labels=1)
# Read STSb dataset
logger.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
else:
# As we want to get symmetric scores, i.e. CrossEncoder(A,B) = CrossEncoder(B,A), we pass both combinations to the train set
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
train_samples.append(InputExample(texts=[row["sentence2"], row["sentence1"]], label=score))
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CECorrelationEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
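# e.g. STSb has 5,749 train pairs; added in both directions above, that gives
# ~11,498 samples -> ~719 batches of 16, so warmup_steps ~= ceil(719 * 4 * 0.1) = 288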
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##### Load model and eval on test set
model = CrossEncoder(model_save_path)
evaluator = CECorrelationEvaluator.from_input_examples(test_samples, name="sts-test")
evaluator(model)
|
"""
This examples trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity of the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CECorrelationEvaluator
from sentence_transformers import InputExample
import logging
from datetime import datetime
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Define our Cross-Encoder
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_stsbenchmark-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base as base model and set num_labels=1, which predicts a continuous score between 0 and 1
model = CrossEncoder("distilroberta-base", num_labels=1)
# Read STSb dataset
logger.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
else:
# As we want to get symmetric scores, i.e. CrossEncoder(A,B) = CrossEncoder(B,A), we pass both combinations to the train set
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
train_samples.append(InputExample(texts=[row["sentence2"], row["sentence1"]], label=score))
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CECorrelationEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##### Load model and eval on test set
model = CrossEncoder(model_save_path)
evaluator = CECorrelationEvaluator.from_input_examples(test_samples, name="sts-test")
evaluator(model)
|
from typing import TYPE_CHECKING, Any, Generic, Type, TypeVar, Union
import numpy as np
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.utils._internal.misc import is_tf_available, is_torch_available # noqa
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
# Below is a hack to make the type checker happy. `AnyTensor` is defined as a class with the same underlying
# behavior as `Union[TorchTensor, TensorFlowTensor, NdArray]`, so it is fine to use `AnyTensor` as
# the type of a `tensor` field in a `BaseDoc` class.
AnyTensor = Union[NdArray]
if torch_available and tf_available:
AnyTensor = Union[NdArray, TorchTensor, TensorFlowTensor] # type: ignore
elif torch_available:
AnyTensor = Union[NdArray, TorchTensor] # type: ignore
elif tf_available:
AnyTensor = Union[NdArray, TensorFlowTensor] # type: ignore
else:
T = TypeVar("T", bound="AnyTensor")
ShapeT = TypeVar('ShapeT')
class AnyTensor(AbstractTensor, Generic[ShapeT]):
"""
    Represents a tensor object that can be used with TensorFlow, PyTorch, and NumPy.
    !!! note:
        when doing type checking (mypy or the PyCharm type checker), this class is actually replaced by a Union of the three
        tensor types. You can reason about this class as if it were a Union.
```python
from docarray import BaseDoc
from docarray.typing import AnyTensor
class MyTensorDoc(BaseDoc):
tensor: AnyTensor
# Example usage with TensorFlow:
# import tensorflow as tf
# doc = MyTensorDoc(tensor=tf.zeros(1000, 2))
# Example usage with PyTorch:
import torch
doc = MyTensorDoc(tensor=torch.zeros(1000, 2))
# Example usage with NumPy:
import numpy as np
doc = MyTensorDoc(tensor=np.zeros((1000, 2)))
```
"""
def __getitem__(self: T, item):
pass
def __setitem__(self, index, value):
pass
def __iter__(self):
pass
def __len__(self):
pass
@classmethod
def _docarray_from_native(cls: Type[T], value: Any):
raise RuntimeError(f'This method should not be called on {cls}.')
@staticmethod
def get_comp_backend():
raise RuntimeError('This method should not be called on AnyTensor.')
def to_protobuf(self):
raise RuntimeError(f'This method should not be called on {self.__class__}.')
def _docarray_to_json_compatible(self):
raise RuntimeError(f'This method should not be called on {self.__class__}.')
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T):
raise RuntimeError(f'This method should not be called on {cls}.')
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: "ModelField",
config: "BaseConfig",
):
# Check for TorchTensor first, then TensorFlowTensor, then NdArray
if torch_available:
if isinstance(value, TorchTensor):
return value
elif isinstance(value, torch.Tensor):
return TorchTensor._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return value
elif isinstance(value, tf.Tensor):
return TensorFlowTensor._docarray_from_native(value) # noqa
try:
return NdArray.validate(value, field, config)
except Exception as e: # noqa
print(e)
pass
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
|
from typing import TYPE_CHECKING, Any, Generic, Type, TypeVar, Union
import numpy as np
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.utils._internal.misc import is_tf_available, is_torch_available # noqa
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
# Below is a hack to make the type checker happy. `AnyTensor` is defined as a class with the same underlying
# behavior as `Union[TorchTensor, TensorFlowTensor, NdArray]`, so it is fine to use `AnyTensor` as
# the type of a `tensor` field in a `BaseDoc` class.
AnyTensor = Union[NdArray]
if torch_available and tf_available:
AnyTensor = Union[NdArray, TorchTensor, TensorFlowTensor] # type: ignore
elif torch_available:
AnyTensor = Union[NdArray, TorchTensor] # type: ignore
elif tf_available:
AnyTensor = Union[NdArray, TensorFlowTensor] # type: ignore
else:
T = TypeVar("T", bound="AnyTensor")
ShapeT = TypeVar('ShapeT')
class AnyTensor(AbstractTensor, Generic[ShapeT]):
"""
    Represents a tensor object that can be used with TensorFlow, PyTorch, and NumPy.
---
'''python
from docarray import BaseDoc
from docarray.typing import AnyTensor
class MyTensorDoc(BaseDoc):
tensor: AnyTensor
# Example usage with TensorFlow:
import tensorflow as tf
doc = MyTensorDoc(tensor=tf.zeros(1000, 2))
# Example usage with PyTorch:
import torch
doc = MyTensorDoc(tensor=torch.zeros(1000, 2))
# Example usage with NumPy:
import numpy as np
doc = MyTensorDoc(tensor=np.zeros((1000, 2)))
'''
---
Returns:
Union[TorchTensor, TensorFlowTensor, NdArray]: The validated and converted tensor.
Raises:
TypeError: If the input value is not a compatible type (torch.Tensor, tensorflow.Tensor, numpy.ndarray).
"""
def __getitem__(self: T, item):
pass
def __setitem__(self, index, value):
pass
def __iter__(self):
pass
def __len__(self):
pass
@classmethod
def _docarray_from_native(cls: Type[T], value: Any):
raise RuntimeError(f'This method should not be called on {cls}.')
@staticmethod
def get_comp_backend():
raise RuntimeError('This method should not be called on AnyTensor.')
def to_protobuf(self):
raise RuntimeError(f'This method should not be called on {self.__class__}.')
def _docarray_to_json_compatible(self):
raise RuntimeError(f'This method should not be called on {self.__class__}.')
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T):
raise RuntimeError(f'This method should not be called on {cls}.')
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: "ModelField",
config: "BaseConfig",
):
# Check for TorchTensor first, then TensorFlowTensor, then NdArray
if torch_available:
if isinstance(value, TorchTensor):
return value
elif isinstance(value, torch.Tensor):
return TorchTensor._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return value
elif isinstance(value, tf.Tensor):
return TensorFlowTensor._docarray_from_native(value) # noqa
try:
return NdArray.validate(value, field, config)
except Exception as e: # noqa
print(e)
pass
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
|
from typing import overload
from urllib.parse import urlparse
from backend.blocks.github._auth import (
GithubCredentials,
GithubFineGrainedAPICredentials,
)
from backend.util.request import URL, Requests
@overload
def _convert_to_api_url(url: str) -> str: ...
@overload
def _convert_to_api_url(url: URL) -> URL: ...
def _convert_to_api_url(url: str | URL) -> str | URL:
"""
Converts a standard GitHub URL to the corresponding GitHub API URL.
Handles repository URLs, issue URLs, pull request URLs, and more.
"""
if url_as_str := isinstance(url, str):
url = urlparse(url)
path_parts = url.path.strip("/").split("/")
if len(path_parts) >= 2:
owner, repo = path_parts[0], path_parts[1]
api_base = f"https://api.github.com/repos/{owner}/{repo}"
if len(path_parts) > 2:
additional_path = "/".join(path_parts[2:])
api_url = f"{api_base}/{additional_path}"
else:
# Repository base URL
api_url = api_base
else:
raise ValueError("Invalid GitHub URL format.")
return api_url if url_as_str else urlparse(api_url)
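# Illustrative mappings produced by the parsing above:
#   https://github.com/owner/repo           -> https://api.github.com/repos/owner/repo
#   https://github.com/owner/repo/issues/42 -> https://api.github.com/repos/owner/repo/issues/42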
def _get_headers(credentials: GithubCredentials) -> dict[str, str]:
return {
"Authorization": credentials.auth_header(),
"Accept": "application/vnd.github.v3+json",
}
def convert_comment_url_to_api_endpoint(comment_url: str) -> str:
"""
Converts a GitHub comment URL (web interface) to the appropriate API endpoint URL.
Handles:
1. Issue/PR comments: #issuecomment-{id}
2. PR review comments: #discussion_r{id}
Returns the appropriate API endpoint path for the comment.
"""
# First, check if this is already an API URL
parsed_url = urlparse(comment_url)
if parsed_url.hostname == "api.github.com":
return comment_url
# Replace pull with issues for comment endpoints
if "/pull/" in comment_url:
comment_url = comment_url.replace("/pull/", "/issues/")
# Handle issue/PR comments (#issuecomment-xxx)
if "#issuecomment-" in comment_url:
base_url, comment_part = comment_url.split("#issuecomment-")
comment_id = comment_part
# Extract repo information from base URL
parsed_url = urlparse(base_url)
path_parts = parsed_url.path.strip("/").split("/")
owner, repo = path_parts[0], path_parts[1]
# Construct API URL for issue comments
return (
f"https://api.github.com/repos/{owner}/{repo}/issues/comments/{comment_id}"
)
# Handle PR review comments (#discussion_r)
elif "#discussion_r" in comment_url:
base_url, comment_part = comment_url.split("#discussion_r")
comment_id = comment_part
# Extract repo information from base URL
parsed_url = urlparse(base_url)
path_parts = parsed_url.path.strip("/").split("/")
owner, repo = path_parts[0], path_parts[1]
# Construct API URL for PR review comments
return (
f"https://api.github.com/repos/{owner}/{repo}/pulls/comments/{comment_id}"
)
# If no specific comment identifiers are found, use the general URL conversion
return _convert_to_api_url(comment_url)
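# Illustrative mappings produced by the handling above:
#   https://github.com/o/r/pull/5#issuecomment-99 -> https://api.github.com/repos/o/r/issues/comments/99
#   https://github.com/o/r/pull/5#discussion_r42  -> https://api.github.com/repos/o/r/pulls/comments/42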
def get_api(
credentials: GithubCredentials | GithubFineGrainedAPICredentials,
convert_urls: bool = True,
) -> Requests:
return Requests(
trusted_origins=["https://api.github.com", "https://github.com"],
extra_url_validator=_convert_to_api_url if convert_urls else None,
extra_headers=_get_headers(credentials),
)
|
from urllib.parse import urlparse
from backend.blocks.github._auth import (
GithubCredentials,
GithubFineGrainedAPICredentials,
)
from backend.util.request import Requests
def _convert_to_api_url(url: str) -> str:
"""
Converts a standard GitHub URL to the corresponding GitHub API URL.
Handles repository URLs, issue URLs, pull request URLs, and more.
"""
parsed_url = urlparse(url)
path_parts = parsed_url.path.strip("/").split("/")
if len(path_parts) >= 2:
owner, repo = path_parts[0], path_parts[1]
api_base = f"https://api.github.com/repos/{owner}/{repo}"
if len(path_parts) > 2:
additional_path = "/".join(path_parts[2:])
api_url = f"{api_base}/{additional_path}"
else:
# Repository base URL
api_url = api_base
else:
raise ValueError("Invalid GitHub URL format.")
return api_url
def _get_headers(credentials: GithubCredentials) -> dict[str, str]:
return {
"Authorization": credentials.auth_header(),
"Accept": "application/vnd.github.v3+json",
}
def convert_comment_url_to_api_endpoint(comment_url: str) -> str:
"""
Converts a GitHub comment URL (web interface) to the appropriate API endpoint URL.
Handles:
1. Issue/PR comments: #issuecomment-{id}
2. PR review comments: #discussion_r{id}
Returns the appropriate API endpoint path for the comment.
"""
# First, check if this is already an API URL
parsed_url = urlparse(comment_url)
if parsed_url.hostname == "api.github.com":
return comment_url
# Replace pull with issues for comment endpoints
if "/pull/" in comment_url:
comment_url = comment_url.replace("/pull/", "/issues/")
# Handle issue/PR comments (#issuecomment-xxx)
if "#issuecomment-" in comment_url:
base_url, comment_part = comment_url.split("#issuecomment-")
comment_id = comment_part
# Extract repo information from base URL
parsed_url = urlparse(base_url)
path_parts = parsed_url.path.strip("/").split("/")
owner, repo = path_parts[0], path_parts[1]
# Construct API URL for issue comments
return (
f"https://api.github.com/repos/{owner}/{repo}/issues/comments/{comment_id}"
)
# Handle PR review comments (#discussion_r)
elif "#discussion_r" in comment_url:
base_url, comment_part = comment_url.split("#discussion_r")
comment_id = comment_part
# Extract repo information from base URL
parsed_url = urlparse(base_url)
path_parts = parsed_url.path.strip("/").split("/")
owner, repo = path_parts[0], path_parts[1]
# Construct API URL for PR review comments
return (
f"https://api.github.com/repos/{owner}/{repo}/pulls/comments/{comment_id}"
)
# If no specific comment identifiers are found, use the general URL conversion
return _convert_to_api_url(comment_url)
def get_api(
credentials: GithubCredentials | GithubFineGrainedAPICredentials,
convert_urls: bool = True,
) -> Requests:
return Requests(
trusted_origins=["https://api.github.com", "https://github.com"],
extra_url_validator=_convert_to_api_url if convert_urls else None,
extra_headers=_get_headers(credentials),
)
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
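# If transformers/torch are missing, the real pipelines are replaced below by
# dummy placeholder objects that raise an informative error when used;
# otherwise the pipeline classes are registered for lazy import via _LazyModule.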
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["pipeline_sana"] = ["SanaPipeline"]
_import_structure["pipeline_sana_controlnet"] = ["SanaControlNetPipeline"]
_import_structure["pipeline_sana_sprint"] = ["SanaSprintPipeline"]
_import_structure["pipeline_sana_sprint_img2img"] = ["SanaSprintImg2ImgPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .pipeline_sana import SanaPipeline
from .pipeline_sana_controlnet import SanaControlNetPipeline
from .pipeline_sana_sprint import SanaSprintPipeline
from .pipeline_sana_sprint_img2img import SanaSprintImg2ImgPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["pipeline_sana"] = ["SanaPipeline"]
_import_structure["pipeline_sana_controlnet"] = ["SanaControlNetPipeline"]
_import_structure["pipeline_sana_sprint"] = ["SanaSprintPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .pipeline_sana import SanaPipeline
from .pipeline_sana_controlnet import SanaControlNetPipeline
from .pipeline_sana_sprint import SanaSprintPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
|
from pathlib import Path
import click
from rich.console import Console
from rich.theme import Theme
from .pkg import pkg
from .test import test
LLAMA_DEV_THEME = Theme(
{
"repr.path": "",
"repr.filename": "",
"repr.str": "",
"traceback.note": "cyan",
"info": "dim cyan",
"warning": "magenta",
"error": "bold red",
}
)
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
@click.version_option()
@click.option(
"--repo-root",
default=".",
help="Path to the llama_index repository, defaults to '.'",
)
@click.pass_context
def cli(ctx, repo_root: str):
"""The official CLI for development, testing, and automation in the LlamaIndex monorepo."""
ctx.obj = {
"console": Console(theme=LLAMA_DEV_THEME, soft_wrap=True),
"repo_root": Path(repo_root).resolve(),
}
cli.add_command(pkg)
cli.add_command(test)
|
from pathlib import Path
import click
from rich.console import Console
from rich.theme import Theme
from .pkg import pkg
from .test import test
LLAMA_DEV_THEME = Theme(
{
"repr.path": "",
"repr.filename": "",
"repr.str": "",
"traceback.note": "cyan",
"info": "dim cyan",
"warning": "magenta",
"error": "bold red",
}
)
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
@click.version_option()
@click.option(
"--repo-root",
default=".",
help="Path to the llama_index repository, defaults to '.'",
)
@click.pass_context
def cli(ctx, repo_root: str):
"""The official CLI for development, testing, and automation in the LlamaIndex monorepo."""
ctx.obj = {
"console": Console(theme=LLAMA_DEV_THEME),
"repo_root": Path(repo_root).resolve(),
}
cli.add_command(pkg)
cli.add_command(test)
|
import random
from typing import Optional, TYPE_CHECKING
if TYPE_CHECKING:
from docarray.array.document import DocumentArray
class SampleMixin:
"""A mixin that provides search functionality to DocumentArrays"""
def sample(self, k: int, seed: Optional[int] = None) -> 'DocumentArray':
"""random sample k elements from :class:`DocumentArray` without replacement.
:param k: Number of elements to sample from the document array.
:param seed: initialize the random number generator, by default is None. If set will
save the state of the random function to produce certain outputs.
:return: A sampled list of :class:`Document` represented as :class:`DocumentArray`.
"""
if seed is not None:
random.seed(seed)
        # NOTE: this could be simplified to random.sample(self, k)
        # without collecting indices, using itemgetter, etc.;
        # however, that only works on DocumentArray.
sampled = random.sample(self, k)
from docarray.array.document import DocumentArray
return DocumentArray(sampled)
def shuffle(self, seed: Optional[int] = None) -> 'DocumentArray':
"""Randomly shuffle documents within the :class:`DocumentArray`.
        :param seed: initializes the random number generator; None by default. If set,
            shuffling is deterministic and reproducible across calls.
:return: The shuffled list of :class:`Document` represented as :class:`DocumentArray`.
"""
return self.sample(len(self), seed=seed)
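# Usage sketch (illustrative): given a populated DocumentArray `da`,
#   subset = da.sample(3, seed=42)   # 3 distinct documents, reproducible
#   mixed = da.shuffle(seed=42)      # random permutation of all documents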
|
import random
from typing import Optional, TYPE_CHECKING
if TYPE_CHECKING:
from ..document import DocumentArray
class SampleMixin:
""" A mixin that provides search functionality to DocumentArrays"""
def sample(self, k: int, seed: Optional[int] = None) -> 'DocumentArray':
"""random sample k elements from :class:`DocumentArray` without replacement.
:param k: Number of elements to sample from the document array.
:param seed: initialize the random number generator, by default is None. If set will
save the state of the random function to produce certain outputs.
:return: A sampled list of :class:`Document` represented as :class:`DocumentArray`.
"""
if seed is not None:
random.seed(seed)
        # NOTE: this could be simplified to random.sample(self, k)
        # without collecting indices, using itemgetter, etc.;
        # however, that only works on DocumentArray.
sampled = random.sample(self, k)
from ..document import DocumentArray
return DocumentArray(sampled)
def shuffle(self, seed: Optional[int] = None) -> 'DocumentArray':
"""Randomly shuffle documents within the :class:`DocumentArray`.
        :param seed: initializes the random number generator; None by default. If set,
            shuffling is deterministic and reproducible across calls.
:return: The shuffled list of :class:`Document` represented as :class:`DocumentArray`.
"""
return self.sample(len(self), seed=seed)
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# the model is trained from scratch, so init_cfg is None
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2), # leads to 0.1+ mAP
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# the model is trained from scratch, so init_cfg is None
backbone=dict(
frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2), # leads to 0.1+ mAP
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
|
from backend.blocks.linear._api import LinearAPIException, LinearClient
from backend.blocks.linear._auth import (
LINEAR_OAUTH_IS_CONFIGURED,
TEST_CREDENTIALS_INPUT_OAUTH,
TEST_CREDENTIALS_OAUTH,
LinearCredentials,
LinearCredentialsField,
LinearCredentialsInput,
LinearScope,
)
from backend.blocks.linear.models import Project
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class LinearSearchProjectsBlock(Block):
"""Block for searching projects on Linear"""
class Input(BlockSchema):
credentials: LinearCredentialsInput = LinearCredentialsField(
scopes=[LinearScope.READ],
)
term: str = SchemaField(description="Term to search for projects")
class Output(BlockSchema):
projects: list[Project] = SchemaField(description="List of projects")
error: str = SchemaField(description="Error message if issue creation failed")
def __init__(self):
super().__init__(
id="446a1d35-9d8f-4ac5-83ea-7684ec50e6af",
description="Searches for projects on Linear",
input_schema=self.Input,
output_schema=self.Output,
categories={BlockCategory.PRODUCTIVITY, BlockCategory.ISSUE_TRACKING},
test_input={
"term": "Test project",
"credentials": TEST_CREDENTIALS_INPUT_OAUTH,
},
disabled=not LINEAR_OAUTH_IS_CONFIGURED,
test_credentials=TEST_CREDENTIALS_OAUTH,
test_output=[
(
"projects",
[
Project(
id="abc123",
name="Test project",
description="Test description",
priority=1,
progress=1,
content="Test content",
)
],
)
],
test_mock={
"search_projects": lambda *args, **kwargs: [
Project(
id="abc123",
name="Test project",
description="Test description",
priority=1,
progress=1,
content="Test content",
)
]
},
)
@staticmethod
async def search_projects(
credentials: LinearCredentials,
term: str,
) -> list[Project]:
client = LinearClient(credentials=credentials)
response: list[Project] = await client.try_search_projects(term=term)
return response
async def run(
self, input_data: Input, *, credentials: LinearCredentials, **kwargs
) -> BlockOutput:
"""Execute the project search"""
try:
projects = await self.search_projects(
credentials=credentials,
term=input_data.term,
)
yield "projects", projects
except LinearAPIException as e:
yield "error", str(e)
except Exception as e:
yield "error", f"Unexpected error: {str(e)}"
|
from backend.blocks.linear._api import LinearAPIException, LinearClient
from backend.blocks.linear._auth import (
LINEAR_OAUTH_IS_CONFIGURED,
TEST_CREDENTIALS_INPUT_OAUTH,
TEST_CREDENTIALS_OAUTH,
LinearCredentials,
LinearCredentialsField,
LinearCredentialsInput,
LinearScope,
)
from backend.blocks.linear.models import Project
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class LinearSearchProjectsBlock(Block):
"""Block for searching projects on Linear"""
class Input(BlockSchema):
credentials: LinearCredentialsInput = LinearCredentialsField(
scopes=[LinearScope.READ],
)
term: str = SchemaField(description="Term to search for projects")
class Output(BlockSchema):
projects: list[Project] = SchemaField(description="List of projects")
error: str = SchemaField(description="Error message if issue creation failed")
def __init__(self):
super().__init__(
id="446a1d35-9d8f-4ac5-83ea-7684ec50e6af",
description="Searches for projects on Linear",
input_schema=self.Input,
output_schema=self.Output,
categories={BlockCategory.PRODUCTIVITY, BlockCategory.ISSUE_TRACKING},
test_input={
"term": "Test project",
"credentials": TEST_CREDENTIALS_INPUT_OAUTH,
},
disabled=not LINEAR_OAUTH_IS_CONFIGURED,
test_credentials=TEST_CREDENTIALS_OAUTH,
test_output=[
(
"projects",
[
Project(
id="abc123",
name="Test project",
description="Test description",
priority=1,
progress=1,
content="Test content",
)
],
)
],
test_mock={
"search_projects": lambda *args, **kwargs: [
Project(
id="abc123",
name="Test project",
description="Test description",
priority=1,
progress=1,
content="Test content",
)
]
},
)
@staticmethod
def search_projects(
credentials: LinearCredentials,
term: str,
) -> list[Project]:
client = LinearClient(credentials=credentials)
response: list[Project] = client.try_search_projects(term=term)
return response
def run(
self, input_data: Input, *, credentials: LinearCredentials, **kwargs
) -> BlockOutput:
"""Execute the project search"""
try:
projects = self.search_projects(
credentials=credentials,
term=input_data.term,
)
yield "projects", projects
except LinearAPIException as e:
yield "error", str(e)
except Exception as e:
yield "error", f"Unexpected error: {str(e)}"
|
from __future__ import annotations
import functools
import operator
from typing import Any, TYPE_CHECKING
import torch
# NOTE: other files rely on the imports below
from torch._dynamo import callback as compilation_callback # noqa: F401
from torch._inductor.runtime.cache_dir_utils import ( # noqa: F401
cache_dir,
default_cache_dir,
triton_cache_dir,
)
if TYPE_CHECKING:
from collections.abc import Hashable
from .triton_compat import Config
def conditional_product(*args: int) -> int:
return functools.reduce(operator.mul, [x for x in args if x])
def ceildiv(number: int, denom: int) -> int:
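    # -(a // -b) computes ceil(a / b) with pure integer arithmetic (no floats)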
return -(number // -denom)
def is_power_of_2(n: int) -> bool:
"""Returns whether n = 2 ** m for some integer m."""
return n > 0 and n & n - 1 == 0
def next_power_of_2(n: int) -> int:
"""Return the smallest power of 2 greater than or equal to n"""
n -= 1
n |= n >> 1
n |= n >> 2
n |= n >> 4
n |= n >> 8
n |= n >> 16
n |= n >> 32
n += 1
return n
def get_num_bytes(*args: torch.Tensor, num_in_out_args: int = 0) -> int:
"""
    Return the total number of bytes the arguments of tensor type take.
    For in/out args, tensor sizes are counted twice: once for reading and
    once for writing.
    The first num_in_out_args arguments are in/out tensors.
"""
return sum(
arg.numel() * arg.element_size() * (1 + int(i < num_in_out_args))
for i, arg in enumerate(args)
if isinstance(arg, torch.Tensor)
)
def triton_config_to_hashable(cfg: Config) -> Hashable:
"""
Convert triton config to a tuple that can uniquely identify it. We can use
the return value as a dictionary key.
"""
items = sorted(cfg.kwargs.items())
items.append(("num_warps", cfg.num_warps))
items.append(("num_stages", cfg.num_stages))
return tuple(items)
def validate_triton_config(cfg: Config) -> None:
# [Note: Triton pre_hook in inductor]
# pre-hook is a lambda function, which we don't attempt to serialize.
# right now, if a pre-hook is attached to the config, it will not be saved;
# and then it won't be used when the config is loaded from cache.
# So we assert - if we do get a pre_hook, it might get ignored after caching.
assert getattr(cfg, "pre_hook", None) is None, (
"triton configs with pre_hooks not supported"
)
def create_bandwidth_info_str(
ms: float,
num_gb: float,
gb_per_s: float,
prefix: str = "",
suffix: str = "",
color: bool = True,
) -> str:
info_str = f"{prefix}{ms:.3f}ms \t{num_gb:.3f} GB \t {gb_per_s:7.2f}GB/s{suffix}"
slow = ms > 0.012 and gb_per_s < 650
return red_text(info_str) if color and slow else info_str
def get_max_y_grid() -> int:
return 65535
try:
import colorama
HAS_COLORAMA = True
except ModuleNotFoundError:
HAS_COLORAMA = False
colorama = None # type: ignore[assignment]
if HAS_COLORAMA:
def _color_text(msg: str, color: str) -> str:
return getattr(colorama.Fore, color.upper()) + msg + colorama.Fore.RESET
else:
def _color_text(msg: str, color: str) -> str:
return msg
def green_text(msg: str) -> str:
return _color_text(msg, "green")
def yellow_text(msg: str) -> str:
return _color_text(msg, "yellow")
def red_text(msg: str) -> str:
return _color_text(msg, "red")
def blue_text(msg: str) -> str:
return _color_text(msg, "blue")
def get_first_attr(obj: Any, *attrs: str) -> Any:
"""
Return the first available attribute or throw an exception if none is present.
"""
for attr in attrs:
if hasattr(obj, attr):
return getattr(obj, attr)
    raise AssertionError(f"{obj} does not have any of the attributes: {attrs}")
dynamo_timed = torch._dynamo.utils.dynamo_timed # type: ignore[has-type]
def triton_hash_to_path_key(key: str) -> str:
    # In early versions of Triton, the hash was used directly in the path name.
    # Later, the hash was converted to base64 before being used in the path
    # name, and later still the base64 conversion was replaced with base32.
    #
    # To handle all three cases, try to import the base64 conversion function.
    # If it exists, use it; otherwise fall back to _base32; if both are
    # unavailable, use the hash directly.
try:
from triton.runtime.cache import _base64
return _base64(key)
except Exception:
try:
from triton.runtime.cache import _base32
return _base32(key)
except Exception:
return key
def compile_mps_shader(source: str) -> Any:
"""
    Compiles shader source but raises a more actionable error message when needed
"""
try:
return torch.mps.compile_shader(source)
except SyntaxError as err:
raise SyntaxError(f"failed to compile {source} with {err.msg}") from err
|
from __future__ import annotations
import functools
import operator
from typing import Any, TYPE_CHECKING
import torch
# NOTE: other files rely on the imports below
from torch._dynamo import callback as compilation_callback # noqa: F401
from torch._inductor.runtime.cache_dir_utils import ( # noqa: F401
cache_dir,
default_cache_dir,
triton_cache_dir,
)
if TYPE_CHECKING:
from collections.abc import Hashable
from .triton_compat import Config
def conditional_product(*args: int) -> int:
return functools.reduce(operator.mul, [x for x in args if x])
def ceildiv(numer: int, denom: int) -> int:
return -(numer // -denom)
def is_power_of_2(n: int) -> bool:
"""Returns whether n = 2 ** m for some integer m."""
return n > 0 and n & n - 1 == 0
def next_power_of_2(n: int) -> int:
"""Return the smallest power of 2 greater than or equal to n"""
n -= 1
n |= n >> 1
n |= n >> 2
n |= n >> 4
n |= n >> 8
n |= n >> 16
n |= n >> 32
n += 1
return n
def get_num_bytes(*args: torch.Tensor, num_in_out_args: int = 0) -> int:
"""
    Return the total number of bytes the arguments of tensor type take.
    For in/out args, tensor sizes are counted twice: once for reading and
    once for writing.
    The first num_in_out_args arguments are in/out tensors.
"""
return sum(
arg.numel() * arg.element_size() * (1 + int(i < num_in_out_args))
for i, arg in enumerate(args)
if isinstance(arg, torch.Tensor)
)
def triton_config_to_hashable(cfg: Config) -> Hashable:
"""
Convert triton config to a tuple that can uniquely identify it. We can use
the return value as a dictionary key.
"""
items = sorted(cfg.kwargs.items())
items.append(("num_warps", cfg.num_warps))
items.append(("num_stages", cfg.num_stages))
return tuple(items)
def validate_triton_config(cfg: Config) -> None:
# [Note: Triton pre_hook in inductor]
# pre-hook is a lambda function, which we don't attempt to serialize.
# right now, if a pre-hook is attached to the config, it will not be saved;
# and then it won't be used when the config is loaded from cache.
# So we assert - if we do get a pre_hook, it might get ignored after caching.
assert getattr(cfg, "pre_hook", None) is None, (
"triton configs with pre_hooks not supported"
)
def create_bandwidth_info_str(
ms: float,
num_gb: float,
gb_per_s: float,
prefix: str = "",
suffix: str = "",
color: bool = True,
) -> str:
info_str = f"{prefix}{ms:.3f}ms \t{num_gb:.3f} GB \t {gb_per_s:7.2f}GB/s{suffix}"
slow = ms > 0.012 and gb_per_s < 650
return red_text(info_str) if color and slow else info_str
def get_max_y_grid() -> int:
return 65535
try:
import colorama
HAS_COLORAMA = True
except ModuleNotFoundError:
HAS_COLORAMA = False
colorama = None # type: ignore[assignment]
if HAS_COLORAMA:
def _color_text(msg: str, color: str) -> str:
return getattr(colorama.Fore, color.upper()) + msg + colorama.Fore.RESET
else:
def _color_text(msg: str, color: str) -> str:
return msg
def green_text(msg: str) -> str:
return _color_text(msg, "green")
def yellow_text(msg: str) -> str:
return _color_text(msg, "yellow")
def red_text(msg: str) -> str:
return _color_text(msg, "red")
def blue_text(msg: str) -> str:
return _color_text(msg, "blue")
def get_first_attr(obj: Any, *attrs: str) -> Any:
"""
Return the first available attribute or throw an exception if none is present.
"""
for attr in attrs:
if hasattr(obj, attr):
return getattr(obj, attr)
    raise AssertionError(f"{obj} does not have any of the attributes: {attrs}")
dynamo_timed = torch._dynamo.utils.dynamo_timed # type: ignore[has-type]
def triton_hash_to_path_key(key: str) -> str:
    # In early versions of Triton, the hash was used directly in the path name.
    # Later, the hash was converted to base64 before being used in the path
    # name, and later still the base64 conversion was replaced with base32.
    #
    # To handle all three cases, try to import the base64 conversion function.
    # If it exists, use it; otherwise fall back to _base32; if both are
    # unavailable, use the hash directly.
try:
from triton.runtime.cache import _base64
return _base64(key)
except Exception:
try:
from triton.runtime.cache import _base32
return _base32(key)
except Exception:
return key
def compile_mps_shader(source: str) -> Any:
"""
    Compiles shader source but raises a more actionable error message when needed
"""
try:
return torch.mps.compile_shader(source)
except SyntaxError as err:
raise SyntaxError(f"failed to compile {source} with {err.msg}") from err
|
import os.path
import numpy as np
from whisper.audio import SAMPLE_RATE, load_audio, log_mel_spectrogram
def test_audio():
audio_path = os.path.join(os.path.dirname(__file__), "jfk.flac")
audio = load_audio(audio_path)
assert audio.ndim == 1
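    # the bundled jfk.flac clip is ~11 s long, so its sample count falls
    # between 10 s and 12 s worth of audio at SAMPLE_RATE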
assert SAMPLE_RATE * 10 < audio.shape[0] < SAMPLE_RATE * 12
assert 0 < audio.std() < 1
mel_from_audio = log_mel_spectrogram(audio)
mel_from_file = log_mel_spectrogram(audio_path)
assert np.allclose(mel_from_audio, mel_from_file)
assert mel_from_audio.max() - mel_from_audio.min() <= 2.0
|
import os.path
import numpy as np
from whisper.audio import load_audio, log_mel_spectrogram, SAMPLE_RATE
def test_audio():
audio_path = os.path.join(os.path.dirname(__file__), "jfk.flac")
audio = load_audio(audio_path)
assert audio.ndim == 1
assert SAMPLE_RATE * 10 < audio.shape[0] < SAMPLE_RATE * 12
assert 0 < audio.std() < 1
mel_from_audio = log_mel_spectrogram(audio)
mel_from_file = log_mel_spectrogram(audio_path)
assert np.allclose(mel_from_audio, mel_from_file)
assert mel_from_audio.max() - mel_from_audio.min() <= 2.0
|
import tweepy
from backend.blocks.twitter._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
TWITTER_OAUTH_IS_CONFIGURED,
TwitterCredentials,
TwitterCredentialsField,
TwitterCredentialsInput,
)
from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class TwitterHideReplyBlock(Block):
"""
Hides a reply of one of your tweets
"""
class Input(BlockSchema):
credentials: TwitterCredentialsInput = TwitterCredentialsField(
["tweet.read", "tweet.moderate.write", "users.read", "offline.access"]
)
tweet_id: str = SchemaField(
description="ID of the tweet reply to hide",
placeholder="Enter tweet ID",
)
class Output(BlockSchema):
success: bool = SchemaField(description="Whether the operation was successful")
error: str = SchemaField(description="Error message if the request failed")
def __init__(self):
super().__init__(
id="07d58b3e-a630-11ef-a030-93701d1a465e",
description="This block hides a reply to a tweet.",
categories={BlockCategory.SOCIAL},
input_schema=TwitterHideReplyBlock.Input,
output_schema=TwitterHideReplyBlock.Output,
disabled=not TWITTER_OAUTH_IS_CONFIGURED,
test_input={
"tweet_id": "1234567890",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=[
("success", True),
],
test_mock={"hide_reply": lambda *args, **kwargs: True},
)
@staticmethod
def hide_reply(
credentials: TwitterCredentials,
tweet_id: str,
):
try:
client = tweepy.Client(
bearer_token=credentials.access_token.get_secret_value()
)
client.hide_reply(id=tweet_id, user_auth=False)
return True
except tweepy.TweepyException:
raise
async def run(
self,
input_data: Input,
*,
credentials: TwitterCredentials,
**kwargs,
) -> BlockOutput:
try:
success = self.hide_reply(
credentials,
input_data.tweet_id,
)
yield "success", success
except Exception as e:
yield "error", handle_tweepy_exception(e)
class TwitterUnhideReplyBlock(Block):
"""
Unhides a reply to a tweet
"""
class Input(BlockSchema):
credentials: TwitterCredentialsInput = TwitterCredentialsField(
["tweet.read", "tweet.moderate.write", "users.read", "offline.access"]
)
tweet_id: str = SchemaField(
description="ID of the tweet reply to unhide",
placeholder="Enter tweet ID",
)
class Output(BlockSchema):
success: bool = SchemaField(description="Whether the operation was successful")
error: str = SchemaField(description="Error message if the request failed")
def __init__(self):
super().__init__(
id="fcf9e4e4-a62f-11ef-9d85-57d3d06b616a",
description="This block unhides a reply to a tweet.",
categories={BlockCategory.SOCIAL},
input_schema=TwitterUnhideReplyBlock.Input,
output_schema=TwitterUnhideReplyBlock.Output,
disabled=not TWITTER_OAUTH_IS_CONFIGURED,
test_input={
"tweet_id": "1234567890",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=[
("success", True),
],
test_mock={"unhide_reply": lambda *args, **kwargs: True},
)
@staticmethod
def unhide_reply(
credentials: TwitterCredentials,
tweet_id: str,
):
try:
client = tweepy.Client(
bearer_token=credentials.access_token.get_secret_value()
)
client.unhide_reply(id=tweet_id, user_auth=False)
return True
except tweepy.TweepyException:
raise
async def run(
self,
input_data: Input,
*,
credentials: TwitterCredentials,
**kwargs,
) -> BlockOutput:
try:
success = self.unhide_reply(
credentials,
input_data.tweet_id,
)
yield "success", success
except Exception as e:
yield "error", handle_tweepy_exception(e)
|
import tweepy
from backend.blocks.twitter._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
TWITTER_OAUTH_IS_CONFIGURED,
TwitterCredentials,
TwitterCredentialsField,
TwitterCredentialsInput,
)
from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class TwitterHideReplyBlock(Block):
"""
Hides a reply of one of your tweets
"""
class Input(BlockSchema):
credentials: TwitterCredentialsInput = TwitterCredentialsField(
["tweet.read", "tweet.moderate.write", "users.read", "offline.access"]
)
tweet_id: str = SchemaField(
description="ID of the tweet reply to hide",
placeholder="Enter tweet ID",
)
class Output(BlockSchema):
success: bool = SchemaField(description="Whether the operation was successful")
error: str = SchemaField(description="Error message if the request failed")
def __init__(self):
super().__init__(
id="07d58b3e-a630-11ef-a030-93701d1a465e",
description="This block hides a reply to a tweet.",
categories={BlockCategory.SOCIAL},
input_schema=TwitterHideReplyBlock.Input,
output_schema=TwitterHideReplyBlock.Output,
disabled=not TWITTER_OAUTH_IS_CONFIGURED,
test_input={
"tweet_id": "1234567890",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=[
("success", True),
],
test_mock={"hide_reply": lambda *args, **kwargs: True},
)
@staticmethod
def hide_reply(
credentials: TwitterCredentials,
tweet_id: str,
):
try:
client = tweepy.Client(
bearer_token=credentials.access_token.get_secret_value()
)
client.hide_reply(id=tweet_id, user_auth=False)
return True
except tweepy.TweepyException:
raise
def run(
self,
input_data: Input,
*,
credentials: TwitterCredentials,
**kwargs,
) -> BlockOutput:
try:
success = self.hide_reply(
credentials,
input_data.tweet_id,
)
yield "success", success
except Exception as e:
yield "error", handle_tweepy_exception(e)
class TwitterUnhideReplyBlock(Block):
"""
Unhides a reply to a tweet
"""
class Input(BlockSchema):
credentials: TwitterCredentialsInput = TwitterCredentialsField(
["tweet.read", "tweet.moderate.write", "users.read", "offline.access"]
)
tweet_id: str = SchemaField(
description="ID of the tweet reply to unhide",
placeholder="Enter tweet ID",
)
class Output(BlockSchema):
success: bool = SchemaField(description="Whether the operation was successful")
error: str = SchemaField(description="Error message if the request failed")
def __init__(self):
super().__init__(
id="fcf9e4e4-a62f-11ef-9d85-57d3d06b616a",
description="This block unhides a reply to a tweet.",
categories={BlockCategory.SOCIAL},
input_schema=TwitterUnhideReplyBlock.Input,
output_schema=TwitterUnhideReplyBlock.Output,
disabled=not TWITTER_OAUTH_IS_CONFIGURED,
test_input={
"tweet_id": "1234567890",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=[
("success", True),
],
test_mock={"unhide_reply": lambda *args, **kwargs: True},
)
@staticmethod
def unhide_reply(
credentials: TwitterCredentials,
tweet_id: str,
):
try:
client = tweepy.Client(
bearer_token=credentials.access_token.get_secret_value()
)
client.unhide_reply(id=tweet_id, user_auth=False)
return True
except tweepy.TweepyException:
raise
def run(
self,
input_data: Input,
*,
credentials: TwitterCredentials,
**kwargs,
) -> BlockOutput:
try:
success = self.unhide_reply(
credentials,
input_data.tweet_id,
)
yield "success", success
except Exception as e:
yield "error", handle_tweepy_exception(e)
|
from docarray.document.any_document import AnyDocument
from docarray.document.document import BaseDocument
|
from .any_document import AnyDocument
from .document import BaseDocument
|
_base_ = './mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py'
# Use RepeatDataset to speed up training
# change repeat time from 4 (for 100 epochs) to 16 (for 400 epochs)
train_dataloader = dict(dataset=dict(times=4 * 4))
param_scheduler = [
dict(
type='LinearLR',
start_factor=0.067,
by_epoch=False,
begin=0,
end=500 * 4),
dict(
type='MultiStepLR',
begin=0,
        end=25,
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
|
_base_ = './mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py'
# Use RepeatDataset to speed up training
# change repeat time from 4 (for 100 epochs) to 16 (for 400 epochs)
data = dict(train=dict(times=4 * 4))
lr_config = dict(warmup_iters=500 * 4)
|
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.typing.tensor.image.image_tensor import ImageTensor
__all__ = ['ImageNdArray', 'ImageTensor']
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.image.image_torch_tensor import ImageTorchTensor # noqa
__all__.extend(['ImageTorchTensor'])
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.image.image_tensorflow_tensor import ( # noqa
ImageTensorFlowTensor,
)
__all__.extend(['ImageTensorFlowTensor'])
|
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.typing.tensor.image.image_tensor import ImageTensor
__all__ = ['ImageNdArray', 'ImageTensor']
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.image.image_torch_tensor import ImageTorchTensor # noqa
__all__.extend(['ImageTorchTensor'])
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.image.image_tensorflow_tensor import ( # noqa
ImageTensorFlowTensor,
)
__all__.extend(['ImageTensorFlowTensor'])
|
from typing import Optional
from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore
from llama_index.core.storage.docstore.types import DEFAULT_BATCH_SIZE
from llama_index.storage.kvstore.firestore import FirestoreKVStore
class FirestoreDocumentStore(KVDocumentStore):
"""
Firestore Document (Node) store.
A Firestore store for Document and Node objects.
Args:
firestore_kvstore (FirestoreKVStore): Firestore key-value store
namespace (str): namespace for the docstore
"""
def __init__(
self,
firestore_kvstore: FirestoreKVStore,
namespace: Optional[str] = None,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
"""Init a FirestoreDocumentStore."""
super().__init__(firestore_kvstore, namespace=namespace, batch_size=batch_size)
@classmethod
def from_database(
cls,
project: str,
database: str,
namespace: Optional[str] = None,
) -> "FirestoreDocumentStore":
"""
Args:
project (str): The project which the client acts on behalf of.
database (str): The database name that the client targets.
namespace (str): namespace for the docstore.
"""
firestore_kvstore = FirestoreKVStore(project=project, database=database)
return cls(firestore_kvstore, namespace)
|
from typing import Optional
from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore
from llama_index.core.storage.docstore.types import DEFAULT_BATCH_SIZE
from llama_index.storage.kvstore.firestore import FirestoreKVStore
class FirestoreDocumentStore(KVDocumentStore):
"""Firestore Document (Node) store.
A Firestore store for Document and Node objects.
Args:
firestore_kvstore (FirestoreKVStore): Firestore key-value store
namespace (str): namespace for the docstore
"""
def __init__(
self,
firestore_kvstore: FirestoreKVStore,
namespace: Optional[str] = None,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
"""Init a FirestoreDocumentStore."""
super().__init__(firestore_kvstore, namespace=namespace, batch_size=batch_size)
@classmethod
def from_database(
cls,
project: str,
database: str,
namespace: Optional[str] = None,
) -> "FirestoreDocumentStore":
"""
Args:
project (str): The project which the client acts on behalf of.
database (str): The database name that the client targets.
namespace (str): namespace for the docstore.
"""
firestore_kvstore = FirestoreKVStore(project=project, database=database)
return cls(firestore_kvstore, namespace)
|
from typing import TYPE_CHECKING
from docarray.math.ndarray import get_array_type
if TYPE_CHECKING:
from docarray.typing import ArrayType
import numpy as np
def pdist(
x_mat: 'ArrayType',
metric: str,
) -> 'np.ndarray':
"""Computes Pairwise distances between observations in n-dimensional space.
:param x_mat: Union['np.ndarray','scipy.sparse.csr_matrix', 'scipy.sparse.coo_matrix'] of ndim 2
:param metric: string describing the metric type
:return: np.ndarray of ndim 2
"""
return cdist(x_mat, x_mat, metric)
def cdist(
x_mat: 'ArrayType', y_mat: 'ArrayType', metric: str, device: str = 'cpu'
) -> 'np.ndarray':
"""Computes the pairwise distance between each row of X and each row on Y according to `metric`.
- Let `n_x = x_mat.shape[0]`
- Let `n_y = y_mat.shape[0]`
- Returns a matrix `dist` of shape `(n_x, n_y)` with `dist[i,j] = metric(x_mat[i], y_mat[j])`.
:param x_mat: numpy or scipy array of ndim 2
:param y_mat: numpy or scipy array of ndim 2
:param metric: string describing the metric type
:param device: the computational device, can be either `cpu` or `cuda`.
:return: np.ndarray of ndim 2
"""
x_type = get_array_type(x_mat)
y_type = get_array_type(y_mat)
if x_type != y_type:
raise ValueError(
f'The type of your left-hand side is {x_type}, whereas your right-hand side is {y_type}. '
            f'`.cdist()` requires both sides to be of the same type.'
)
framework, is_sparse = get_array_type(x_mat)
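    # Dispatch on (framework, sparsity); the framework-specific kernels are
    # imported lazily so optional backends are only loaded when needed.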
dists = None
if metric == 'cosine':
if framework == 'scipy' and is_sparse:
from docarray.math.distance.numpy import sparse_cosine
dists = sparse_cosine(x_mat, y_mat)
elif framework == 'numpy':
from docarray.math.distance.numpy import cosine
dists = cosine(x_mat, y_mat)
elif framework == 'tensorflow':
from docarray.math.distance.tensorflow import cosine
dists = cosine(x_mat, y_mat, device=device)
elif framework == 'torch':
from docarray.math.distance.torch import cosine
dists = cosine(x_mat, y_mat, device=device)
elif framework == 'paddle':
from docarray.math.distance.paddle import cosine
dists = cosine(x_mat, y_mat, device=device)
elif metric == 'sqeuclidean':
if framework == 'scipy' and is_sparse:
from docarray.math.distance.numpy import sparse_sqeuclidean
dists = sparse_sqeuclidean(x_mat, y_mat)
elif framework == 'numpy':
from docarray.math.distance.numpy import sqeuclidean
dists = sqeuclidean(x_mat, y_mat)
elif framework == 'tensorflow':
from docarray.math.distance.tensorflow import sqeuclidean
dists = sqeuclidean(x_mat, y_mat, device=device)
elif framework == 'torch':
from docarray.math.distance.torch import sqeuclidean
dists = sqeuclidean(x_mat, y_mat, device=device)
elif framework == 'paddle':
from docarray.math.distance.paddle import sqeuclidean
dists = sqeuclidean(x_mat, y_mat, device=device)
elif metric == 'euclidean':
if framework == 'scipy' and is_sparse:
from docarray.math.distance.numpy import sparse_euclidean
dists = sparse_euclidean(x_mat, y_mat)
elif framework == 'numpy':
from docarray.math.distance.numpy import euclidean
dists = euclidean(x_mat, y_mat)
elif framework == 'tensorflow':
from docarray.math.distance.tensorflow import euclidean
dists = euclidean(x_mat, y_mat, device=device)
elif framework == 'torch':
from docarray.math.distance.torch import euclidean
dists = euclidean(x_mat, y_mat, device=device)
elif framework == 'paddle':
from docarray.math.distance.paddle import euclidean
dists = euclidean(x_mat, y_mat, device=device)
else:
raise NotImplementedError(f'metric `{metric}` is not supported')
if dists is None:
raise NotImplementedError(
f'{framework} sparse={is_sparse} array is not supported'
)
return dists
|
from typing import TYPE_CHECKING
from docarray.math.ndarray import get_array_type
if TYPE_CHECKING:
from docarray.typing import ArrayType
import numpy as np
def pdist(
x_mat: 'ArrayType',
metric: str,
) -> 'np.ndarray':
"""Computes Pairwise distances between observations in n-dimensional space.
:param x_mat: Union['np.ndarray','scipy.sparse.csr_matrix', 'scipy.sparse.coo_matrix'] of ndim 2
:param metric: string describing the metric type
:return: np.ndarray of ndim 2
"""
return cdist(x_mat, x_mat, metric)
def cdist(
x_mat: 'ArrayType', y_mat: 'ArrayType', metric: str, device: str = 'cpu'
) -> 'np.ndarray':
"""Computes the pairwise distance between each row of X and each row on Y according to `metric`.
- Let `n_x = x_mat.shape[0]`
- Let `n_y = y_mat.shape[0]`
- Returns a matrix `dist` of shape `(n_x, n_y)` with `dist[i,j] = metric(x_mat[i], y_mat[j])`.
:param x_mat: numpy or scipy array of ndim 2
:param y_mat: numpy or scipy array of ndim 2
:param metric: string describing the metric type
:param device: the computational device, can be either `cpu` or `cuda`.
:return: np.ndarray of ndim 2
"""
x_type = get_array_type(x_mat)
y_type = get_array_type(y_mat)
if x_type != y_type:
raise ValueError(
f'The type of your left-hand side is {x_type}, whereas your right-hand side is {y_type}. '
            f'`.cdist()` requires both sides to be of the same type.'
)
framework, is_sparse = get_array_type(x_mat)
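    # Dispatch on (framework, sparsity); the framework-specific kernels are
    # imported lazily so optional backends are only loaded when needed.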
dists = None
if metric == 'cosine':
if framework == 'scipy' and is_sparse:
from docarray.math.distance.numpy import sparse_cosine
dists = sparse_cosine(x_mat, y_mat)
elif framework == 'numpy':
from docarray.math.distance.numpy import cosine
dists = cosine(x_mat, y_mat)
elif framework == 'tensorflow':
from docarray.math.distance.tensorflow import cosine
dists = cosine(x_mat, y_mat, device=device)
elif framework == 'torch':
from docarray.math.distance.torch import cosine
dists = cosine(x_mat, y_mat, device=device)
elif framework == 'paddle':
from docarray.math.distance.paddle import cosine
dists = cosine(x_mat, y_mat, device=device)
elif metric == 'sqeuclidean':
if framework == 'scipy' and is_sparse:
from docarray.math.distance.numpy import sparse_sqeuclidean
dists = sparse_sqeuclidean(x_mat, y_mat)
elif framework == 'numpy':
from docarray.math.distance.numpy import sqeuclidean
dists = sqeuclidean(x_mat, y_mat)
elif framework == 'tensorflow':
from docarray.math.distance.tensorflow import sqeuclidean
dists = sqeuclidean(x_mat, y_mat, device=device)
elif framework == 'torch':
from docarray.math.distance.torch import sqeuclidean
dists = sqeuclidean(x_mat, y_mat, device=device)
elif framework == 'paddle':
from docarray.math.distance.paddle import sqeuclidean
dists = sqeuclidean(x_mat, y_mat, device=device)
elif metric == 'euclidean':
if framework == 'scipy' and is_sparse:
from docarray.math.distance.numpy import sparse_euclidean
dists = sparse_euclidean(x_mat, y_mat)
elif framework == 'numpy':
from docarray.math.distance.numpy import euclidean
dists = euclidean(x_mat, y_mat)
elif framework == 'tensorflow':
from docarray.math.distance.tensorflow import euclidean
dists = euclidean(x_mat, y_mat, device=device)
elif framework == 'torch':
from docarray.math.distance.torch import euclidean
dists = euclidean(x_mat, y_mat, device=device)
elif framework == 'paddle':
from docarray.math.distance.paddle import euclidean
dists = euclidean(x_mat, y_mat, device=device)
else:
raise NotImplementedError(f'Input metric={metric} is not supported')
if dists is None:
raise NotImplementedError(
f'{framework} sparse={is_sparse} array is not supported'
)
return dists
|
from typing import Any, Dict, List, Tuple, Type, cast
from docarray import BaseDoc, DocList
from docarray.index.abstract import BaseDocIndex
from docarray.utils.filter import filter_docs
from docarray.utils.find import FindResult
def _collect_query_args(method_name: str): # TODO: use partialmethod instead
def inner(self, *args, **kwargs):
if args:
raise ValueError(
f'Positional arguments are not supported for '
f'`{type(self)}.{method_name}`.'
f' Use keyword arguments instead.'
)
updated_query = self._queries + [(method_name, kwargs)]
return type(self)(updated_query)
return inner
def _execute_find_and_filter_query(
doc_index: BaseDocIndex, query: List[Tuple[str, Dict]], reverse_order: bool = False
) -> FindResult:
"""
    Executes all find calls from the query first using `doc_index.find()`,
    then applies the filter queries using DocArray's `filter_docs()`.
    Text search is not supported.
:param doc_index: Document index instance.
Either InMemoryExactNNIndex or HnswDocumentIndex.
:param query: Dictionary containing search and filtering configuration.
:param reverse_order: Flag indicating whether to sort in descending order.
If set to False (default), the sorting will be in ascending order.
This option is necessary because, depending on the index, lower scores
can correspond to better matches, and vice versa.
:return: Sorted documents and their corresponding scores.
"""
docs_found = DocList.__class_getitem__(cast(Type[BaseDoc], doc_index._schema))([])
filter_conditions = []
filter_limit = None
doc_to_score: Dict[BaseDoc, Any] = {}
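    # Maps document id to its `find()` score so the filtered results can be
    # sorted by score at the end.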
for op, op_kwargs in query:
if op == 'find':
docs, scores = doc_index.find(**op_kwargs)
docs_found.extend(docs)
doc_to_score.update(zip(docs.__getattribute__('id'), scores))
elif op == 'filter':
filter_conditions.append(op_kwargs['filter_query'])
filter_limit = op_kwargs.get('limit')
else:
raise ValueError(f'Query operation is not supported: {op}')
doc_index._logger.debug(f'Executing query {query}')
docs_filtered = docs_found
for cond in filter_conditions:
docs_cls = DocList.__class_getitem__(cast(Type[BaseDoc], doc_index._schema))
docs_filtered = docs_cls(filter_docs(docs_filtered, cond))
if filter_limit:
docs_filtered = docs_filtered[:filter_limit]
doc_index._logger.debug(f'{len(docs_filtered)} results found')
docs_and_scores = zip(
docs_filtered, (doc_to_score[doc.id] for doc in docs_filtered)
)
docs_sorted = sorted(docs_and_scores, key=lambda x: x[1], reverse=reverse_order)
out_docs, out_scores = zip(*docs_sorted)
return FindResult(documents=out_docs, scores=out_scores)
|
from typing import Any, Dict, List, Tuple, Type, cast
from docarray import BaseDoc, DocList
from docarray.index.abstract import BaseDocIndex
from docarray.utils.filter import filter_docs
from docarray.utils.find import FindResult
def _collect_query_args(method_name: str): # TODO: use partialmethod instead
def inner(self, *args, **kwargs):
if args:
raise ValueError(
f'Positional arguments are not supported for '
f'`{type(self)}.{method_name}`.'
f' Use keyword arguments instead.'
)
updated_query = self._queries + [(method_name, kwargs)]
return type(self)(updated_query)
return inner
def _execute_find_and_filter_query(
doc_index: BaseDocIndex, query: List[Tuple[str, Dict]]
) -> FindResult:
"""
    Executes all find calls from the query first using `doc_index.find()`,
    then applies the filter queries using DocArray's `filter_docs()`.
    Text search is not supported.
"""
docs_found = DocList.__class_getitem__(cast(Type[BaseDoc], doc_index._schema))([])
filter_conditions = []
filter_limit = None
doc_to_score: Dict[BaseDoc, Any] = {}
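    # Maps document id to its `find()` score so the filtered results can be
    # sorted by score at the end.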
for op, op_kwargs in query:
if op == 'find':
docs, scores = doc_index.find(**op_kwargs)
docs_found.extend(docs)
doc_to_score.update(zip(docs.__getattribute__('id'), scores))
elif op == 'filter':
filter_conditions.append(op_kwargs['filter_query'])
filter_limit = op_kwargs.get('limit')
else:
raise ValueError(f'Query operation is not supported: {op}')
doc_index._logger.debug(f'Executing query {query}')
docs_filtered = docs_found
for cond in filter_conditions:
docs_cls = DocList.__class_getitem__(cast(Type[BaseDoc], doc_index._schema))
docs_filtered = docs_cls(filter_docs(docs_filtered, cond))
if filter_limit:
docs_filtered = docs_filtered[:filter_limit]
doc_index._logger.debug(f'{len(docs_filtered)} results found')
docs_and_scores = zip(
docs_filtered, (doc_to_score[doc.id] for doc in docs_filtered)
)
docs_sorted = sorted(docs_and_scores, key=lambda x: x[1])
out_docs, out_scores = zip(*docs_sorted)
return FindResult(documents=out_docs, scores=out_scores)
|
"""
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous score 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CECorrelationEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Define our Cross-Encoder
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_stsbenchmark-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base as base model and set num_labels=1, which predicts a continuous score between 0 and 1
model = CrossEncoder("distilroberta-base", num_labels=1)
# Read STSb dataset
logger.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
else:
# As we want to get symmetric scores, i.e. CrossEncoder(A,B) = CrossEncoder(B,A), we pass both combinations to the train set
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
train_samples.append(InputExample(texts=[row["sentence2"], row["sentence1"]], label=score))
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CECorrelationEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##### Load model and eval on test set
model = CrossEncoder(model_save_path)
evaluator = CECorrelationEvaluator.from_input_examples(test_samples, name="sts-test")
evaluator(model)
|
"""
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous score 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CECorrelationEvaluator
from sentence_transformers import InputExample
import logging
from datetime import datetime
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Define our Cross-Encoder
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_stsbenchmark-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base as base model and set num_labels=1, which predicts a continuous score between 0 and 1
model = CrossEncoder("distilroberta-base", num_labels=1)
# Read STSb dataset
logger.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
else:
# As we want to get symmetric scores, i.e. CrossEncoder(A,B) = CrossEncoder(B,A), we pass both combinations to the train set
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
train_samples.append(InputExample(texts=[row["sentence2"], row["sentence1"]], label=score))
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CECorrelationEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##### Load model and eval on test set
model = CrossEncoder(model_save_path)
evaluator = CECorrelationEvaluator.from_input_examples(test_samples, name="sts-test")
evaluator(model)
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.RandomGrayscale")
class RandomGrayscale(BaseImagePreprocessingLayer):
"""Preprocessing layer for random conversion of RGB images to grayscale.
This layer randomly converts input images to grayscale with a specified
factor. When applied, it maintains the original number of channels
but sets all channels to the same grayscale value. This can be useful
for data augmentation and training models to be robust to color
variations.
The conversion preserves the perceived luminance of the original color
image using standard RGB to grayscale conversion coefficients. Images
that are not selected for conversion remain unchanged.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
factor: Float between 0 and 1, specifying the factor of
converting each image to grayscale. Defaults to 0.5. A value of
1.0 means all images will be converted, while 0.0 means no images
will be converted.
data_format: String, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch, channels, height, width)`.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format,
or `(..., channels, height, width)`, in `"channels_first"` format.
Output shape:
Same as input shape. The output maintains the same number of channels
as the input, even for grayscale-converted images where all channels
will have the same value.
"""
def __init__(self, factor=0.5, data_format=None, seed=None, **kwargs):
super().__init__(**kwargs)
if factor < 0 or factor > 1:
raise ValueError(
f"`factor` should be between 0 and 1. Received: factor={factor}"
)
self.factor = factor
self.data_format = backend.standardize_data_format(data_format)
self.seed = seed
self.generator = self.backend.random.SeedGenerator(seed)
def get_random_transformation(self, images, training=True, seed=None):
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
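        # Draw one uniform sample per image in the batch; an image is flagged
        # for grayscale conversion when its sample falls below `factor`.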
random_values = self.backend.random.uniform(
shape=(self.backend.core.shape(images)[0],),
minval=0,
maxval=1,
seed=seed,
)
should_apply = self.backend.numpy.expand_dims(
random_values < self.factor, axis=[1, 2, 3]
)
return should_apply
def transform_images(self, images, transformation, training=True):
if training:
should_apply = (
transformation
if transformation is not None
else self.get_random_transformation(images)
)
grayscale_images = self.backend.image.rgb_to_grayscale(
images, data_format=self.data_format
)
return self.backend.numpy.where(
should_apply, grayscale_images, images
)
return images
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs, **kwargs):
return inputs
def transform_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def transform_labels(self, labels, transformations=None, **kwargs):
return labels
def transform_segmentation_masks(
self, segmentation_masks, transformations=None, **kwargs
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"factor": self.factor})
return config
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.RandomGrayscale")
class RandomGrayscale(BaseImagePreprocessingLayer):
"""Preprocessing layer for random conversion of RGB images to grayscale.
This layer randomly converts input images to grayscale with a specified
factor. When applied, it maintains the original number of channels
but sets all channels to the same grayscale value. This can be useful
for data augmentation and training models to be robust to color
variations.
The conversion preserves the perceived luminance of the original color
image using standard RGB to grayscale conversion coefficients. Images
that are not selected for conversion remain unchanged.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
factor: Float between 0 and 1, specifying the factor of
converting each image to grayscale. Defaults to 0.5. A value of
1.0 means all images will be converted, while 0.0 means no images
will be converted.
data_format: String, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch, channels, height, width)`.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format,
or `(..., channels, height, width)`, in `"channels_first"` format.
Output shape:
Same as input shape. The output maintains the same number of channels
as the input, even for grayscale-converted images where all channels
will have the same value.
"""
def __init__(self, factor=0.5, data_format=None, seed=None, **kwargs):
super().__init__(**kwargs)
if factor < 0 or factor > 1:
raise ValueError(
"`factor` should be between 0 and 1. "
f"Received: factor={factor}"
)
self.factor = factor
self.data_format = backend.standardize_data_format(data_format)
self.seed = seed
self.generator = self.backend.random.SeedGenerator(seed)
def get_random_transformation(self, images, training=True, seed=None):
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
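        # Draw one uniform sample per image in the batch; an image is flagged
        # for grayscale conversion when its sample falls below `factor`.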
random_values = self.backend.random.uniform(
shape=(self.backend.core.shape(images)[0],),
minval=0,
maxval=1,
seed=seed,
)
should_apply = self.backend.numpy.expand_dims(
random_values < self.factor, axis=[1, 2, 3]
)
return should_apply
def transform_images(self, images, transformation, training=True):
if training:
should_apply = (
transformation
if transformation is not None
else self.get_random_transformation(images)
)
grayscale_images = self.backend.image.rgb_to_grayscale(
images, data_format=self.data_format
)
return self.backend.numpy.where(
should_apply, grayscale_images, images
)
return images
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs, **kwargs):
return inputs
def transform_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def transform_labels(self, labels, transformations=None, **kwargs):
return labels
def transform_segmentation_masks(
self, segmentation_masks, transformations=None, **kwargs
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"factor": self.factor})
return config
|
_base_ = './retinanet_r50_fpn_1x_coco_v1.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
# use caffe img_norm
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')))
|
_base_ = './retinanet_r50_fpn_1x_coco_v1.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = [
'../common/mstrain_3x_coco_instance.py',
'../_base_/models/cascade_mask_rcnn_r50_fpn.py'
]
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
# use caffe img_norm
preprocess_cfg=preprocess_cfg,
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = ['./cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py']
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
from typing import Dict, Optional, Union
import pytest
from docarray.typing import NdArray, TorchTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._typing import is_tensor_union, is_type_tensor
try:
from docarray.typing import TensorFlowTensor
except (ImportError, TypeError):
TensorFlowTensor = None
@pytest.mark.parametrize(
'type_, is_tensor',
[
(int, False),
(TorchTensor, True),
(NdArray, True),
(AbstractTensor, True),
(Optional[TorchTensor], False),
(Union[TorchTensor, NdArray], False),
(None, False),
(Dict, False),
],
)
def test_is_type_tensor(type_, is_tensor):
assert is_type_tensor(type_) == is_tensor
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'type_, is_tensor',
[
(TensorFlowTensor, True),
(Optional[TensorFlowTensor], False),
],
)
def test_is_type_tensor_with_tf(type_, is_tensor):
assert is_type_tensor(type_) == is_tensor
@pytest.mark.parametrize(
'type_, is_union_tensor',
[
(int, False),
(TorchTensor, False),
(NdArray, False),
(Optional[TorchTensor], True),
(Optional[NdArray], True),
(Union[NdArray, TorchTensor], True),
(Union[NdArray, TorchTensor, AbstractTensor], True),
(Union[NdArray, TorchTensor, Optional[TorchTensor]], True),
(Union[NdArray, TorchTensor, None], True),
],
)
def test_is_union_type_tensor(type_, is_union_tensor):
assert is_tensor_union(type_) == is_union_tensor
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'type_, is_union_tensor',
[
(TensorFlowTensor, False),
(Optional[TensorFlowTensor], True),
(Union[NdArray, TorchTensor, TensorFlowTensor], True),
(Union[NdArray, TorchTensor, Optional[TensorFlowTensor]], True),
],
)
def test_is_union_type_tensor_with_tf(type_, is_union_tensor):
assert is_tensor_union(type_) == is_union_tensor
|
from typing import Dict, Optional, Union
import pytest
from docarray.typing import NdArray, TorchTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._typing import is_tensor_union, is_type_tensor
@pytest.mark.parametrize(
'type_, is_tensor',
[
(int, False),
(TorchTensor, True),
(NdArray, True),
(AbstractTensor, True),
(Optional[TorchTensor], False),
(Union[TorchTensor, NdArray], False),
(None, False),
(Dict, False),
],
)
def test_is_type_tensor(type_, is_tensor):
assert is_type_tensor(type_) == is_tensor
@pytest.mark.parametrize(
'type_, is_union_tensor',
[
(int, False),
(TorchTensor, False),
(NdArray, False),
(Optional[TorchTensor], True),
(Optional[NdArray], True),
(Union[NdArray, TorchTensor], True),
(Union[NdArray, TorchTensor, AbstractTensor], True),
(Union[NdArray, TorchTensor, Optional[TorchTensor]], True),
(Union[NdArray, TorchTensor, None], True),
],
)
def test_is_union_type_tensor(type_, is_union_tensor):
assert is_tensor_union(type_) == is_union_tensor
|
import pytest
from backend.data import db
from backend.executor import Scheduler
from backend.server.model import CreateGraph
from backend.usecases.sample import create_test_graph, create_test_user
from backend.util.service import get_service_client
from backend.util.test import SpinTestServer
@pytest.mark.asyncio(loop_scope="session")
async def test_agent_schedule(server: SpinTestServer):
await db.connect()
test_user = await create_test_user()
test_graph = await server.agent_server.test_create_graph(
create_graph=CreateGraph(graph=create_test_graph()),
user_id=test_user.id,
)
scheduler = get_service_client(Scheduler)
schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 0
schedule = scheduler.add_execution_schedule(
graph_id=test_graph.id,
user_id=test_user.id,
graph_version=1,
cron="0 0 * * *",
input_data={"input": "data"},
)
assert schedule
schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 1
assert schedules[0].cron == "0 0 * * *"
scheduler.delete_schedule(schedule.id, user_id=test_user.id)
schedules = scheduler.get_execution_schedules(test_graph.id, user_id=test_user.id)
assert len(schedules) == 0
|
import pytest
from backend.data import db
from backend.executor import Scheduler
from backend.server.model import CreateGraph
from backend.usecases.sample import create_test_graph, create_test_user
from backend.util.service import get_service_client
from backend.util.test import SpinTestServer
@pytest.mark.asyncio(scope="session")
async def test_agent_schedule(server: SpinTestServer):
await db.connect()
test_user = await create_test_user()
test_graph = await server.agent_server.test_create_graph(
create_graph=CreateGraph(graph=create_test_graph()),
user_id=test_user.id,
)
scheduler = get_service_client(Scheduler)
schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 0
schedule = scheduler.add_execution_schedule(
graph_id=test_graph.id,
user_id=test_user.id,
graph_version=1,
cron="0 0 * * *",
input_data={"input": "data"},
)
assert schedule
schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 1
assert schedules[0].cron == "0 0 * * *"
scheduler.delete_schedule(schedule.id, user_id=test_user.id)
schedules = scheduler.get_execution_schedules(test_graph.id, user_id=test_user.id)
assert len(schedules) == 0
|
__version__ = "2.8.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
from .datasets import SentencesDataset, ParallelSentencesDataset
from .LoggingHandler import LoggingHandler
from .SentenceTransformer import SentenceTransformer
from .readers import InputExample
from .cross_encoder.CrossEncoder import CrossEncoder
from .quantization import quantize_embeddings
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"InputExample",
"CrossEncoder",
"quantize_embeddings",
]
|
__version__ = "2.7.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
from .datasets import SentencesDataset, ParallelSentencesDataset
from .LoggingHandler import LoggingHandler
from .SentenceTransformer import SentenceTransformer
from .readers import InputExample
from .cross_encoder.CrossEncoder import CrossEncoder
from .quantization import quantize_embeddings
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"InputExample",
"CrossEncoder",
"quantize_embeddings",
]
|
"""
This tool allows agents to interact with the python-gitlab library
and operate on a GitLab repository.
To use this tool, you must first set the following environment variables:
GITLAB_PRIVATE_ACCESS_TOKEN
GITLAB_REPOSITORY -> format: {owner}/{repo}
"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.gitlab import GitLabAPIWrapper
class GitLabAction(BaseTool):
"""Tool for interacting with the GitLab API."""
api_wrapper: GitLabAPIWrapper = Field(default_factory=GitLabAPIWrapper)
mode: str
name: str = ""
description: str = ""
def _run(
self,
instructions: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the GitLab API to run an operation."""
return self.api_wrapper.run(self.mode, instructions)
|
"""
This tool allows agents to interact with the python-gitlab library
and operate on a GitLab repository.
To use this tool, you must first set the following environment variables:
GITLAB_PRIVATE_ACCESS_TOKEN
GITLAB_REPOSITORY -> format: {owner}/{repo}
"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.gitlab import GitLabAPIWrapper
class GitLabAction(BaseTool): # type: ignore[override]
"""Tool for interacting with the GitLab API."""
api_wrapper: GitLabAPIWrapper = Field(default_factory=GitLabAPIWrapper) # type: ignore[arg-type]
mode: str
name: str = ""
description: str = ""
def _run(
self,
instructions: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the GitLab API to run an operation."""
return self.api_wrapper.run(self.mode, instructions)
|
_base_ = '../cascade_rcnn/cascade-rcnn_r50_fpn_20e_coco.py'
model = dict(
backbone=dict(
type='Res2Net',
depth=101,
scales=4,
base_width=26,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
|
_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py'
model = dict(
backbone=dict(
type='Res2Net',
depth=101,
scales=4,
base_width=26,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
|
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts.few_shot import FewShotPromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
TEST_GEN_TEMPLATE_SUFFIX = "Add another example."
def generate_example(
examples: list[dict], llm: BaseLanguageModel, prompt_template: PromptTemplate
) -> str:
"""Return another example given a list of examples for a prompt."""
prompt = FewShotPromptTemplate(
examples=examples,
suffix=TEST_GEN_TEMPLATE_SUFFIX,
input_variables=[],
example_prompt=prompt_template,
)
chain = prompt | llm | StrOutputParser()
return chain.invoke({})
|
from typing import List
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts.few_shot import FewShotPromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
TEST_GEN_TEMPLATE_SUFFIX = "Add another example."
def generate_example(
examples: List[dict], llm: BaseLanguageModel, prompt_template: PromptTemplate
) -> str:
"""Return another example given a list of examples for a prompt."""
prompt = FewShotPromptTemplate(
examples=examples,
suffix=TEST_GEN_TEMPLATE_SUFFIX,
input_variables=[],
example_prompt=prompt_template,
)
chain = prompt | llm | StrOutputParser()
return chain.invoke({})
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Any, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
    E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Record time flag before start a epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
self.t = time.time()
def _before_iter(self,
runner,
data_batch: DATA_BATCH = None,
mode: str = 'train') -> None:
"""Logging time for loading data and update the time flag.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_log(f'{mode}/data_time',
time.time() - self.t)
def _after_iter(self,
runner,
data_batch: DATA_BATCH = None,
outputs:
Optional[Union[dict, Sequence[BaseDataSample]]] = None,
mode: str = 'train') \
-> None:
"""Logging time for a iteration and update the time flag.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_log(f'{mode}/time', time.time() - self.t)
self.t = time.time()
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Any, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
    E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def before_epoch(self, runner) -> None:
"""Record time flag before start a epoch.
Args:
runner (Runner): The runner of the training process.
"""
self.t = time.time()
def before_iter(self, runner, data_batch: DATA_BATCH = None) -> None:
"""Logging time for loading data and update the time flag.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
"""
# TODO: update for new logging system
runner.log_buffer.update({'data_time': time.time() - self.t})
def after_iter(self,
runner,
data_batch: DATA_BATCH = None,
outputs:
Optional[Union[dict, Sequence[BaseDataSample]]] = None) \
-> None:
"""Logging time for a iteration and update the time flag.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
"""
# TODO: update for new logging system
runner.log_buffer.update({'time': time.time() - self.t})
self.t = time.time()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean, sync_random_seed)
from .logger import get_caller_name, get_root_logger, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import find_latest_checkpoint, update_data_root
from .parallel import MMDataParallel, MMDistributedDataParallel
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import register_all_modules, setup_multi_processes
from .split_batch import split_batch
from .typing import (ConfigType, InstanceList, MultiConfig, OptConfigType,
OptInstanceList, OptMultiConfig, OptPixelList, PixelList,
RangeType)
from .util_distribution import build_ddp, build_dp, get_device
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'update_data_root', 'setup_multi_processes', 'get_caller_name',
'log_img_scale', 'compat_cfg', 'split_batch', 'build_ddp', 'build_dp',
'get_device', 'MMDataParallel', 'MMDistributedDataParallel',
'register_all_modules', 'replace_cfg_vals', 'AvoidOOM', 'AvoidCUDAOOM',
'DistOptimizerHook', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .logger import get_caller_name, get_root_logger, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import find_latest_checkpoint, update_data_root
from .parallel import MMDataParallel, MMDistributedDataParallel
from .setup_env import register_all_modules, setup_multi_processes
from .split_batch import split_batch
from .util_distribution import build_ddp, build_dp, get_device
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'update_data_root', 'setup_multi_processes', 'get_caller_name',
'log_img_scale', 'compat_cfg', 'split_batch', 'build_ddp', 'build_dp',
'get_device', 'MMDataParallel', 'MMDistributedDataParallel',
'register_all_modules'
]
|
import os
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import (
AudioNdArray,
NdArray,
VideoBytes,
VideoNdArray,
VideoTorchTensor,
VideoUrl,
)
from docarray.typing.url.mimetypes import (
OBJ_MIMETYPE,
AUDIO_MIMETYPE,
VIDEO_MIMETYPE,
IMAGE_MIMETYPE,
TEXT_MIMETYPE,
)
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.video import VideoTensorFlowTensor
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load(file_url):
url = parse_obj_as(VideoUrl, file_url)
video, audio, indices = url.load()
assert isinstance(audio, np.ndarray)
assert isinstance(audio, AudioNdArray)
assert isinstance(video, np.ndarray)
assert isinstance(video, VideoNdArray)
assert isinstance(indices, np.ndarray)
assert isinstance(indices, NdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
@pytest.mark.parametrize(
'field, attr_cls',
[
('video', VideoNdArray),
('audio', AudioNdArray),
('key_frame_indices', NdArray),
],
)
def test_load_one_of_named_tuple_results(file_url, field, attr_cls):
url = parse_obj_as(VideoUrl, file_url)
result = getattr(url.load(), field)
assert isinstance(result, np.ndarray)
assert isinstance(result, attr_cls)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_torch_tensor_field(file_url):
class MyVideoDoc(BaseDoc):
video_url: VideoUrl
tensor: Optional[VideoTorchTensor]
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, VideoTorchTensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_tensorflow_tensor_field(file_url):
class MyVideoDoc(BaseDoc):
video_url: VideoUrl
tensor: Optional[VideoTensorFlowTensor]
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, VideoTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
def test_json_schema():
schema_json_of(VideoUrl)
def test_dump_json():
url = parse_obj_as(VideoUrl, REMOTE_VIDEO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(VideoUrl, path_to_file)
assert isinstance(url, VideoUrl)
assert isinstance(url, str)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_proto_video_url(file_url):
uri = parse_obj_as(VideoUrl, file_url)
proto = uri._to_node_protobuf()
assert 'video_url' in str(proto)
def test_load_bytes():
file_url = LOCAL_VIDEO_FILE
uri = parse_obj_as(VideoUrl, file_url)
video_bytes = uri.load_bytes()
assert isinstance(video_bytes, bytes)
assert isinstance(video_bytes, VideoBytes)
assert len(video_bytes) > 0
@pytest.mark.parametrize(
'file_type, file_source',
[
(VIDEO_MIMETYPE, LOCAL_VIDEO_FILE),
(VIDEO_MIMETYPE, REMOTE_VIDEO_FILE),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.aac')),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.mp3')),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.ogg')),
(IMAGE_MIMETYPE, os.path.join(TOYDATA_DIR, 'test.png')),
        (TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'test.html')),
        (TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'test.md')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'penal_colony.txt')),
(OBJ_MIMETYPE, os.path.join(TOYDATA_DIR, 'test.glb')),
],
)
def test_file_validation(file_type, file_source):
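    # Only video mimetypes should pass `VideoUrl` validation; any other file
    # type is expected to raise.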
if file_type != VideoUrl.mime_type():
with pytest.raises(ValueError):
parse_obj_as(VideoUrl, file_source)
else:
parse_obj_as(VideoUrl, file_source)
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import (
AudioNdArray,
NdArray,
VideoBytes,
VideoNdArray,
VideoTorchTensor,
VideoUrl,
)
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.video import VideoTensorFlowTensor
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load(file_url):
url = parse_obj_as(VideoUrl, file_url)
video, audio, indices = url.load()
assert isinstance(audio, np.ndarray)
assert isinstance(audio, AudioNdArray)
assert isinstance(video, np.ndarray)
assert isinstance(video, VideoNdArray)
assert isinstance(indices, np.ndarray)
assert isinstance(indices, NdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
@pytest.mark.parametrize(
'field, attr_cls',
[
('video', VideoNdArray),
('audio', AudioNdArray),
('key_frame_indices', NdArray),
],
)
def test_load_one_of_named_tuple_results(file_url, field, attr_cls):
url = parse_obj_as(VideoUrl, file_url)
result = getattr(url.load(), field)
assert isinstance(result, np.ndarray)
assert isinstance(result, attr_cls)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_torch_tensor_field(file_url):
class MyVideoDoc(BaseDoc):
video_url: VideoUrl
tensor: Optional[VideoTorchTensor]
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, VideoTorchTensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_tensorflow_tensor_field(file_url):
class MyVideoDoc(BaseDoc):
video_url: VideoUrl
tensor: Optional[VideoTensorFlowTensor]
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, VideoTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
def test_json_schema():
schema_json_of(VideoUrl)
def test_dump_json():
url = parse_obj_as(VideoUrl, REMOTE_VIDEO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(VideoUrl, path_to_file)
assert isinstance(url, VideoUrl)
assert isinstance(url, str)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_proto_video_url(file_url):
uri = parse_obj_as(VideoUrl, file_url)
proto = uri._to_node_protobuf()
assert 'video_url' in str(proto)
def test_load_bytes():
file_url = LOCAL_VIDEO_FILE
uri = parse_obj_as(VideoUrl, file_url)
video_bytes = uri.load_bytes()
assert isinstance(video_bytes, bytes)
assert isinstance(video_bytes, VideoBytes)
assert len(video_bytes) > 0
|
# Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
from mmengine.config import Config
from mmengine.utils import mkdir_or_exist
try:
from model_archiver.model_packaging import package_model
from model_archiver.model_packaging_utils import ModelExportUtils
except ImportError:
package_model = None
def mmdet2torchserve(
config_file: str,
checkpoint_file: str,
output_folder: str,
model_name: str,
model_version: str = '1.0',
force: bool = False,
):
"""Converts MMDetection model (config + checkpoint) to TorchServe `.mar`.
Args:
config_file:
In MMDetection config format.
The contents vary for each task repository.
checkpoint_file:
In MMDetection checkpoint format.
The contents vary for each task repository.
output_folder:
Folder where `{model_name}.mar` will be created.
The file created will be in TorchServe archive format.
model_name:
If not None, used for naming the `{model_name}.mar` file
that will be created under `output_folder`.
If None, `{Path(checkpoint_file).stem}` will be used.
model_version:
Model's version.
force:
If True, if there is an existing `{model_name}.mar`
file under `output_folder` it will be overwritten.
"""
mkdir_or_exist(output_folder)
config = Config.fromfile(config_file)
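    # Dump the fully resolved config (with `_base_` inheritance applied) into
    # a temporary directory so the archive ships one self-contained file.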
with TemporaryDirectory() as tmpdir:
config.dump(f'{tmpdir}/config.py')
args = Namespace(
**{
'model_file': f'{tmpdir}/config.py',
'config_file': f'{tmpdir}/config.py',
'serialized_file': checkpoint_file,
'handler': f'{Path(__file__).parent}/mmdet_handler.py',
'model_name': model_name or Path(checkpoint_file).stem,
'version': model_version,
'export_path': output_folder,
'force': force,
'requirements_file': None,
'extra_files': None,
'runtime': 'python',
'archive_format': 'default'
})
manifest = ModelExportUtils.generate_manifest_json(args)
package_model(args, manifest)
def parse_args():
parser = ArgumentParser(
description='Convert MMDetection models to TorchServe `.mar` format.')
parser.add_argument('config', type=str, help='config file path')
parser.add_argument('checkpoint', type=str, help='checkpoint file path')
parser.add_argument(
'--output-folder',
type=str,
required=True,
help='Folder where `{model_name}.mar` will be created.')
parser.add_argument(
'--model-name',
type=str,
default=None,
        help='If not None, used for naming the `{model_name}.mar` '
        'file that will be created under `output_folder`. '
        'If None, `{Path(checkpoint_file).stem}` will be used.')
parser.add_argument(
'--model-version',
type=str,
default='1.0',
help='Number used for versioning.')
parser.add_argument(
'-f',
'--force',
action='store_true',
help='overwrite the existing `{model_name}.mar`')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if package_model is None:
        raise ImportError('`torch-model-archiver` is required. '
                          'Try: pip install torch-model-archiver')
mmdet2torchserve(args.config, args.checkpoint, args.output_folder,
args.model_name, args.model_version, args.force)
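# Hedged usage sketch: calling the converter programmatically instead of via
# the CLI; the config/checkpoint paths below are illustrative placeholders,
# not files shipped with this repository.
def _example_programmatic_convert():
    mmdet2torchserve(
        config_file='configs/yolof/yolof_r50_c5_8x8_1x_coco.py',
        checkpoint_file='checkpoints/yolof_r50_c5_8x8_1x_coco.pth',
        output_folder='model-store',
        model_name='yolof',
        model_version='1.0',
        force=True,
    )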
|
# Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
from mmengine.config import Config
from mmengine.utils import mkdir_or_exist
try:
from model_archiver.model_packaging import package_model
from model_archiver.model_packaging_utils import ModelExportUtils
except ImportError:
package_model = None
def mmdet2torchserve(
config_file: str,
checkpoint_file: str,
output_folder: str,
model_name: str,
model_version: str = '1.0',
force: bool = False,
):
"""Converts MMDetection model (config + checkpoint) to TorchServe `.mar`.
Args:
config_file:
In MMDetection config format.
The contents vary for each task repository.
checkpoint_file:
In MMDetection checkpoint format.
The contents vary for each task repository.
output_folder:
Folder where `{model_name}.mar` will be created.
The file created will be in TorchServe archive format.
model_name:
If not None, used for naming the `{model_name}.mar` file
that will be created under `output_folder`.
If None, `{Path(checkpoint_file).stem}` will be used.
model_version:
Model's version.
force:
If True, if there is an existing `{model_name}.mar`
file under `output_folder` it will be overwritten.
"""
mkdir_or_exist(output_folder)
config = Config.fromfile(config_file)
with TemporaryDirectory() as tmpdir:
config.dump(f'{tmpdir}/config.py')
args = Namespace(
**{
'model_file': f'{tmpdir}/config.py',
'serialized_file': checkpoint_file,
'handler': f'{Path(__file__).parent}/mmdet_handler.py',
'model_name': model_name or Path(checkpoint_file).stem,
'version': model_version,
'export_path': output_folder,
'force': force,
'requirements_file': None,
'extra_files': None,
'runtime': 'python',
'archive_format': 'default'
})
manifest = ModelExportUtils.generate_manifest_json(args)
package_model(args, manifest)
def parse_args():
parser = ArgumentParser(
description='Convert MMDetection models to TorchServe `.mar` format.')
parser.add_argument('config', type=str, help='config file path')
parser.add_argument('checkpoint', type=str, help='checkpoint file path')
parser.add_argument(
'--output-folder',
type=str,
required=True,
help='Folder where `{model_name}.mar` will be created.')
parser.add_argument(
'--model-name',
type=str,
default=None,
        help='If not None, used for naming the `{model_name}.mar` '
        'file that will be created under `output_folder`. '
        'If None, `{Path(checkpoint_file).stem}` will be used.')
parser.add_argument(
'--model-version',
type=str,
default='1.0',
help='Number used for versioning.')
parser.add_argument(
'-f',
'--force',
action='store_true',
help='overwrite the existing `{model_name}.mar`')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if package_model is None:
        raise ImportError('`torch-model-archiver` is required. '
                          'Try: pip install torch-model-archiver')
mmdet2torchserve(args.config, args.checkpoint, args.output_folder,
args.model_name, args.model_version, args.force)
|
"""Evaluation metrics for cluster analysis results.
- Supervised evaluation uses ground-truth class values for each sample.
- Unsupervised evaluation does not use ground truth and measures the "quality" of the
model itself.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._bicluster import consensus_score
from ._supervised import (
adjusted_mutual_info_score,
adjusted_rand_score,
completeness_score,
contingency_matrix,
entropy,
expected_mutual_information,
fowlkes_mallows_score,
homogeneity_completeness_v_measure,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
pair_confusion_matrix,
rand_score,
v_measure_score,
)
from ._unsupervised import (
calinski_harabasz_score,
davies_bouldin_score,
silhouette_samples,
silhouette_score,
)
__all__ = [
"adjusted_mutual_info_score",
"adjusted_rand_score",
"calinski_harabasz_score",
"completeness_score",
"consensus_score",
"contingency_matrix",
"davies_bouldin_score",
"entropy",
"expected_mutual_information",
"fowlkes_mallows_score",
"homogeneity_completeness_v_measure",
"homogeneity_score",
"mutual_info_score",
"normalized_mutual_info_score",
"pair_confusion_matrix",
"rand_score",
"silhouette_samples",
"silhouette_score",
"v_measure_score",
]
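if __name__ == "__main__":
    # Hedged usage sketch: one supervised and one unsupervised metric on toy
    # data; the values follow directly from the definitions of the scores.
    import numpy as np
    labels_true = [0, 0, 1, 1]
    labels_pred = [1, 1, 0, 0]
    # supervised: needs ground truth and is invariant to label permutation
    print(adjusted_rand_score(labels_true, labels_pred))  # 1.0
    # unsupervised: needs only the data and the predicted labels
    X = np.array([[0.0, 0.0], [0.1, 0.0], [4.0, 4.0], [4.1, 4.0]])
    print(silhouette_score(X, labels_pred))  # close to 1 for tight clusters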
|
"""Evaluation metrics for cluster analysis results.
- Supervised evaluation uses ground-truth class values for each sample.
- Unsupervised evaluation does not use ground truth and measures the "quality" of the
model itself.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._bicluster import consensus_score
from ._supervised import (
adjusted_mutual_info_score,
adjusted_rand_score,
completeness_score,
contingency_matrix,
entropy,
expected_mutual_information,
fowlkes_mallows_score,
homogeneity_completeness_v_measure,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
pair_confusion_matrix,
rand_score,
v_measure_score,
)
from ._unsupervised import (
calinski_harabasz_score,
davies_bouldin_score,
silhouette_samples,
silhouette_score,
)
__all__ = [
"adjusted_mutual_info_score",
"normalized_mutual_info_score",
"adjusted_rand_score",
"rand_score",
"completeness_score",
"pair_confusion_matrix",
"contingency_matrix",
"expected_mutual_information",
"homogeneity_completeness_v_measure",
"homogeneity_score",
"mutual_info_score",
"v_measure_score",
"fowlkes_mallows_score",
"entropy",
"silhouette_samples",
"silhouette_score",
"calinski_harabasz_score",
"davies_bouldin_score",
"consensus_score",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .logger import get_caller_name, get_root_logger, log_img_scale
from .misc import find_latest_checkpoint, update_data_root
from .setup_env import setup_multi_processes
from .split_batch import split_batch
from .util_distribution import build_ddp, build_dp, get_device
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'update_data_root', 'setup_multi_processes', 'get_caller_name',
'log_img_scale', 'compat_cfg', 'split_batch', 'build_ddp', 'build_dp',
'get_device'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_caller_name, get_root_logger, log_img_scale
from .misc import find_latest_checkpoint, update_data_root
from .setup_env import setup_multi_processes
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'update_data_root', 'setup_multi_processes', 'get_caller_name',
'log_img_scale'
]
|
from sentence_transformers.similarity_functions import SimilarityFunction
__all__ = ["SimilarityFunction"]
|
from enum import Enum
class SimilarityFunction(Enum):
COSINE = 0
EUCLIDEAN = 1
MANHATTAN = 2
DOT_PRODUCT = 3
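if __name__ == "__main__":
    # Hedged sketch relying only on standard Enum semantics: members can be
    # recovered by name or by value when deserializing a stored similarity.
    assert SimilarityFunction["COSINE"] is SimilarityFunction.COSINE
    assert SimilarityFunction(3) is SimilarityFunction.DOT_PRODUCT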
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# attempt to fix the fork error on macOS; it seems to have no effect, so the
# variable may still need to be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.19'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful when running
    matplotlib/seaborn with many parallel plot generators against the Ubuntu
    default of `ulimit -n 1024` or the OS X El Capitan default of 256; the
    change is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
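if __name__ == '__main__':
    # Hedged sketch: sanity-check the module-level setup above; this only
    # reads names defined in this file and starts no Flow or server.
    print(f'jina {__version__} (proto {__proto_version__}, docarray {__docarray_version__})')
    print('nofile soft/hard limits:', _set_nofile())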
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# attempt to fix the fork error on macOS; it seems to have no effect, so the
# variable may still need to be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.18'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful when running
    matplotlib/seaborn with many parallel plot generators against the Ubuntu
    default of `ulimit -n 1024` or the OS X El Capitan default of 256; the
    change is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video.video_ndarray import VideoNdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
from docarray.utils._internal.misc import (
is_jax_available,
is_tf_available,
is_torch_available,
)
jax_available = is_jax_available()
if jax_available:
import jax.numpy as jnp
from docarray.typing.tensor.jaxarray import JaxArray # noqa: F401
from docarray.typing.tensor.video.video_jax_array import VideoJaxArray
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
from docarray.typing.tensor.video.video_tensorflow_tensor import (
VideoTensorFlowTensor,
)
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar("T", bound="VideoTensor")
class VideoTensor(AnyTensor, VideoTensorMixin):
"""
    Represents a video tensor object that can be used with TensorFlow, PyTorch, and NumPy.
---
'''python
from docarray import BaseDoc
from docarray.typing import VideoTensor
class MyVideoDoc(BaseDoc):
video: VideoTensor
# Example usage with TensorFlow:
import tensorflow as tf
    doc = MyVideoDoc(video=tf.zeros((1000, 2)))
type(doc.video) # VideoTensorFlowTensor
# Example usage with PyTorch:
import torch
doc = MyVideoDoc(video=torch.zeros(1000, 2))
type(doc.video) # VideoTorchTensor
# Example usage with NumPy:
import numpy as np
doc = MyVideoDoc(video=np.zeros((1000, 2)))
type(doc.video) # VideoNdArray
'''
---
Returns:
        Union[VideoTorchTensor, VideoTensorFlowTensor, VideoJaxArray, VideoNdArray]: The validated and converted video tensor.
    Raises:
        TypeError: If the input value is not a compatible type (torch.Tensor, tensorflow.Tensor, jax.numpy.ndarray, numpy.ndarray).
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: "ModelField",
config: "BaseConfig",
):
if torch_available:
if isinstance(value, TorchTensor):
return cast(VideoTorchTensor, value)
elif isinstance(value, torch.Tensor):
return VideoTorchTensor._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return cast(VideoTensorFlowTensor, value)
elif isinstance(value, tf.Tensor):
return VideoTensorFlowTensor._docarray_from_native(value) # noqa
if jax_available:
if isinstance(value, JaxArray):
return cast(VideoJaxArray, value)
elif isinstance(value, jnp.ndarray):
return VideoJaxArray._docarray_from_native(value) # noqa
if isinstance(value, VideoNdArray):
return cast(VideoNdArray, value)
if isinstance(value, np.ndarray):
try:
return VideoNdArray.validate(value, field, config)
except Exception as e: # noqa
raise e
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
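if __name__ == "__main__":
    # Hedged sketch: pydantic validation dispatches on the input framework,
    # so a plain ndarray lands on VideoNdArray per the branches above; this
    # assumes pydantic v1's parse_obj_as driving __get_validators__.
    from pydantic import parse_obj_as
    tensor = parse_obj_as(VideoTensor, np.zeros((10, 224, 224, 3)))
    print(type(tensor).__name__)  # VideoNdArray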
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video.video_ndarray import VideoNdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
from docarray.typing.tensor.video.video_tensorflow_tensor import (
VideoTensorFlowTensor,
)
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar("T", bound="VideoTensor")
class VideoTensor(AnyTensor, VideoTensorMixin):
"""
    Represents a video tensor object that can be used with TensorFlow, PyTorch, and NumPy.
---
'''python
from docarray import BaseDoc
from docarray.typing import VideoTensor
class MyVideoDoc(BaseDoc):
video: VideoTensor
# Example usage with TensorFlow:
import tensorflow as tf
    doc = MyVideoDoc(video=tf.zeros((1000, 2)))
type(doc.video) # VideoTensorFlowTensor
# Example usage with PyTorch:
import torch
doc = MyVideoDoc(video=torch.zeros(1000, 2))
type(doc.video) # VideoTorchTensor
# Example usage with NumPy:
import numpy as np
doc = MyVideoDoc(video=np.zeros((1000, 2)))
type(doc.video) # VideoNdArray
'''
---
Returns:
        Union[VideoTorchTensor, VideoTensorFlowTensor, VideoNdArray]: The validated and converted video tensor.
Raises:
TypeError: If the input value is not a compatible type (torch.Tensor, tensorflow.Tensor, numpy.ndarray).
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: "ModelField",
config: "BaseConfig",
):
if torch_available:
if isinstance(value, TorchTensor):
return cast(VideoTorchTensor, value)
elif isinstance(value, torch.Tensor):
return VideoTorchTensor._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return cast(VideoTensorFlowTensor, value)
elif isinstance(value, tf.Tensor):
return VideoTensorFlowTensor._docarray_from_native(value) # noqa
if isinstance(value, VideoNdArray):
return cast(VideoNdArray, value)
if isinstance(value, np.ndarray):
try:
return VideoNdArray.validate(value, field, config)
except Exception as e: # noqa
raise e
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmdet.core import BboxOverlaps2D, bbox_overlaps
def test_bbox_overlaps_2d(eps=1e-7):
def _construct_bbox(num_bbox=None):
img_h = int(np.random.randint(3, 1000))
img_w = int(np.random.randint(3, 1000))
if num_bbox is None:
num_bbox = np.random.randint(1, 10)
x1y1 = torch.rand((num_bbox, 2))
x2y2 = torch.max(torch.rand((num_bbox, 2)), x1y1)
bboxes = torch.cat((x1y1, x2y2), -1)
bboxes[:, 0::2] *= img_w
bboxes[:, 1::2] *= img_h
return bboxes, num_bbox
# is_aligned is True, bboxes.size(-1) == 5 (include score)
self = BboxOverlaps2D()
bboxes1, num_bbox = _construct_bbox()
bboxes2, _ = _construct_bbox(num_bbox)
bboxes1 = torch.cat((bboxes1, torch.rand((num_bbox, 1))), 1)
bboxes2 = torch.cat((bboxes2, torch.rand((num_bbox, 1))), 1)
gious = self(bboxes1, bboxes2, 'giou', True)
assert gious.size() == (num_bbox, ), gious.size()
assert torch.all(gious >= -1) and torch.all(gious <= 1)
# is_aligned is True, bboxes1.size(-2) == 0
bboxes1 = torch.empty((0, 4))
bboxes2 = torch.empty((0, 4))
gious = self(bboxes1, bboxes2, 'giou', True)
assert gious.size() == (0, ), gious.size()
assert torch.all(gious == torch.empty((0, )))
assert torch.all(gious >= -1) and torch.all(gious <= 1)
# is_aligned is True, and bboxes.ndims > 2
bboxes1, num_bbox = _construct_bbox()
bboxes2, _ = _construct_bbox(num_bbox)
bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1)
# test assertion when batch dim is not the same
with pytest.raises(AssertionError):
self(bboxes1, bboxes2.unsqueeze(0).repeat(3, 1, 1), 'giou', True)
bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1)
gious = self(bboxes1, bboxes2, 'giou', True)
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (2, num_bbox)
bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1, 1)
bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1, 1)
gious = self(bboxes1, bboxes2, 'giou', True)
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (2, 2, num_bbox)
# is_aligned is False
bboxes1, num_bbox1 = _construct_bbox()
bboxes2, num_bbox2 = _construct_bbox()
gious = self(bboxes1, bboxes2, 'giou')
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (num_bbox1, num_bbox2)
# is_aligned is False, and bboxes.ndims > 2
bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1)
bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1)
gious = self(bboxes1, bboxes2, 'giou')
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (2, num_bbox1, num_bbox2)
bboxes1 = bboxes1.unsqueeze(0)
bboxes2 = bboxes2.unsqueeze(0)
gious = self(bboxes1, bboxes2, 'giou')
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (1, 2, num_bbox1, num_bbox2)
# is_aligned is False, bboxes1.size(-2) == 0
gious = self(torch.empty(1, 2, 0, 4), bboxes2, 'giou')
assert torch.all(gious == torch.empty(1, 2, 0, bboxes2.size(-2)))
assert torch.all(gious >= -1) and torch.all(gious <= 1)
# test allclose between bbox_overlaps and the original official
# implementation.
bboxes1 = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[32, 32, 38, 42],
])
bboxes2 = torch.FloatTensor([
[0, 0, 10, 20],
[0, 10, 10, 19],
[10, 10, 20, 20],
])
gious = bbox_overlaps(bboxes1, bboxes2, 'giou', is_aligned=True, eps=eps)
gious = gious.numpy().round(4)
    # the ground-truth values are given to four decimal places.
expected_gious = np.array([0.5000, -0.0500, -0.8214])
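    # worked check: for the first pair, intersection = 10 * 10 = 100 and
    # union = 100 + 200 - 100 = 200, so IoU = 0.5; the enclosing box equals
    # the union, hence GIoU = 0.5 - 0/200 = 0.5000. For the second pair the
    # boxes are disjoint (IoU = 0) with union 190 and enclosing area 200,
    # giving GIoU = 0 - (200 - 190)/200 = -0.0500.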
assert np.allclose(gious, expected_gious, rtol=0, atol=eps)
# test mode 'iof'
ious = bbox_overlaps(bboxes1, bboxes2, 'iof', is_aligned=True, eps=eps)
assert torch.all(ious >= -1) and torch.all(ious <= 1)
assert ious.size() == (bboxes1.size(0), )
ious = bbox_overlaps(bboxes1, bboxes2, 'iof', eps=eps)
assert torch.all(ious >= -1) and torch.all(ious <= 1)
assert ious.size() == (bboxes1.size(0), bboxes2.size(0))
|
import numpy as np
import pytest
import torch
from mmdet.core import BboxOverlaps2D, bbox_overlaps
def test_bbox_overlaps_2d(eps=1e-7):
def _construct_bbox(num_bbox=None):
img_h = int(np.random.randint(3, 1000))
img_w = int(np.random.randint(3, 1000))
if num_bbox is None:
num_bbox = np.random.randint(1, 10)
x1y1 = torch.rand((num_bbox, 2))
x2y2 = torch.max(torch.rand((num_bbox, 2)), x1y1)
bboxes = torch.cat((x1y1, x2y2), -1)
bboxes[:, 0::2] *= img_w
bboxes[:, 1::2] *= img_h
return bboxes, num_bbox
# is_aligned is True, bboxes.size(-1) == 5 (include score)
self = BboxOverlaps2D()
bboxes1, num_bbox = _construct_bbox()
bboxes2, _ = _construct_bbox(num_bbox)
bboxes1 = torch.cat((bboxes1, torch.rand((num_bbox, 1))), 1)
bboxes2 = torch.cat((bboxes2, torch.rand((num_bbox, 1))), 1)
gious = self(bboxes1, bboxes2, 'giou', True)
assert gious.size() == (num_bbox, ), gious.size()
assert torch.all(gious >= -1) and torch.all(gious <= 1)
# is_aligned is True, bboxes1.size(-2) == 0
bboxes1 = torch.empty((0, 4))
bboxes2 = torch.empty((0, 4))
gious = self(bboxes1, bboxes2, 'giou', True)
assert gious.size() == (0, ), gious.size()
assert torch.all(gious == torch.empty((0, )))
assert torch.all(gious >= -1) and torch.all(gious <= 1)
# is_aligned is True, and bboxes.ndims > 2
bboxes1, num_bbox = _construct_bbox()
bboxes2, _ = _construct_bbox(num_bbox)
bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1)
# test assertion when batch dim is not the same
with pytest.raises(AssertionError):
self(bboxes1, bboxes2.unsqueeze(0).repeat(3, 1, 1), 'giou', True)
bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1)
gious = self(bboxes1, bboxes2, 'giou', True)
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (2, num_bbox)
bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1, 1)
bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1, 1)
gious = self(bboxes1, bboxes2, 'giou', True)
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (2, 2, num_bbox)
# is_aligned is False
bboxes1, num_bbox1 = _construct_bbox()
bboxes2, num_bbox2 = _construct_bbox()
gious = self(bboxes1, bboxes2, 'giou')
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (num_bbox1, num_bbox2)
# is_aligned is False, and bboxes.ndims > 2
bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1)
bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1)
gious = self(bboxes1, bboxes2, 'giou')
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (2, num_bbox1, num_bbox2)
bboxes1 = bboxes1.unsqueeze(0)
bboxes2 = bboxes2.unsqueeze(0)
gious = self(bboxes1, bboxes2, 'giou')
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (1, 2, num_bbox1, num_bbox2)
# is_aligned is False, bboxes1.size(-2) == 0
gious = self(torch.empty(1, 2, 0, 4), bboxes2, 'giou')
assert torch.all(gious == torch.empty(1, 2, 0, bboxes2.size(-2)))
assert torch.all(gious >= -1) and torch.all(gious <= 1)
# test allclose between bbox_overlaps and the original official
# implementation.
bboxes1 = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[32, 32, 38, 42],
])
bboxes2 = torch.FloatTensor([
[0, 0, 10, 20],
[0, 10, 10, 19],
[10, 10, 20, 20],
])
gious = bbox_overlaps(bboxes1, bboxes2, 'giou', is_aligned=True, eps=eps)
gious = gious.numpy().round(4)
    # the ground-truth values are given to four decimal places.
expected_gious = np.array([0.5000, -0.0500, -0.8214])
assert np.allclose(gious, expected_gious, rtol=0, atol=eps)
# test mode 'iof'
ious = bbox_overlaps(bboxes1, bboxes2, 'iof', is_aligned=True, eps=eps)
assert torch.all(ious >= -1) and torch.all(ious <= 1)
assert ious.size() == (bboxes1.size(0), )
ious = bbox_overlaps(bboxes1, bboxes2, 'iof', eps=eps)
assert torch.all(ious >= -1) and torch.all(ious <= 1)
assert ious.size() == (bboxes1.size(0), bboxes2.size(0))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='YOLOF',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='DilatedEncoder',
in_channels=2048,
out_channels=512,
block_mid_channels=128,
num_residual_blocks=4),
bbox_head=dict(
type='YOLOFHead',
num_classes=80,
in_channels=512,
reg_decoded_bbox=True,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[1, 2, 4, 8, 16],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1., 1., 1., 1.],
add_ctr_clamp=True,
ctr_clamp=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='UniformAssigner', pos_ignore_thr=0.15, neg_ignore_thr=0.7),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optimizer = dict(
type='SGD',
lr=0.12,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(
norm_decay_mult=0., custom_keys={'backbone': dict(lr_mult=1. / 3)}))
lr_config = dict(warmup_iters=1500, warmup_ratio=0.00066667)
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
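# caffe-style pretrained weights expect BGR input (hence to_rgb=False) and
# mean subtraction only, so std stays at 1.0 to match the backbone above.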
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='RandomShift', shift_ratio=0.5, max_shift_px=32),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=8,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
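# Hedged note: under the linear scaling rule this implies, the runner scales
# `optimizer.lr` by (actual batch size / base_batch_size); e.g. 4 GPUs x 8
# samples per GPU would give lr = 0.12 * 32 / 64 = 0.06.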
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='YOLOF',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='DilatedEncoder',
in_channels=2048,
out_channels=512,
block_mid_channels=128,
num_residual_blocks=4),
bbox_head=dict(
type='YOLOFHead',
num_classes=80,
in_channels=512,
reg_decoded_bbox=True,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[1, 2, 4, 8, 16],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1., 1., 1., 1.],
add_ctr_clamp=True,
ctr_clamp=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='UniformAssigner', pos_ignore_thr=0.15, neg_ignore_thr=0.7),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optimizer = dict(
type='SGD',
lr=0.12,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(
norm_decay_mult=0., custom_keys={'backbone': dict(lr_mult=1. / 3)}))
lr_config = dict(warmup_iters=1500, warmup_ratio=0.00066667)
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='RandomShift', shift_ratio=0.5, max_shift_px=32),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=8,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|