input | output
---|---
from typing import List, Optional, Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms import functional_tensor as _FT
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
normalize_image_tensor = _FT.normalize
def normalize_video(video: torch.Tensor, mean: List[float], std: List[float], inplace: bool = False) -> torch.Tensor:
return normalize_image_tensor(video, mean, std, inplace=inplace)
def normalize(
inpt: Union[features.TensorImageTypeJIT, features.TensorVideoTypeJIT],
mean: List[float],
std: List[float],
inplace: bool = False,
) -> torch.Tensor:
if torch.jit.is_scripting():
correct_type = isinstance(inpt, torch.Tensor)
else:
correct_type = features.is_simple_tensor(inpt) or isinstance(inpt, (features.Image, features.Video))
inpt = inpt.as_subclass(torch.Tensor)
if not correct_type:
raise TypeError(f"img should be Tensor Image. Got {type(inpt)}")
# Image or Video type should not be retained after normalization due to unknown data range
# Thus we return Tensor for input Image
return normalize_image_tensor(inpt, mean=mean, std=std, inplace=inplace)
def gaussian_blur_image_tensor(
image: torch.Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None
) -> torch.Tensor:
# TODO: consider deprecating integer values for sigma in the future
if isinstance(kernel_size, int):
kernel_size = [kernel_size, kernel_size]
if len(kernel_size) != 2:
raise ValueError(f"If kernel_size is a sequence its length should be 2. Got {len(kernel_size)}")
for ksize in kernel_size:
if ksize % 2 == 0 or ksize < 0:
raise ValueError(f"kernel_size should have odd and positive integers. Got {kernel_size}")
if sigma is None:
sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size]
if sigma is not None and not isinstance(sigma, (int, float, list, tuple)):
raise TypeError(f"sigma should be either float or sequence of floats. Got {type(sigma)}")
if isinstance(sigma, (int, float)):
sigma = [float(sigma), float(sigma)]
if isinstance(sigma, (list, tuple)) and len(sigma) == 1:
sigma = [sigma[0], sigma[0]]
if len(sigma) != 2:
raise ValueError(f"If sigma is a sequence, its length should be 2. Got {len(sigma)}")
for s in sigma:
if s <= 0.0:
raise ValueError(f"sigma should have positive values. Got {sigma}")
if image.numel() == 0:
return image
shape = image.shape
if image.ndim > 4:
image = image.reshape((-1,) + shape[-3:])
needs_unsquash = True
else:
needs_unsquash = False
output = _FT.gaussian_blur(image, kernel_size, sigma)
if needs_unsquash:
output = output.reshape(shape)
return output
@torch.jit.unused
def gaussian_blur_image_pil(
image: PIL.Image.Image, kernel_size: List[int], sigma: Optional[List[float]] = None
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = gaussian_blur_image_tensor(t_img, kernel_size=kernel_size, sigma=sigma)
return to_pil_image(output, mode=image.mode)
def gaussian_blur_video(
video: torch.Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None
) -> torch.Tensor:
return gaussian_blur_image_tensor(video, kernel_size, sigma)
def gaussian_blur(
inpt: features.InputTypeJIT, kernel_size: List[int], sigma: Optional[List[float]] = None
) -> features.InputTypeJIT:
if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, features._Feature)):
return gaussian_blur_image_tensor(inpt, kernel_size=kernel_size, sigma=sigma)
elif isinstance(inpt, features._Feature):
return inpt.gaussian_blur(kernel_size=kernel_size, sigma=sigma)
else:
return gaussian_blur_image_pil(inpt, kernel_size=kernel_size, sigma=sigma)
|
from typing import List, Optional, Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms import functional_tensor as _FT
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
normalize_image_tensor = _FT.normalize
def normalize_video(video: torch.Tensor, mean: List[float], std: List[float], inplace: bool = False) -> torch.Tensor:
return normalize_image_tensor(video, mean, std, inplace=inplace)
def normalize(
inpt: Union[features.TensorImageTypeJIT, features.TensorVideoTypeJIT],
mean: List[float],
std: List[float],
inplace: bool = False,
) -> torch.Tensor:
if torch.jit.is_scripting():
correct_type = isinstance(inpt, torch.Tensor)
else:
correct_type = features.is_simple_tensor(inpt) or isinstance(inpt, (features.Image, features.Video))
inpt = inpt.as_subclass(torch.Tensor)
if not correct_type:
raise TypeError(f"img should be Tensor Image. Got {type(inpt)}")
# Image or Video type should not be retained after normalization due to unknown data range
# Thus we return Tensor for input Image
return normalize_image_tensor(inpt, mean=mean, std=std, inplace=inplace)
def gaussian_blur_image_tensor(
image: torch.Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None
) -> torch.Tensor:
# TODO: consider deprecating integer values for sigma in the future
if isinstance(kernel_size, int):
kernel_size = [kernel_size, kernel_size]
if len(kernel_size) != 2:
raise ValueError(f"If kernel_size is a sequence its length should be 2. Got {len(kernel_size)}")
for ksize in kernel_size:
if ksize % 2 == 0 or ksize < 0:
raise ValueError(f"kernel_size should have odd and positive integers. Got {kernel_size}")
if sigma is None:
sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size]
if sigma is not None and not isinstance(sigma, (int, float, list, tuple)):
raise TypeError(f"sigma should be either float or sequence of floats. Got {type(sigma)}")
if isinstance(sigma, (int, float)):
sigma = [float(sigma), float(sigma)]
if isinstance(sigma, (list, tuple)) and len(sigma) == 1:
sigma = [sigma[0], sigma[0]]
if len(sigma) != 2:
raise ValueError(f"If sigma is a sequence, its length should be 2. Got {len(sigma)}")
for s in sigma:
if s <= 0.0:
raise ValueError(f"sigma should have positive values. Got {sigma}")
if image.numel() == 0:
return image
shape = image.shape
if image.ndim > 4:
image = image.view((-1,) + shape[-3:])
needs_unsquash = True
else:
needs_unsquash = False
output = _FT.gaussian_blur(image, kernel_size, sigma)
if needs_unsquash:
output = output.view(shape)
return output
@torch.jit.unused
def gaussian_blur_image_pil(
image: PIL.Image.Image, kernel_size: List[int], sigma: Optional[List[float]] = None
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = gaussian_blur_image_tensor(t_img, kernel_size=kernel_size, sigma=sigma)
return to_pil_image(output, mode=image.mode)
def gaussian_blur_video(
video: torch.Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None
) -> torch.Tensor:
return gaussian_blur_image_tensor(video, kernel_size, sigma)
def gaussian_blur(
inpt: features.InputTypeJIT, kernel_size: List[int], sigma: Optional[List[float]] = None
) -> features.InputTypeJIT:
if isinstance(inpt, torch.Tensor) and (torch.jit.is_scripting() or not isinstance(inpt, features._Feature)):
return gaussian_blur_image_tensor(inpt, kernel_size=kernel_size, sigma=sigma)
elif isinstance(inpt, features._Feature):
return inpt.gaussian_blur(kernel_size=kernel_size, sigma=sigma)
else:
return gaussian_blur_image_pil(inpt, kernel_size=kernel_size, sigma=sigma)
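A minimal usage sketch of the dispatchers defined above; the tensor shape, kernel size, and sigma values are illustrative assumptions rather than part of the original module.
```python
import torch

# Hypothetical example: blur a random CHW float image, then normalize it.
img = torch.rand(3, 32, 32)

# Plain tensors dispatch to gaussian_blur_image_tensor.
blurred = gaussian_blur(img, kernel_size=[5, 5], sigma=[1.5, 1.5])

# normalize always returns a plain torch.Tensor, even for feature subclasses.
out = normalize(blurred, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
print(out.shape)  # torch.Size([3, 32, 32])
```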
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.builder import HEADS
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from .fused_semantic_head import FusedSemanticHead
@HEADS.register_module()
class SCNetSemanticHead(FusedSemanticHead):
"""Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
conv_to_res (bool, optional): if True, change the conv layers to
``SimplifiedBasicBlock``.
"""
def __init__(self, conv_to_res=True, **kwargs):
super(SCNetSemanticHead, self).__init__(**kwargs)
self.conv_to_res = conv_to_res
if self.conv_to_res:
num_res_blocks = self.num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
self.in_channels,
self.conv_out_channels,
num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.num_convs = num_res_blocks
|
from mmdet.models.builder import HEADS
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from .fused_semantic_head import FusedSemanticHead
@HEADS.register_module()
class SCNetSemanticHead(FusedSemanticHead):
"""Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
conv_to_res (bool, optional): if True, change the conv layers to
``SimplifiedBasicBlock``.
"""
def __init__(self, conv_to_res=True, **kwargs):
super(SCNetSemanticHead, self).__init__(**kwargs)
self.conv_to_res = conv_to_res
if self.conv_to_res:
num_res_blocks = self.num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
self.in_channels,
self.conv_out_channels,
num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.num_convs = num_res_blocks
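A hedged sketch of how this head is typically wired into a model config; the parent FusedSemanticHead arguments and values below are assumptions modelled on common SCNet configs, not taken from this file.
```python
# Hypothetical mmdet config snippet (argument values are assumptions).
semantic_head = dict(
    type='SCNetSemanticHead',
    num_ins=5,
    fusion_level=1,
    num_convs=4,
    in_channels=256,
    conv_out_channels=256,
    num_classes=183,
    conv_to_res=True)
```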
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='ImageTensorFlowTensor')
@_register_proto(proto_type_name='image_tensorflow_tensor')
class ImageTensorFlowTensor(
TensorFlowTensor, AbstractImageTensor, metaclass=metaTensorFlow
):
"""
Subclass of TensorFlowTensor, to represent an image tensor.
Adds image-specific features to the tensor.
For instance, the ability to convert the tensor back to image bytes, which
are optimized to send over the wire.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageTensorFlowTensor, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageTensorFlowTensor]
url: Optional[ImageUrl]
bytes: Optional[bytes]
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
```
---
"""
...
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='ImageTensorFlowTensor')
@_register_proto(proto_type_name='image_tensorflow_tensor')
class ImageTensorFlowTensor(
TensorFlowTensor, AbstractImageTensor, metaclass=metaTensorFlow
):
"""
Subclass of TensorFlowTensor, to represent an image tensor.
Adds image-specific features to the tensor.
For instance, the ability to convert the tensor back to image bytes, which
are optimized to send over the wire.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageTensorFlowTensor, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageTensorFlowTensor]
url: Optional[ImageUrl]
bytes: Optional[bytes]
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
"""
...
|
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyCustomLoss, CrossEntropyLoss,
binary_cross_entropy, cross_entropy,
mask_cross_entropy)
from .ddq_detr_aux_loss import DDQAuxLoss
from .dice_loss import DiceLoss
from .eqlv2_loss import EQLV2Loss
from .focal_loss import FocalCustomLoss, FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, EIoULoss, GIoULoss,
IoULoss, SIoULoss, bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .l2_loss import L2Loss
from .margin_loss import MarginL2Loss
from .mse_loss import MSELoss, mse_loss
from .multipos_cross_entropy_loss import MultiPosCrossEntropyLoss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .triplet_loss import TripletLoss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss',
'EIoULoss', 'SIoULoss', 'GHMC', 'GHMR', 'reduce_loss',
'weight_reduce_loss', 'weighted_loss', 'L1Loss', 'l1_loss', 'isr_p',
'carl_loss', 'AssociativeEmbeddingLoss', 'GaussianFocalLoss',
'QualityFocalLoss', 'DistributionFocalLoss', 'VarifocalLoss',
'KnowledgeDistillationKLDivLoss', 'SeesawLoss', 'DiceLoss', 'EQLV2Loss',
'MarginL2Loss', 'MultiPosCrossEntropyLoss', 'L2Loss', 'TripletLoss',
'DDQAuxLoss', 'CrossEntropyCustomLoss', 'FocalCustomLoss'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .dice_loss import DiceLoss
from .eqlv2_loss import EQLV2Loss
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, EIoULoss, GIoULoss,
IoULoss, SIoULoss, bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .l2_loss import L2Loss
from .margin_loss import MarginL2Loss
from .mse_loss import MSELoss, mse_loss
from .multipos_cross_entropy_loss import MultiPosCrossEntropyLoss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .triplet_loss import TripletLoss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss',
'EIoULoss', 'SIoULoss', 'GHMC', 'GHMR', 'reduce_loss',
'weight_reduce_loss', 'weighted_loss', 'L1Loss', 'l1_loss', 'isr_p',
'carl_loss', 'AssociativeEmbeddingLoss', 'GaussianFocalLoss',
'QualityFocalLoss', 'DistributionFocalLoss', 'VarifocalLoss',
'KnowledgeDistillationKLDivLoss', 'SeesawLoss', 'DiceLoss', 'EQLV2Loss',
'MarginL2Loss', 'MultiPosCrossEntropyLoss', 'L2Loss', 'TripletLoss'
]
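As a quick illustration of the exported interface, here is a hedged sketch that instantiates one of the losses directly; the constructor and call arguments follow the common mmdet loss convention and are assumptions to verify against the class itself.
```python
import torch
from mmdet.models.losses import CrossEntropyLoss

# Hypothetical sketch: compute a classification loss on random logits.
loss_cls = CrossEntropyLoss(use_sigmoid=False, loss_weight=1.0)
cls_score = torch.randn(4, 10)        # (num_samples, num_classes)
labels = torch.randint(0, 10, (4,))   # ground-truth class indices
print(loss_cls(cls_score, labels))
```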
|
import json
import re
from typing import TypeVar
import yaml
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from pydantic import BaseModel, ValidationError
from langchain.output_parsers.format_instructions import YAML_FORMAT_INSTRUCTIONS
T = TypeVar("T", bound=BaseModel)
class YamlOutputParser(BaseOutputParser[T]):
"""Parse YAML output using a pydantic model."""
pydantic_object: type[T]
"""The pydantic model to parse."""
pattern: re.Pattern = re.compile(
r"^```(?:ya?ml)?(?P<yaml>[^`]*)", re.MULTILINE | re.DOTALL
)
"""Regex pattern to match yaml code blocks
within triple backticks with optional yaml or yml prefix."""
def parse(self, text: str) -> T:
try:
# Greedy search for 1st yaml candidate.
match = re.search(self.pattern, text.strip())
yaml_str = ""
if match:
yaml_str = match.group("yaml")
else:
# If no backticks were present, try to parse the entire output as yaml.
yaml_str = text
json_object = yaml.safe_load(yaml_str)
if hasattr(self.pydantic_object, "model_validate"):
return self.pydantic_object.model_validate(json_object)
else:
return self.pydantic_object.parse_obj(json_object)
except (yaml.YAMLError, ValidationError) as e:
name = self.pydantic_object.__name__
msg = f"Failed to parse {name} from completion {text}. Got: {e}"
raise OutputParserException(msg, llm_output=text) from e
def get_format_instructions(self) -> str:
# Copy schema to avoid altering original Pydantic schema.
schema = {k: v for k, v in self.pydantic_object.schema().items()}
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure yaml in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema)
return YAML_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "yaml"
@property
def OutputType(self) -> type[T]:
return self.pydantic_object
|
import json
import re
from typing import TypeVar
import yaml
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from pydantic import BaseModel, ValidationError
from langchain.output_parsers.format_instructions import YAML_FORMAT_INSTRUCTIONS
T = TypeVar("T", bound=BaseModel)
class YamlOutputParser(BaseOutputParser[T]):
"""Parse YAML output using a pydantic model."""
pydantic_object: type[T]
"""The pydantic model to parse."""
pattern: re.Pattern = re.compile(
r"^```(?:ya?ml)?(?P<yaml>[^`]*)", re.MULTILINE | re.DOTALL
)
"""Regex pattern to match yaml code blocks
within triple backticks with optional yaml or yml prefix."""
def parse(self, text: str) -> T:
try:
# Greedy search for 1st yaml candidate.
match = re.search(self.pattern, text.strip())
yaml_str = ""
if match:
yaml_str = match.group("yaml")
else:
# If no backticks were present, try to parse the entire output as yaml.
yaml_str = text
json_object = yaml.safe_load(yaml_str)
if hasattr(self.pydantic_object, "model_validate"):
return self.pydantic_object.model_validate(json_object)
else:
return self.pydantic_object.parse_obj(json_object)
except (yaml.YAMLError, ValidationError) as e:
name = self.pydantic_object.__name__
msg = f"Failed to parse {name} from completion {text}. Got: {e}"
raise OutputParserException(msg, llm_output=text) from e
def get_format_instructions(self) -> str:
# Copy schema to avoid altering original Pydantic schema.
schema = {k: v for k, v in self.pydantic_object.schema().items()}
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure yaml in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema)
return YAML_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "yaml"
@property
def OutputType(self) -> type[T]:
return self.pydantic_object
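A minimal usage sketch for the parser above; the Joke model and the completion string are illustrative assumptions.
```python
from pydantic import BaseModel


class Joke(BaseModel):
    setup: str
    punchline: str


# Hypothetical LLM completion; the code fence is built programmatically so
# the snippet stays readable here.
fence = "`" * 3
completion = (
    f"{fence}yaml\n"
    "setup: Why did the developer go broke?\n"
    "punchline: Because he used up all his cache.\n"
    f"{fence}"
)

parser = YamlOutputParser(pydantic_object=Joke)
print(parser.parse(completion).punchline)
```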
|
"""Develop installable templates."""
import re
import shutil
import subprocess
from pathlib import Path
from typing import Annotated, Optional
import typer
from langchain_cli.utils.packages import get_langserve_export, get_package_root
package_cli = typer.Typer(no_args_is_help=True, add_completion=False)
@package_cli.command()
def new(
name: Annotated[str, typer.Argument(help="The name of the folder to create")],
with_poetry: Annotated[
bool,
typer.Option("--with-poetry/--no-poetry", help="Don't run poetry install"),
] = False,
) -> None:
"""Creates a new template package."""
computed_name = name if name != "." else Path.cwd().name
destination_dir = Path.cwd() / name if name != "." else Path.cwd()
# copy over template from ../package_template
project_template_dir = Path(__file__).parents[1] / "package_template"
shutil.copytree(project_template_dir, destination_dir, dirs_exist_ok=name == ".")
package_name_split = computed_name.split("/")
package_name = (
package_name_split[-2]
if len(package_name_split) > 1 and package_name_split[-1] == ""
else package_name_split[-1]
)
module_name = re.sub(
r"[^a-zA-Z0-9_]",
"_",
package_name,
)
# generate app route code
chain_name = f"{module_name}_chain"
app_route_code = (
f"from {module_name} import chain as {chain_name}\n\n"
f'add_routes(app, {chain_name}, path="/{package_name}")'
)
# replace template strings
pyproject = destination_dir / "pyproject.toml"
pyproject_contents = pyproject.read_text()
pyproject.write_text(
pyproject_contents.replace("__package_name__", package_name).replace(
"__module_name__", module_name
)
)
# move module folder
package_dir = destination_dir / module_name
shutil.move(destination_dir / "package_template", package_dir)
# update init
init = package_dir / "__init__.py"
init_contents = init.read_text()
init.write_text(init_contents.replace("__module_name__", module_name))
# replace readme
readme = destination_dir / "README.md"
readme_contents = readme.read_text()
readme.write_text(
readme_contents.replace("__package_name__", package_name).replace(
"__app_route_code__", app_route_code
)
)
# poetry install
if with_poetry:
subprocess.run(["poetry", "install"], cwd=destination_dir)
@package_cli.command()
def serve(
*,
port: Annotated[
Optional[int], typer.Option(help="The port to run the server on")
] = None,
host: Annotated[
Optional[str], typer.Option(help="The host to run the server on")
] = None,
configurable: Annotated[
Optional[bool],
typer.Option(
"--configurable/--no-configurable",
help="Whether to include a configurable route",
),
] = None, # defaults to `not chat_playground`
chat_playground: Annotated[
bool,
typer.Option(
"--chat-playground/--no-chat-playground",
help="Whether to include a chat playground route",
),
] = False,
) -> None:
"""Starts a demo app for this template."""
# load pyproject.toml
project_dir = get_package_root()
pyproject = project_dir / "pyproject.toml"
# get langserve export - throws KeyError if invalid
get_langserve_export(pyproject)
host_str = host if host is not None else "127.0.0.1"
script = (
"langchain_cli.dev_scripts:create_demo_server_chat"
if chat_playground
else (
"langchain_cli.dev_scripts:create_demo_server_configurable"
if configurable
else "langchain_cli.dev_scripts:create_demo_server"
)
)
import uvicorn
uvicorn.run(
script,
factory=True,
reload=True,
port=port if port is not None else 8000,
host=host_str,
)
@package_cli.command()
def list(contains: Annotated[Optional[str], typer.Argument()] = None) -> None:
"""List all or search for available templates."""
from langchain_cli.utils.github import list_packages
packages = list_packages(contains=contains)
for package in packages:
typer.echo(package)
|
"""
Develop installable templates.
"""
import re
import shutil
import subprocess
from pathlib import Path
from typing import Annotated, Optional
import typer
from langchain_cli.utils.packages import get_langserve_export, get_package_root
package_cli = typer.Typer(no_args_is_help=True, add_completion=False)
@package_cli.command()
def new(
name: Annotated[str, typer.Argument(help="The name of the folder to create")],
with_poetry: Annotated[
bool,
typer.Option("--with-poetry/--no-poetry", help="Don't run poetry install"),
] = False,
):
"""
Creates a new template package.
"""
computed_name = name if name != "." else Path.cwd().name
destination_dir = Path.cwd() / name if name != "." else Path.cwd()
# copy over template from ../package_template
project_template_dir = Path(__file__).parents[1] / "package_template"
shutil.copytree(project_template_dir, destination_dir, dirs_exist_ok=name == ".")
package_name_split = computed_name.split("/")
package_name = (
package_name_split[-2]
if len(package_name_split) > 1 and package_name_split[-1] == ""
else package_name_split[-1]
)
module_name = re.sub(
r"[^a-zA-Z0-9_]",
"_",
package_name,
)
# generate app route code
chain_name = f"{module_name}_chain"
app_route_code = (
f"from {module_name} import chain as {chain_name}\n\n"
f'add_routes(app, {chain_name}, path="/{package_name}")'
)
# replace template strings
pyproject = destination_dir / "pyproject.toml"
pyproject_contents = pyproject.read_text()
pyproject.write_text(
pyproject_contents.replace("__package_name__", package_name).replace(
"__module_name__", module_name
)
)
# move module folder
package_dir = destination_dir / module_name
shutil.move(destination_dir / "package_template", package_dir)
# update init
init = package_dir / "__init__.py"
init_contents = init.read_text()
init.write_text(init_contents.replace("__module_name__", module_name))
# replace readme
readme = destination_dir / "README.md"
readme_contents = readme.read_text()
readme.write_text(
readme_contents.replace("__package_name__", package_name).replace(
"__app_route_code__", app_route_code
)
)
# poetry install
if with_poetry:
subprocess.run(["poetry", "install"], cwd=destination_dir)
@package_cli.command()
def serve(
*,
port: Annotated[
Optional[int], typer.Option(help="The port to run the server on")
] = None,
host: Annotated[
Optional[str], typer.Option(help="The host to run the server on")
] = None,
configurable: Annotated[
Optional[bool],
typer.Option(
"--configurable/--no-configurable",
help="Whether to include a configurable route",
),
] = None, # defaults to `not chat_playground`
chat_playground: Annotated[
bool,
typer.Option(
"--chat-playground/--no-chat-playground",
help="Whether to include a chat playground route",
),
] = False,
) -> None:
"""
Starts a demo app for this template.
"""
# load pyproject.toml
project_dir = get_package_root()
pyproject = project_dir / "pyproject.toml"
# get langserve export - throws KeyError if invalid
get_langserve_export(pyproject)
host_str = host if host is not None else "127.0.0.1"
script = (
"langchain_cli.dev_scripts:create_demo_server_chat"
if chat_playground
else (
"langchain_cli.dev_scripts:create_demo_server_configurable"
if configurable
else "langchain_cli.dev_scripts:create_demo_server"
)
)
import uvicorn
uvicorn.run(
script,
factory=True,
reload=True,
port=port if port is not None else 8000,
host=host_str,
)
@package_cli.command()
def list(contains: Annotated[Optional[str], typer.Argument()] = None) -> None:
"""
List all or search for available templates.
"""
from langchain_cli.utils.github import list_packages
packages = list_packages(contains=contains)
for package in packages:
typer.echo(package)
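A hedged sketch that drives the CLI above in-process with Typer's test runner, reusing the `package_cli` app defined in this module; the template name is illustrative.
```python
from typer.testing import CliRunner

# Hypothetical sketch: exercise the `new` command without the installed
# `langchain` entry point; this writes a template skeleton into the cwd.
runner = CliRunner()
result = runner.invoke(package_cli, ["new", "my-template", "--no-poetry"])
print(result.exit_code, result.output)
```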
|
from jina import Executor, requests
from docarray import DocList
from docarray.documents import TextDoc
class MyExecutor(Executor):
@requests
def foo(self, docs: DocList[TextDoc], **kwargs) -> DocList[TextDoc]:
docs[0].text = 'hello, world!'
docs[1].text = 'goodbye, world!'
return docs
|
from jina import Executor, requests, DocumentArray
class MyExecutor(Executor):
@requests
def foo(self, docs: DocumentArray, **kwargs):
docs[0].text = 'hello, world!'
docs[1].text = 'goodbye, world!'
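A hedged sketch that serves MyExecutor locally, assuming Jina's Deployment API and the DocumentArray-based variant shown immediately above.
```python
from jina import Deployment, Document, DocumentArray

# Hypothetical sketch: spin up the Executor and send two Documents to it.
with Deployment(uses=MyExecutor) as dep:
    docs = dep.post(on='/', inputs=DocumentArray([Document(), Document()]))
    print(docs.texts)  # ['hello, world!', 'goodbye, world!']
```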
|
"""Test HuggingFaceHub embeddings."""
import pytest
from langchain_community.embeddings import HuggingFaceHubEmbeddings
def test_huggingfacehub_embedding_documents() -> None:
"""Test huggingfacehub embeddings."""
documents = ["foo bar"]
embedding = HuggingFaceHubEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
async def test_huggingfacehub_embedding_async_documents() -> None:
"""Test huggingfacehub embeddings."""
documents = ["foo bar"]
embedding = HuggingFaceHubEmbeddings()
output = await embedding.aembed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
def test_huggingfacehub_embedding_query() -> None:
"""Test huggingfacehub embeddings."""
document = "foo bar"
embedding = HuggingFaceHubEmbeddings()
output = embedding.embed_query(document)
assert len(output) == 768
async def test_huggingfacehub_embedding_async_query() -> None:
"""Test huggingfacehub embeddings."""
document = "foo bar"
embedding = HuggingFaceHubEmbeddings()
output = await embedding.aembed_query(document)
assert len(output) == 768
def test_huggingfacehub_embedding_invalid_repo() -> None:
"""Test huggingfacehub embedding repo id validation."""
# Only sentence-transformers models are currently supported.
with pytest.raises(ValueError):
HuggingFaceHubEmbeddings(repo_id="allenai/specter")
|
"""Test HuggingFaceHub embeddings."""
import pytest
from langchain_community.embeddings import HuggingFaceHubEmbeddings
def test_huggingfacehub_embedding_documents() -> None:
"""Test huggingfacehub embeddings."""
documents = ["foo bar"]
embedding = HuggingFaceHubEmbeddings() # type: ignore[call-arg]
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
async def test_huggingfacehub_embedding_async_documents() -> None:
"""Test huggingfacehub embeddings."""
documents = ["foo bar"]
embedding = HuggingFaceHubEmbeddings() # type: ignore[call-arg]
output = await embedding.aembed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
def test_huggingfacehub_embedding_query() -> None:
"""Test huggingfacehub embeddings."""
document = "foo bar"
embedding = HuggingFaceHubEmbeddings() # type: ignore[call-arg]
output = embedding.embed_query(document)
assert len(output) == 768
async def test_huggingfacehub_embedding_async_query() -> None:
"""Test huggingfacehub embeddings."""
document = "foo bar"
embedding = HuggingFaceHubEmbeddings() # type: ignore[call-arg]
output = await embedding.aembed_query(document)
assert len(output) == 768
def test_huggingfacehub_embedding_invalid_repo() -> None:
"""Test huggingfacehub embedding repo id validation."""
# Only sentence-transformers models are currently supported.
with pytest.raises(ValueError):
HuggingFaceHubEmbeddings(repo_id="allenai/specter") # type: ignore[call-arg]
|
# mypy: allow-untyped-defs
import contextlib
import torch
__all__ = [
"start",
"stop",
"profile",
"metal_capture",
"is_metal_capture_enabled",
"is_capturing_metal",
]
def start(mode: str = "interval", wait_until_completed: bool = False) -> None:
r"""Start OS Signpost tracing from MPS backend.
The generated OS Signposts could be recorded and viewed in
XCode Instruments Logging tool.
Args:
mode(str): OS Signpost tracing mode could be "interval", "event",
or both "interval,event".
The interval mode traces the duration of execution of the operations,
whereas event mode marks the completion of executions.
See document `Recording Performance Data`_ for more info.
wait_until_completed(bool): Waits until the MPS stream completes
executing each encoded GPU operation. This helps generate single
dispatches on the trace's timeline.
Note that enabling this option can negatively affect performance.
.. _Recording Performance Data:
https://developer.apple.com/documentation/os/logging/recording_performance_data
"""
mode_normalized = mode.lower().replace(" ", "")
torch._C._mps_profilerStartTrace(mode_normalized, wait_until_completed)
def stop():
r"""Stops generating OS Signpost tracing from MPS backend."""
torch._C._mps_profilerStopTrace()
@contextlib.contextmanager
def profile(mode: str = "interval", wait_until_completed: bool = False):
r"""Context Manager to enabling generating OS Signpost tracing from MPS backend.
Args:
mode(str): OS Signpost tracing mode could be "interval", "event",
or both "interval,event".
The interval mode traces the duration of execution of the operations,
whereas event mode marks the completion of executions.
See document `Recording Performance Data`_ for more info.
wait_until_completed(bool): Waits until the MPS Stream complete
executing each encoded GPU operation. This helps generating single
dispatches on the trace's timeline.
Note that enabling this option would affect the performance negatively.
.. _Recording Performance Data:
https://developer.apple.com/documentation/os/logging/recording_performance_data
"""
try:
start(mode, wait_until_completed)
yield
finally:
stop()
def is_metal_capture_enabled() -> bool:
"""Checks if `metal_capture` context manager is usable
To enable metal capture, set MTL_CAPTURE_ENABLED envvar
"""
return torch._C._mps_isCaptureEnabled() # type: ignore[attr-defined]
def is_capturing_metal() -> bool:
"""Checks if metal capture is in progress"""
return torch._C._mps_isCapturing() # type: ignore[attr-defined]
@contextlib.contextmanager
def metal_capture(fname: str):
"""Context manager that enables capturing of Metal calls into gputrace"""
try:
torch._C._mps_startCapture(fname) # type: ignore[attr-defined]
yield
# Drain all the work that was enqueued during the context call
torch.mps.synchronize()
finally:
torch._C._mps_stopCapture() # type: ignore[attr-defined]
|
# mypy: allow-untyped-defs
import contextlib
import torch
__all__ = [
"start",
"stop",
"profile",
"metal_capture",
"is_metal_capture_enabled",
"is_capturing_metal",
]
def start(mode: str = "interval", wait_until_completed: bool = False) -> None:
r"""Start OS Signpost tracing from MPS backend.
The generated OS Signposts could be recorded and viewed in
XCode Instruments Logging tool.
Args:
mode(str): OS Signpost tracing mode could be "interval", "event",
or both "interval,event".
The interval mode traces the duration of execution of the operations,
whereas event mode marks the completion of executions.
See document `Recording Performance Data`_ for more info.
wait_until_completed(bool): Waits until the MPS stream completes
executing each encoded GPU operation. This helps generate single
dispatches on the trace's timeline.
Note that enabling this option can negatively affect performance.
.. _Recording Performance Data:
https://developer.apple.com/documentation/os/logging/recording_performance_data
"""
mode_normalized = mode.lower().replace(" ", "")
torch._C._mps_profilerStartTrace(mode_normalized, wait_until_completed)
def stop():
r"""Stops generating OS Signpost tracing from MPS backend."""
torch._C._mps_profilerStopTrace()
@contextlib.contextmanager
def profile(mode: str = "interval", wait_until_completed: bool = False):
r"""Context Manager to enabling generating OS Signpost tracing from MPS backend.
Args:
mode(str): OS Signpost tracing mode could be "interval", "event",
or both "interval,event".
The interval mode traces the duration of execution of the operations,
whereas event mode marks the completion of executions.
See document `Recording Performance Data`_ for more info.
wait_until_completed(bool): Waits until the MPS Stream complete
executing each encoded GPU operation. This helps generating single
dispatches on the trace's timeline.
Note that enabling this option would affect the performance negatively.
.. _Recording Performance Data:
https://developer.apple.com/documentation/os/logging/recording_performance_data
"""
try:
start(mode, wait_until_completed)
yield
finally:
stop()
def is_metal_capture_enabled() -> bool:
"""Checks if `metal_capture` context manager is usable
To enable metal capture, set MTL_CAPTURE_ENABLED envvar
"""
return torch._C._mps_isCaptureEnabled() # type: ignore[attr-defined]
def is_capturing_metal() -> bool:
"""Cheks if metal capture is in progress"""
return torch._C._mps_isCapturing() # type: ignore[attr-defined]
@contextlib.contextmanager
def metal_capture(fname: str):
"""Conext manager that enables capturing of Metal calls into gputrace"""
try:
torch._C._mps_startCapture(fname) # type: ignore[attr-defined]
yield
# Drain all the work that was enqueued during the context call
torch.mps.synchronize()
finally:
torch._C._mps_stopCapture() # type: ignore[attr-defined]
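A minimal usage sketch for the helpers above; it assumes a macOS PyTorch build with the MPS backend available, and the matrix multiply is illustrative.
```python
import torch

# Hypothetical sketch: wrap an MPS workload in the OS Signpost profiler.
if torch.backends.mps.is_available():
    x = torch.randn(1024, 1024, device="mps")
    with profile(mode="interval", wait_until_completed=True):
        y = x @ x
    torch.mps.synchronize()
```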
|
__version__ = '0.30.0'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
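A short sketch of the v0.30-style API re-exported above; the MyDoc schema is an illustrative assumption.
```python
from docarray import BaseDoc, DocList


class MyDoc(BaseDoc):
    text: str


docs = DocList[MyDoc]([MyDoc(text='hello'), MyDoc(text='world')])
print(docs.text)  # ['hello', 'world']
```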
|
__version__ = '0.21.1'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
from docarray.helper import login, logout
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
_base_ = './mask-rcnn_r101_fpn_gn-all_2x_coco.py'
# learning policy
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
|
_base_ = './mask_rcnn_r101_fpn_gn-all_2x_coco.py'
# learning policy
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
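For context, a hedged sketch of how a schedule file like this is typically consumed; the file name and the mmengine-style loader are assumptions.
```python
from mmengine.config import Config

# Hypothetical: load the schedule above (the file name is an assumption).
cfg = Config.fromfile('mask-rcnn_r101_fpn_gn-all_3x_coco.py')
print(cfg.train_cfg.max_epochs)  # 36
print(cfg.param_scheduler)
```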
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmengine.logging import BaseGlobalAccessible, MetaGlobalAccessible
class SubClassA(BaseGlobalAccessible):
def __init__(self, name='', *args, **kwargs):
super().__init__(name, *args, **kwargs)
class SubClassB(BaseGlobalAccessible):
def __init__(self, name='', *args, **kwargs):
super().__init__(name, *args, **kwargs)
class TestGlobalMeta:
def test_init(self):
# A subclass whose constructor does not accept a `name` argument will raise
# an error.
with pytest.raises(AssertionError):
class SubClassNoName1(metaclass=MetaGlobalAccessible):
def __init__(self, a, *args, **kwargs):
pass
# The constructor of subclasses must have default values for all
# arguments except name. Since `MetaGlobalAccessible` cannot tell which
# parameter does not have a default value, we should test invalid
# subclasses separately.
with pytest.raises(AssertionError):
class SubClassNoDefault1(metaclass=MetaGlobalAccessible):
def __init__(self, a, name='', *args, **kwargs):
pass
with pytest.raises(AssertionError):
class SubClassNoDefault2(metaclass=MetaGlobalAccessible):
def __init__(self, a, b, name='', *args, **kwargs):
pass
# Valid subclass.
class GlobalAccessible1(metaclass=MetaGlobalAccessible):
def __init__(self, name):
self.name = name
# Allow name not to be the first argument.
class GlobalAccessible2(metaclass=MetaGlobalAccessible):
def __init__(self, a=1, name=''):
self.name = name
assert GlobalAccessible1.root.name == 'root'
class TestBaseGlobalAccessible:
def test_init(self):
# test get root instance.
assert BaseGlobalAccessible.root._name == 'root'
# test create instance by name.
base_cls = BaseGlobalAccessible('name')
assert base_cls._name == 'name'
def test_create_instance(self):
# SubClass should manage their own `_instance_dict`.
SubClassA.create_instance('instance_a')
SubClassB.create_instance('instance_b')
assert SubClassB._instance_dict != SubClassA._instance_dict
# test that `message_hub` can be created by name.
message_hub = SubClassA.create_instance('name1')
assert message_hub.instance_name == 'name1'
# test return root message_hub
message_hub = SubClassA.create_instance()
assert message_hub.instance_name == 'root'
# test default get root `message_hub`.
def test_get_instance(self):
message_hub = SubClassA.get_instance()
assert message_hub.instance_name == 'root'
# test default get latest `message_hub`.
message_hub = SubClassA.create_instance('name2')
message_hub = SubClassA.get_instance(current=True)
assert message_hub.instance_name == 'name2'
message_hub.mark = -1
# test get latest `message_hub` repeatedly.
message_hub = SubClassA.create_instance('name3')
assert message_hub.instance_name == 'name3'
message_hub = SubClassA.get_instance(current=True)
assert message_hub.instance_name == 'name3'
# test get root repeatedly.
message_hub = SubClassA.get_instance()
assert message_hub.instance_name == 'root'
# test get name2 repeatedly
message_hub = SubClassA.get_instance('name2')
assert message_hub.mark == -1
# create_instance will raise an error if `name` is not specified but
# other arguments are given
with pytest.raises(ValueError):
SubClassA.create_instance(a=1)
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmengine.logging import BaseGlobalAccessible, MetaGlobalAccessible
class SubClassA(BaseGlobalAccessible):
def __init__(self, name='', *args, **kwargs):
super().__init__(name, *args, **kwargs)
class SubClassB(BaseGlobalAccessible):
def __init__(self, name='', *args, **kwargs):
super().__init__(name, *args, **kwargs)
class TestGlobalMeta:
def test_init(self):
# A subclass whose constructor does not accept a `name` argument will raise
# an error.
with pytest.raises(AssertionError):
class SubClassNoName(metaclass=MetaGlobalAccessible):
def __init__(self, *args, **kwargs):
pass
# A subclass whose constructor has arguments without default values will
# raise an error.
with pytest.raises(AssertionError):
class SubClassNoDefault(metaclass=MetaGlobalAccessible):
def __init__(self, a, name='', *args, **kwargs):
pass
class GlobalAccessible(metaclass=MetaGlobalAccessible):
def __init__(self, name=''):
self.name = name
assert GlobalAccessible.root.name == 'root'
class TestBaseGlobalAccessible:
def test_init(self):
# test get root instance.
assert BaseGlobalAccessible.root._name == 'root'
# test create instance by name.
base_cls = BaseGlobalAccessible('name')
assert base_cls._name == 'name'
def test_create_instance(self):
# SubClass should manage their own `_instance_dict`.
SubClassA.create_instance('instance_a')
SubClassB.create_instance('instance_b')
assert SubClassB._instance_dict != SubClassA._instance_dict
# test that `message_hub` can be created by name.
message_hub = SubClassA.create_instance('name1')
assert message_hub.instance_name == 'name1'
# test return root message_hub
message_hub = SubClassA.create_instance()
assert message_hub.instance_name == 'root'
# test default get root `message_hub`.
def test_get_instance(self):
message_hub = SubClassA.get_instance()
assert message_hub.instance_name == 'root'
# test default get latest `message_hub`.
message_hub = SubClassA.create_instance('name2')
message_hub = SubClassA.get_instance(current=True)
assert message_hub.instance_name == 'name2'
message_hub.mark = -1
# test get latest `message_hub` repeatedly.
message_hub = SubClassA.create_instance('name3')
assert message_hub.instance_name == 'name3'
message_hub = SubClassA.get_instance(current=True)
assert message_hub.instance_name == 'name3'
# test get root repeatedly.
message_hub = SubClassA.get_instance()
assert message_hub.instance_name == 'root'
# test get name2 repeatedly
message_hub = SubClassA.get_instance('name2')
assert message_hub.mark == -1
# create_instance will raise an error if `name` is not specified but
# other arguments are given
with pytest.raises(ValueError):
SubClassA.create_instance(a=1)
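A hedged sketch of the pattern these tests exercise: create a named global instance once, then retrieve it elsewhere by name; the instance name is illustrative and SubClassA is reused from above.
```python
# Hypothetical sketch, reusing SubClassA defined above.
hub = SubClassA.create_instance('my_hub')
assert SubClassA.get_instance('my_hub').instance_name == 'my_hub'
assert SubClassA.get_instance(current=True).instance_name == 'my_hub'
```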
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import MochiTransformer3DModel
from diffusers.utils.testing_utils import enable_full_determinism, torch_device
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class MochiTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = MochiTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
# Overriding it because of the transformer size.
model_split_percents = [0.7, 0.6, 0.6]
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 2
height = 16
width = 16
embedding_dim = 16
sequence_length = 16
hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
encoder_attention_mask = torch.ones((batch_size, sequence_length)).bool().to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
"encoder_attention_mask": encoder_attention_mask,
}
@property
def input_shape(self):
return (4, 2, 16, 16)
@property
def output_shape(self):
return (4, 2, 16, 16)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"patch_size": 2,
"num_attention_heads": 2,
"attention_head_dim": 8,
"num_layers": 2,
"pooled_projection_dim": 16,
"in_channels": 4,
"out_channels": None,
"qk_norm": "rms_norm",
"text_embed_dim": 16,
"time_embed_dim": 4,
"activation_fn": "swiglu",
"max_sequence_length": 16,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"MochiTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import MochiTransformer3DModel
from diffusers.utils.testing_utils import enable_full_determinism, torch_device
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class MochiTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = MochiTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
# Overriding it because of the transformer size.
model_split_percents = [0.7, 0.6, 0.6]
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 2
height = 16
width = 16
embedding_dim = 16
sequence_length = 16
hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
encoder_attention_mask = torch.ones((batch_size, sequence_length)).bool().to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
"encoder_attention_mask": encoder_attention_mask,
}
@property
def input_shape(self):
return (4, 2, 16, 16)
@property
def output_shape(self):
return (4, 2, 16, 16)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"patch_size": 2,
"num_attention_heads": 2,
"attention_head_dim": 8,
"num_layers": 2,
"pooled_projection_dim": 16,
"in_channels": 4,
"out_channels": None,
"qk_norm": "rms_norm",
"text_embed_dim": 16,
"time_embed_dim": 4,
"activation_fn": "swiglu",
"max_sequence_length": 16,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"MochiTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
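A hedged sketch that builds the tiny test configuration above outside the test harness and runs a forward pass; the forward-call keyword names mirror dummy_input and are assumptions about the model's signature.
```python
import torch
from diffusers import MochiTransformer3DModel

# Hypothetical sketch using the same tiny config as the test above.
model = MochiTransformer3DModel(
    patch_size=2, num_attention_heads=2, attention_head_dim=8, num_layers=2,
    pooled_projection_dim=16, in_channels=4, out_channels=None,
    qk_norm="rms_norm", text_embed_dim=16, time_embed_dim=4,
    activation_fn="swiglu", max_sequence_length=16,
)
out = model(
    hidden_states=torch.randn(2, 4, 2, 16, 16),
    encoder_hidden_states=torch.randn(2, 16, 16),
    timestep=torch.randint(0, 1000, (2,)),
    encoder_attention_mask=torch.ones(2, 16).bool(),
)
```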
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; this seems to have no effect, so the EXPORT must be done manually before jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.23.4'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
parallel executing plot generators vs. the Ubuntu default ulimit -n 1024 or the OS X El Capitan default of 256;
the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; this seems to have no effect, so the EXPORT must be done manually before jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.23.3'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
parallel executing plot generators vs. the Ubuntu default ulimit -n 1024 or the OS X El Capitan default of 256;
the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
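A hedged end-to-end sketch using the first-class symbols re-exported above; the Executor, port, and document contents are illustrative.
```python
from jina import Document, DocumentArray, Executor, Flow, requests


class AppendExecutor(Executor):
    @requests
    def append_text(self, docs: DocumentArray, **kwargs):
        for doc in docs:
            doc.text += ' processed'


f = Flow(port=12345).add(uses=AppendExecutor)
with f:
    result = f.post(on='/', inputs=DocumentArray([Document(text='doc')]))
    print(result.texts)  # ['doc processed']
```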
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Convolution node name match via the XLA JIT.
The canned results in these tests are created by running each test using the
Tensorflow CPU device and saving the output.
"""
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import ops
from tensorflow.python.layers import layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import googletest
class ConvolutionNodeNameTest(xla_test.XLATestCase):
"""Verify convolution node name match.
Verify convolution node names on TPU and CPU match with dilation > 1.
"""
def _verifyNodeNameMatch(self, layer, input_sizes, filter_sizes, strides,
dilations):
def _GetNodeNames(use_xla):
with self.session():
input_tensor = array_ops.placeholder(np.float32, shape=input_sizes)
if use_xla:
with self.device_scope():
# pylint: disable=protected-access
graph = ops.get_default_graph()
graph._set_control_flow_context(
control_flow_ops.XLAControlFlowContext())
# pylint: enable=protected-access
conv2d_op = layer(
filters=64,
kernel_size=filter_sizes,
dilation_rate=dilations,
padding="same")
_ = conv2d_op(input_tensor)
return [n.name for n in ops.get_default_graph().as_graph_def().node]
else:
with ops.device("CPU"):
conv2d_op = layer(
filters=64,
kernel_size=filter_sizes,
dilation_rate=dilations,
padding="same")
_ = conv2d_op(input_tensor)
names = [
n.name for n in ops.get_default_graph().as_graph_def().node
]
# filter out space to depth ops.
return [
name for name in names
if "space" not in name and "Space" not in name
]
xla_names = _GetNodeNames(use_xla=True)
no_xla_names = _GetNodeNames(use_xla=False)
# CPU path creates some additional nodes to handle dilations.
# TODO(b/138804006): Remove this when CPU & GPU support dilations.
filtered_no_xla_names = []
for name in no_xla_names:
if ("dilation_rate" in name or "filter_shape" in name or "stack" in name):
continue
else:
filtered_no_xla_names.append(name)
self.assertListEqual(xla_names, filtered_no_xla_names)
def testConv1DNodeNameMatch(self):
input_sizes = [8, 16, 3]
filter_sizes = [7]
strides = 1
dilations = [2]
layer = layers.Conv1D
self._verifyNodeNameMatch(layer, input_sizes, filter_sizes, strides,
dilations)
def testConv2DNodeNameMatch(self):
input_sizes = [8, 16, 16, 3]
filter_sizes = [7, 7]
strides = 1
dilations = [2, 2]
layer = layers.Conv2D
self._verifyNodeNameMatch(layer, input_sizes, filter_sizes, strides,
dilations)
def testConv3DNodeNameMatch(self):
input_sizes = [8, 16, 16, 16, 3]
filter_sizes = [7, 7, 7]
strides = 1
dilations = [2, 2, 2]
layer = layers.Conv3D
self._verifyNodeNameMatch(layer, input_sizes, filter_sizes, strides,
dilations)
if __name__ == "__main__":
googletest.main()
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Convolution node name match via the XLA JIT.
The canned results in these tests are created by running each test using the
Tensorflow CPU device and saving the output.
"""
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import ops
from tensorflow.python.layers import layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import googletest
class ConvolutionNodeNameTest(xla_test.XLATestCase):
"""Verify convolution node name match.
Verify convolution node names on TPU and CPU match with dilation > 1.
"""
def _verifyNodeNameMatch(self, layer, input_sizes, filter_sizes, strides,
dilations):
def _GetNodeNames(use_xla):
with self.session():
input_tensor = array_ops.placeholder(np.float32, shape=input_sizes)
if use_xla:
with self.test_scope():
# pylint: disable=protected-access
graph = ops.get_default_graph()
graph._set_control_flow_context(
control_flow_ops.XLAControlFlowContext())
# pylint: enable=protected-access
conv2d_op = layer(
filters=64,
kernel_size=filter_sizes,
dilation_rate=dilations,
padding="same")
_ = conv2d_op(input_tensor)
return [n.name for n in ops.get_default_graph().as_graph_def().node]
else:
with ops.device("CPU"):
conv2d_op = layer(
filters=64,
kernel_size=filter_sizes,
dilation_rate=dilations,
padding="same")
_ = conv2d_op(input_tensor)
names = [
n.name for n in ops.get_default_graph().as_graph_def().node
]
# filter out space to depth ops.
return [
name for name in names
if "space" not in name and "Space" not in name
]
xla_names = _GetNodeNames(use_xla=True)
no_xla_names = _GetNodeNames(use_xla=False)
# CPU path creates some additional nodes to handle dilations.
# TODO(b/138804006): Remove this when CPU & GPU support dilations.
filtered_no_xla_names = []
for name in no_xla_names:
if ("dilation_rate" in name or "filter_shape" in name or "stack" in name):
continue
else:
filtered_no_xla_names.append(name)
self.assertListEqual(xla_names, filtered_no_xla_names)
def testConv1DNodeNameMatch(self):
input_sizes = [8, 16, 3]
filter_sizes = [7]
strides = 1
dilations = [2]
layer = layers.Conv1D
self._verifyNodeNameMatch(layer, input_sizes, filter_sizes, strides,
dilations)
def testConv2DNodeNameMatch(self):
input_sizes = [8, 16, 16, 3]
filter_sizes = [7, 7]
strides = 1
dilations = [2, 2]
layer = layers.Conv2D
self._verifyNodeNameMatch(layer, input_sizes, filter_sizes, strides,
dilations)
def testConv3DNodeNameMatch(self):
input_sizes = [8, 16, 16, 16, 3]
filter_sizes = [7, 7, 7]
strides = 1
dilations = [2, 2, 2]
layer = layers.Conv3D
self._verifyNodeNameMatch(layer, input_sizes, filter_sizes, strides,
dilations)
if __name__ == "__main__":
googletest.main()
|
"""
This example trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous score between 0 and 1 to indicate the similarity of the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_quora_duplicate_questions.py
"""
import csv
import logging
import math
import os
from datetime import datetime
from zipfile import ZipFile
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEBinaryClassificationEvaluator
from sentence_transformers.readers import InputExample
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
dataset_path = "quora-dataset/"
if not os.path.exists(dataset_path):
logger.info("Dataset not found. Download")
zip_save_path = "quora-IR-dataset.zip"
util.http_get(url="https://sbert.net/datasets/quora-IR-dataset.zip", path=zip_save_path)
    with ZipFile(zip_save_path, "r") as zip_file:
        zip_file.extractall(dataset_path)
# Read the quora dataset split for classification
logger.info("Read train dataset")
train_samples = []
with open(os.path.join(dataset_path, "classification", "train_pairs.tsv"), "r", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
train_samples.append(InputExample(texts=[row["question1"], row["question2"]], label=int(row["is_duplicate"])))
train_samples.append(InputExample(texts=[row["question2"], row["question1"]], label=int(row["is_duplicate"])))
logger.info("Read dev dataset")
dev_samples = []
with open(os.path.join(dataset_path, "classification", "dev_pairs.tsv"), "r", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
dev_samples.append(InputExample(texts=[row["question1"], row["question2"]], label=int(row["is_duplicate"])))
# Configuration
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_quora-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base with a single label, i.e., it will output a value between 0 and 1 indicating the similarity of the two questions
model = CrossEncoder("distilroberta-base", num_labels=1)
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CEBinaryClassificationEvaluator.from_input_examples(dev_samples, name="Quora-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=5000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
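# After training, the saved CrossEncoder can score new question pairs directly.
# A minimal inference sketch (the example sentences below are illustrative only):
trained_model = CrossEncoder(model_save_path)
scores = trained_model.predict(
    [
        ["How can I learn Python quickly?", "What is the fastest way to learn Python?"],
        ["How can I learn Python quickly?", "What is the capital of France?"],
    ]
)
print(scores)  # scores close to 1 indicate likely duplicates, close to 0 unlikely ones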
|
"""
This example trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous score between 0 and 1 to indicate the similarity of the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_quora_duplicate_questions.py
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEBinaryClassificationEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import os
import csv
from zipfile import ZipFile
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
dataset_path = "quora-dataset/"
if not os.path.exists(dataset_path):
logger.info("Dataset not found. Download")
zip_save_path = "quora-IR-dataset.zip"
util.http_get(url="https://sbert.net/datasets/quora-IR-dataset.zip", path=zip_save_path)
    with ZipFile(zip_save_path, "r") as zip_file:
        zip_file.extractall(dataset_path)
# Read the quora dataset split for classification
logger.info("Read train dataset")
train_samples = []
with open(os.path.join(dataset_path, "classification", "train_pairs.tsv"), "r", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
train_samples.append(InputExample(texts=[row["question1"], row["question2"]], label=int(row["is_duplicate"])))
train_samples.append(InputExample(texts=[row["question2"], row["question1"]], label=int(row["is_duplicate"])))
logger.info("Read dev dataset")
dev_samples = []
with open(os.path.join(dataset_path, "classification", "dev_pairs.tsv"), "r", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
dev_samples.append(InputExample(texts=[row["question1"], row["question2"]], label=int(row["is_duplicate"])))
# Configuration
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_quora-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base with a single label, i.e., it will output a value between 0 and 1 indicating the similarity of the two questions
model = CrossEncoder("distilroberta-base", num_labels=1)
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CEBinaryClassificationEvaluator.from_input_examples(dev_samples, name="Quora-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=5000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
|
import random
import pytest
from datasets import Dataset
from sentence_transformers.sampler import NoDuplicatesBatchSampler
@pytest.fixture
def dummy_dataset():
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 47, 3, 30, 3, ... 2],
"label": [0, 1, 0, 1, ..., 0, 1],
}
"""
# Create a list of two 0's, two 1's, two 2's, ... two 49's. Then shuffle.
values = [j for i in range(50) for j in (i, i)]
random.shuffle(values)
data = {"data": values, "label": [i % 2 for i in range(100)]}
return Dataset.from_dict(data)
def test_group_by_label_batch_sampler_label_a(dummy_dataset):
batch_size = 10
sampler = NoDuplicatesBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=True, valid_label_columns=["label"]
)
batches = list(iter(sampler))
# Assert all batch sizes are correct
assert all(len(batch) == batch_size for batch in batches)
# Assert batches contain no duplicate values
for batch in batches:
batch_values = [dummy_dataset[i]["data"] for i in batch]
assert len(batch_values) == len(set(batch_values)), f"Batch {batch} contains duplicate values: {batch_values}"
|
import pytest
from datasets import Dataset
from sentence_transformers.sampler import NoDuplicatesBatchSampler
import random
@pytest.fixture
def dummy_dataset():
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 47, 3, 30, 3, ... 2],
"label": [0, 1, 0, 1, ..., 0, 1],
}
"""
# Create a list of two 0's, two 1's, two 2's, ... two 49's. Then shuffle.
values = [j for i in range(50) for j in (i, i)]
random.shuffle(values)
data = {"data": values, "label": [i % 2 for i in range(100)]}
return Dataset.from_dict(data)
def test_group_by_label_batch_sampler_label_a(dummy_dataset):
batch_size = 10
sampler = NoDuplicatesBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=True, valid_label_columns=["label"]
)
batches = list(iter(sampler))
# Assert all batch sizes are correct
assert all(len(batch) == batch_size for batch in batches)
# Assert batches contain no duplicate values
for batch in batches:
batch_values = [dummy_dataset[i]["data"] for i in batch]
assert len(batch_values) == len(set(batch_values)), f"Batch {batch} contains duplicate values: {batch_values}"
|
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .consisid_transformer_3d import ConsisIDTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DModel
from .hunyuan_transformer_2d import HunyuanDiT2DModel
from .latte_transformer_3d import LatteTransformer3DModel
from .lumina_nextdit2d import LuminaNextDiT2DModel
from .pixart_transformer_2d import PixArtTransformer2DModel
from .prior_transformer import PriorTransformer
from .sana_transformer import SanaTransformer2DModel
from .stable_audio_transformer import StableAudioDiTModel
from .t5_film_transformer import T5FilmDecoder
from .transformer_2d import Transformer2DModel
from .transformer_allegro import AllegroTransformer3DModel
from .transformer_cogview3plus import CogView3PlusTransformer2DModel
from .transformer_cogview4 import CogView4Transformer2DModel
from .transformer_easyanimate import EasyAnimateTransformer3DModel
from .transformer_flux import FluxTransformer2DModel
from .transformer_hidream_image import HiDreamImageTransformer2DModel
from .transformer_hunyuan_video import HunyuanVideoTransformer3DModel
from .transformer_hunyuan_video_framepack import HunyuanVideoFramepackTransformer3DModel
from .transformer_ltx import LTXVideoTransformer3DModel
from .transformer_lumina2 import Lumina2Transformer2DModel
from .transformer_mochi import MochiTransformer3DModel
from .transformer_omnigen import OmniGenTransformer2DModel
from .transformer_sd3 import SD3Transformer2DModel
from .transformer_temporal import TransformerTemporalModel
from .transformer_wan import WanTransformer3DModel
|
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .consisid_transformer_3d import ConsisIDTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DModel
from .hunyuan_transformer_2d import HunyuanDiT2DModel
from .latte_transformer_3d import LatteTransformer3DModel
from .lumina_nextdit2d import LuminaNextDiT2DModel
from .pixart_transformer_2d import PixArtTransformer2DModel
from .prior_transformer import PriorTransformer
from .sana_transformer import SanaTransformer2DModel
from .stable_audio_transformer import StableAudioDiTModel
from .t5_film_transformer import T5FilmDecoder
from .transformer_2d import Transformer2DModel
from .transformer_allegro import AllegroTransformer3DModel
from .transformer_cogview3plus import CogView3PlusTransformer2DModel
from .transformer_cogview4 import CogView4Transformer2DModel
from .transformer_easyanimate import EasyAnimateTransformer3DModel
from .transformer_flux import FluxTransformer2DModel
from .transformer_hidream_image import HiDreamImageTransformer2DModel
from .transformer_hunyuan_video import HunyuanVideoTransformer3DModel
from .transformer_ltx import LTXVideoTransformer3DModel
from .transformer_lumina2 import Lumina2Transformer2DModel
from .transformer_mochi import MochiTransformer3DModel
from .transformer_omnigen import OmniGenTransformer2DModel
from .transformer_sd3 import SD3Transformer2DModel
from .transformer_temporal import TransformerTemporalModel
from .transformer_wan import WanTransformer3DModel
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='AudioDoc')
class AudioDoc(BaseDoc):
"""
    Document for handling audio.
The Audio Document can contain an AudioUrl (`AudioDoc.url`), an AudioTensor
(`AudioDoc.tensor`), and an AnyEmbedding (`AudioDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import AudioDoc
# use it directly
        audio = AudioDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import AudioDoc, TextDoc
from typing import Optional
# extend it
        class MyAudio(AudioDoc):
            name: Optional[TextDoc]
audio = MyAudio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
        audio.name = TextDoc(text='my first audio')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDoc
from docarray.documents import AudioDoc, TextDoc
# compose it
        class MultiModalDoc(BaseDoc):
            audio: AudioDoc
            text: TextDoc
        mmdoc = MultiModalDoc(
            audio=AudioDoc(
                url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
            ),
            text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.url.load()
# equivalent to
mmdoc.audio.bytes_ = mmdoc.audio.url.load_bytes()
        mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.bytes_.load()
"""
url: Optional[AudioUrl]
tensor: Optional[AudioTensor]
embedding: Optional[AnyEmbedding]
bytes_: Optional[AudioBytes]
frame_rate: Optional[int]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='AudioDoc')
class AudioDoc(BaseDoc):
"""
    Document for handling audio.
The Audio Document can contain an AudioUrl (`AudioDoc.url`), an AudioTensor
(`AudioDoc.tensor`), and an AnyEmbedding (`AudioDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import AudioDoc
# use it directly
        audio = AudioDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import AudioDoc, TextDoc
from typing import Optional
# extend it
        class MyAudio(AudioDoc):
            name: Optional[TextDoc]
audio = MyAudio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
        audio.name = TextDoc(text='my first audio')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDoc
from docarray.documents import AudioDoc, TextDoc
# compose it
        class MultiModalDoc(BaseDoc):
            audio: AudioDoc
            text: TextDoc
        mmdoc = MultiModalDoc(
            audio=AudioDoc(
                url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
            ),
            text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.url.load()
# equivalent to
mmdoc.audio.bytes_ = mmdoc.audio.url.load_bytes()
        mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.bytes_.load()
"""
url: Optional[AudioUrl]
tensor: Optional[AudioTensor]
embedding: Optional[AnyEmbedding]
bytes_: Optional[AudioBytes]
frame_rate: Optional[int]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
_base_ = './scnet_x101_64x4d_fpn_20e_coco.py'
data = dict(samples_per_gpu=1, workers_per_gpu=1)
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
auto_scale_lr = dict(base_batch_size=8)
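# Worked example (illustrative, not part of the config): the lr above is assumed to
# correspond to base_batch_size=8. If this config is run on 4 GPUs with
# samples_per_gpu=1, the effective batch size is 4, so an auto-scaled lr would be
# 0.01 * 4 / 8 = 0.005.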
|
_base_ = './scnet_x101_64x4d_fpn_20e_coco.py'
data = dict(samples_per_gpu=1, workers_per_gpu=1)
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codecarbon>=2.8.1",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"hf_xet": "hf_xet",
"huggingface-hub": "huggingface-hub>=0.30.0,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.4.1,<=0.4.13",
"jaxlib": "jaxlib>=0.4.1,<=0.4.13",
"jieba": "jieba",
"jinja2": "jinja2>=3.1.0",
"kenlm@git+https://github.com/ydshieh/kenlm@78f664fb3dafe1468d868d71faf19534530698d5": "kenlm@git+https://github.com/ydshieh/kenlm@78f664fb3dafe1468d868d71faf19534530698d5",
"keras": "keras>2.9,<2.16",
"keras-nlp": "keras-nlp>=0.3.1,<0.14.0",
"kernels": "kernels>=0.4.4,<0.5",
"librosa": "librosa",
"natten": "natten>=0.14.6,<0.15.0",
"nltk": "nltk<=3.8.1",
"num2words": "num2words",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optimum-benchmark": "optimum-benchmark>=0.3.0",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic",
"pytest": "pytest>=7.2.0,<8.0.0",
"pytest-asyncio": "pytest-asyncio",
"pytest-rerunfailures": "pytest-rerunfailures",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"pytest-order": "pytest-order",
"python": "python>=3.9.0",
"ray[tune]": "ray[tune]>=2.7.0",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff==0.11.2",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.4.3",
"sagemaker": "sagemaker>=2.31.0",
"schedulefree": "schedulefree>=1.2.6",
"scikit-learn": "scikit-learn",
"scipy": "scipy<1.13.0",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorboard": "tensorboard",
"tensorflow-cpu": "tensorflow-cpu>2.9,<2.16",
"tensorflow": "tensorflow>2.9,<2.16",
"tensorflow-text": "tensorflow-text<2.16",
"tensorflow-probability": "tensorflow-probability<0.24",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"tiktoken": "tiktoken",
"timm": "timm<=1.0.11",
"tokenizers": "tokenizers>=0.21,<0.22",
"torch": "torch>=2.1,<2.7",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
"pytest-rich": "pytest-rich",
"libcst": "libcst",
"rich": "rich",
}
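# A minimal sketch (not part of the autogenerated table) of how the mapping above is
# typically consumed: look up the pinned specifier for a package by its short name.
if __name__ == "__main__":
    for name in ("torch", "tokenizers", "numpy"):
        print(deps[name])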
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codecarbon>=2.8.1",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"hf_xet": "hf_xet",
"huggingface-hub": "huggingface-hub>=0.30.0,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.4.1,<=0.4.13",
"jaxlib": "jaxlib>=0.4.1,<=0.4.13",
"jieba": "jieba",
"jinja2": "jinja2>=3.1.0",
"kenlm@git+https://github.com/ydshieh/kenlm@78f664fb3dafe1468d868d71faf19534530698d5": "kenlm@git+https://github.com/ydshieh/kenlm@78f664fb3dafe1468d868d71faf19534530698d5",
"keras": "keras>2.9,<2.16",
"keras-nlp": "keras-nlp>=0.3.1,<0.14.0",
"kernels": "kernels>=0.4.4,<0.5",
"librosa": "librosa",
"natten": "natten>=0.14.6,<0.15.0",
"nltk": "nltk<=3.8.1",
"num2words": "num2words",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optimum-benchmark": "optimum-benchmark>=0.3.0",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic",
"pytest": "pytest>=7.2.0,<8.0.0",
"pytest-asyncio": "pytest-asyncio",
"pytest-rerunfailures": "pytest-rerunfailures",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"pytest-order": "pytest-order",
"python": "python>=3.9.0",
"ray[tune]": "ray[tune]>=2.7.0",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff==0.11.2",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.4.3",
"sagemaker": "sagemaker>=2.31.0",
"schedulefree": "schedulefree>=1.2.6",
"scikit-learn": "scikit-learn",
"scipy": "scipy<1.13.0",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorboard": "tensorboard",
"tensorflow-cpu": "tensorflow-cpu>2.9,<2.16",
"tensorflow": "tensorflow>2.9,<2.16",
"tensorflow-text": "tensorflow-text<2.16",
"tensorflow-probability": "tensorflow-probability<0.24",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"tiktoken": "tiktoken",
"timm": "timm<=1.0.11",
"tokenizers": "tokenizers>=0.21,<0.22",
"torch": "torch>=2.1",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
"pytest-rich": "pytest-rich",
"libcst": "libcst",
"rich": "rich",
}
|
"""Optimization related classes and functions."""
import logging
from typing import Any, Dict, List, Optional, Literal
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
logger = logging.getLogger(__name__)
DEFAULT_INSTRUCTION_STR = "Given the context, please answer the final question"
def format_metadata(nodes: List[NodeWithScore]):
return {node.node.id_: node.metadata for node in nodes}
class LongLLMLinguaPostprocessor(BaseNodePostprocessor):
"""
Optimization of nodes.
    Compress the node context using the approach from the LongLLMLingua paper.
"""
instruction_str: str = Field(
default=DEFAULT_INSTRUCTION_STR, description="Instruction string."
)
target_token: int = Field(
default=-1, description="Target number of compressed tokens."
)
use_llmlingua2: bool = Field(
default=False, description="Whether to use the llmlingua2 approach"
)
rank_method: str = Field(default="longllmlingua", description="Ranking method.")
additional_compress_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional compress kwargs."
)
_llm_lingua: Any = PrivateAttr()
def __init__(
self,
model_name: str = "NousResearch/Llama-2-7b-hf",
device_map: Literal["cuda", "cpu", "mps"] = "cuda",
model_config: Optional[dict] = {},
open_api_config: Optional[dict] = {},
instruction_str: str = DEFAULT_INSTRUCTION_STR,
target_token: float = -1,
rank_method: str = "longllmlingua",
additional_compress_kwargs: Optional[Dict[str, Any]] = {},
use_llmlingua2: bool = False,
):
"""LongLLMLingua Compressor for Node Context."""
from llmlingua import PromptCompressor
super().__init__(
instruction_str=instruction_str,
target_token=target_token,
rank_method=rank_method,
additional_compress_kwargs=additional_compress_kwargs,
use_llmlingua2=use_llmlingua2,
)
open_api_config = open_api_config or {}
additional_compress_kwargs = additional_compress_kwargs or {}
if self.use_llmlingua2 is True:
assert (
model_name == "microsoft/llmlingua-2-xlm-roberta-large-meetingbank"
), (
'Must use "microsoft/llmlingua-2-xlm-roberta-large-meetingbank" as the model name for llmlingua2'
)
self._llm_lingua = PromptCompressor(
model_name=model_name,
device_map=device_map,
model_config=model_config,
open_api_config=open_api_config,
use_llmlingua2=self.use_llmlingua2,
)
@classmethod
def class_name(cls) -> str:
return "LongLLMLinguaPostprocessor"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
"""Optimize a node text given the query by shortening the node text."""
if query_bundle is None:
raise ValueError("Query bundle is required.")
        # llmlingua2 prompt compression works on raw text, so extract the node texts and keep the metadata aside.
context_texts = [n.text for n in nodes]
# Preserve metadata for prompt compressed nodes
metadata = format_metadata(nodes)
new_context_texts = "".join(context_texts)
# You can use it this way, although the question-aware fine-grained compression hasn't been enabled.
compressed_prompt = self._llm_lingua.compress_prompt(
new_context_texts, # ! Replace the previous context_list
instruction=self.instruction_str,
question=query_bundle.query_str,
# target_token=2000,
target_token=self.target_token,
rank_method=self.rank_method,
**self.additional_compress_kwargs,
)
compressed_prompt_txt = compressed_prompt["compressed_prompt"]
# separate out the question and instruction (appended to top and bottom)
compressed_prompt_txt_list = compressed_prompt_txt.split("\n\n")
if self.use_llmlingua2 is False:
compressed_prompt_txt_list = compressed_prompt_txt_list[1:-1]
# return nodes for each list
keys_to_exclude = list(metadata.keys())
return [
NodeWithScore(
node=TextNode(
text=t,
metadata=metadata,
excluded_llm_metadata_keys=keys_to_exclude,
excluded_embed_metadata_keys=keys_to_exclude,
)
)
for t in compressed_prompt_txt_list
]
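# A minimal usage sketch (assumes `llmlingua` and the default compression model are
# installed and a GPU is available; the nodes and question below are illustrative only):
if __name__ == "__main__":
    example_nodes = [
        NodeWithScore(node=TextNode(text="The capital of France is Paris."), score=0.9),
        NodeWithScore(node=TextNode(text="Paris hosted the 2024 Summer Olympics."), score=0.7),
    ]
    postprocessor = LongLLMLinguaPostprocessor(target_token=300)
    compressed = postprocessor.postprocess_nodes(
        example_nodes, query_bundle=QueryBundle(query_str="What is the capital of France?")
    )
    for n in compressed:
        print(n.text)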
|
"""Optimization related classes and functions."""
import logging
from typing import Any, Dict, List, Optional, Literal
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
logger = logging.getLogger(__name__)
DEFAULT_INSTRUCTION_STR = "Given the context, please answer the final question"
def format_metadata(nodes: List[NodeWithScore]):
return {node.node.id_: node.metadata for node in nodes}
class LongLLMLinguaPostprocessor(BaseNodePostprocessor):
"""
Optimization of nodes.
    Compress the node context using the approach from the LongLLMLingua paper.
"""
instruction_str: str = Field(
default=DEFAULT_INSTRUCTION_STR, description="Instruction string."
)
target_token: int = Field(
default=-1, description="Target number of compressed tokens."
)
use_llmlingua2: bool = Field(
default=False, description="Whether to use the llmlingua2 approach"
)
rank_method: str = Field(default="longllmlingua", description="Ranking method.")
additional_compress_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional compress kwargs."
)
_llm_lingua: Any = PrivateAttr()
def __init__(
self,
model_name: str = "NousResearch/Llama-2-7b-hf",
device_map: Literal["cuda", "cpu", "mps"] = "cuda",
model_config: Optional[dict] = {},
open_api_config: Optional[dict] = {},
instruction_str: str = DEFAULT_INSTRUCTION_STR,
target_token: float = -1,
rank_method: str = "longllmlingua",
additional_compress_kwargs: Optional[Dict[str, Any]] = {},
use_llmlingua2: bool = False,
):
"""LongLLMLingua Compressor for Node Context."""
from llmlingua import PromptCompressor
super().__init__(
instruction_str=instruction_str,
target_token=target_token,
rank_method=rank_method,
additional_compress_kwargs=additional_compress_kwargs,
use_llmlingua2=use_llmlingua2,
)
open_api_config = open_api_config or {}
additional_compress_kwargs = additional_compress_kwargs or {}
if self.use_llmlingua2 is True:
assert (
model_name == "microsoft/llmlingua-2-xlm-roberta-large-meetingbank"
), 'Must use "microsoft/llmlingua-2-xlm-roberta-large-meetingbank" as the model name for llmlingua2'
self._llm_lingua = PromptCompressor(
model_name=model_name,
device_map=device_map,
model_config=model_config,
open_api_config=open_api_config,
use_llmlingua2=self.use_llmlingua2,
)
@classmethod
def class_name(cls) -> str:
return "LongLLMLinguaPostprocessor"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
"""Optimize a node text given the query by shortening the node text."""
if query_bundle is None:
raise ValueError("Query bundle is required.")
        # llmlingua2 prompt compression works on raw text, so extract the node texts and keep the metadata aside.
context_texts = [n.text for n in nodes]
# Preserve metadata for prompt compressed nodes
metadata = format_metadata(nodes)
new_context_texts = "".join(context_texts)
# You can use it this way, although the question-aware fine-grained compression hasn't been enabled.
compressed_prompt = self._llm_lingua.compress_prompt(
new_context_texts, # ! Replace the previous context_list
instruction=self.instruction_str,
question=query_bundle.query_str,
# target_token=2000,
target_token=self.target_token,
rank_method=self.rank_method,
**self.additional_compress_kwargs,
)
compressed_prompt_txt = compressed_prompt["compressed_prompt"]
# separate out the question and instruction (appended to top and bottom)
compressed_prompt_txt_list = compressed_prompt_txt.split("\n\n")
if self.use_llmlingua2 is False:
compressed_prompt_txt_list = compressed_prompt_txt_list[1:-1]
# return nodes for each list
keys_to_exclude = list(metadata.keys())
return [
NodeWithScore(
node=TextNode(
text=t,
metadata=metadata,
excluded_llm_metadata_keys=keys_to_exclude,
excluded_embed_metadata_keys=keys_to_exclude,
)
)
for t in compressed_prompt_txt_list
]
|
from datetime import datetime, timezone
from unittest.mock import AsyncMock
import pytest
from fastapi import WebSocket
from backend.data.execution import ExecutionResult, ExecutionStatus
from backend.server.conn_manager import ConnectionManager
from backend.server.model import Methods, WsMessage
@pytest.fixture
def connection_manager() -> ConnectionManager:
return ConnectionManager()
@pytest.fixture
def mock_websocket() -> AsyncMock:
websocket: AsyncMock = AsyncMock(spec=WebSocket)
websocket.send_text = AsyncMock()
return websocket
@pytest.mark.asyncio
async def test_connect(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
await connection_manager.connect(mock_websocket)
assert mock_websocket in connection_manager.active_connections
mock_websocket.accept.assert_called_once()
def test_disconnect(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
connection_manager.active_connections.add(mock_websocket)
connection_manager.subscriptions["test_graph_1"] = {mock_websocket}
connection_manager.disconnect(mock_websocket)
assert mock_websocket not in connection_manager.active_connections
assert mock_websocket not in connection_manager.subscriptions["test_graph_1"]
@pytest.mark.asyncio
async def test_subscribe(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
await connection_manager.subscribe(
user_id="user-1",
graph_id="test_graph",
graph_version=1,
websocket=mock_websocket,
)
assert mock_websocket in connection_manager.subscriptions["user-1_test_graph_1"]
@pytest.mark.asyncio
async def test_unsubscribe(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
connection_manager.subscriptions["user-1_test_graph_1"] = {mock_websocket}
await connection_manager.unsubscribe(
user_id="user-1",
graph_id="test_graph",
graph_version=1,
websocket=mock_websocket,
)
assert "test_graph" not in connection_manager.subscriptions
@pytest.mark.asyncio
async def test_send_execution_result(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
connection_manager.subscriptions["user-1_test_graph_1"] = {mock_websocket}
result: ExecutionResult = ExecutionResult(
user_id="user-1",
graph_id="test_graph",
graph_version=1,
graph_exec_id="test_exec_id",
node_exec_id="test_node_exec_id",
node_id="test_node_id",
block_id="test_block_id",
status=ExecutionStatus.COMPLETED,
input_data={"input1": "value1"},
output_data={"output1": ["result1"]},
add_time=datetime.now(tz=timezone.utc),
queue_time=None,
start_time=datetime.now(tz=timezone.utc),
end_time=datetime.now(tz=timezone.utc),
)
await connection_manager.send_execution_result(result)
mock_websocket.send_text.assert_called_once_with(
WsMessage(
method=Methods.EXECUTION_EVENT,
channel="user-1_test_graph_1",
data=result.model_dump(),
).model_dump_json()
)
@pytest.mark.asyncio
async def test_send_execution_result_user_mismatch(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
connection_manager.subscriptions["user-1_test_graph_1"] = {mock_websocket}
result: ExecutionResult = ExecutionResult(
user_id="user-2",
graph_id="test_graph",
graph_version=1,
graph_exec_id="test_exec_id",
node_exec_id="test_node_exec_id",
node_id="test_node_id",
block_id="test_block_id",
status=ExecutionStatus.COMPLETED,
input_data={"input1": "value1"},
output_data={"output1": ["result1"]},
add_time=datetime.now(tz=timezone.utc),
queue_time=None,
start_time=datetime.now(tz=timezone.utc),
end_time=datetime.now(tz=timezone.utc),
)
await connection_manager.send_execution_result(result)
mock_websocket.send_text.assert_not_called()
@pytest.mark.asyncio
async def test_send_execution_result_no_subscribers(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
result: ExecutionResult = ExecutionResult(
user_id="user-1",
graph_id="test_graph",
graph_version=1,
graph_exec_id="test_exec_id",
node_exec_id="test_node_exec_id",
node_id="test_node_id",
block_id="test_block_id",
status=ExecutionStatus.COMPLETED,
input_data={"input1": "value1"},
output_data={"output1": ["result1"]},
add_time=datetime.now(),
queue_time=None,
start_time=datetime.now(),
end_time=datetime.now(),
)
await connection_manager.send_execution_result(result)
mock_websocket.send_text.assert_not_called()
|
from datetime import datetime, timezone
from unittest.mock import AsyncMock
import pytest
from fastapi import WebSocket
from backend.data.execution import ExecutionResult, ExecutionStatus
from backend.server.conn_manager import ConnectionManager
from backend.server.model import Methods, WsMessage
@pytest.fixture
def connection_manager() -> ConnectionManager:
return ConnectionManager()
@pytest.fixture
def mock_websocket() -> AsyncMock:
websocket: AsyncMock = AsyncMock(spec=WebSocket)
websocket.send_text = AsyncMock()
return websocket
@pytest.mark.asyncio
async def test_connect(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
await connection_manager.connect(mock_websocket)
assert mock_websocket in connection_manager.active_connections
mock_websocket.accept.assert_called_once()
def test_disconnect(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
connection_manager.active_connections.add(mock_websocket)
connection_manager.subscriptions["test_graph_1"] = {mock_websocket}
connection_manager.disconnect(mock_websocket)
assert mock_websocket not in connection_manager.active_connections
assert mock_websocket not in connection_manager.subscriptions["test_graph_1"]
@pytest.mark.asyncio
async def test_subscribe(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
await connection_manager.subscribe("test_graph", 1, mock_websocket)
assert mock_websocket in connection_manager.subscriptions["test_graph_1"]
@pytest.mark.asyncio
async def test_unsubscribe(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
connection_manager.subscriptions["test_graph_1"] = {mock_websocket}
await connection_manager.unsubscribe("test_graph", 1, mock_websocket)
assert "test_graph" not in connection_manager.subscriptions
@pytest.mark.asyncio
async def test_send_execution_result(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
connection_manager.subscriptions["test_graph_1"] = {mock_websocket}
result: ExecutionResult = ExecutionResult(
graph_id="test_graph",
graph_version=1,
graph_exec_id="test_exec_id",
node_exec_id="test_node_exec_id",
node_id="test_node_id",
block_id="test_block_id",
status=ExecutionStatus.COMPLETED,
input_data={"input1": "value1"},
output_data={"output1": ["result1"]},
add_time=datetime.now(tz=timezone.utc),
queue_time=None,
start_time=datetime.now(tz=timezone.utc),
end_time=datetime.now(tz=timezone.utc),
)
await connection_manager.send_execution_result(result)
mock_websocket.send_text.assert_called_once_with(
WsMessage(
method=Methods.EXECUTION_EVENT,
channel="test_graph_1",
data=result.model_dump(),
).model_dump_json()
)
@pytest.mark.asyncio
async def test_send_execution_result_no_subscribers(
connection_manager: ConnectionManager, mock_websocket: AsyncMock
) -> None:
result: ExecutionResult = ExecutionResult(
graph_id="test_graph",
graph_version=1,
graph_exec_id="test_exec_id",
node_exec_id="test_node_exec_id",
node_id="test_node_id",
block_id="test_block_id",
status=ExecutionStatus.COMPLETED,
input_data={"input1": "value1"},
output_data={"output1": ["result1"]},
add_time=datetime.now(),
queue_time=None,
start_time=datetime.now(),
end_time=datetime.now(),
)
await connection_manager.send_execution_result(result)
mock_websocket.send_text.assert_not_called()
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
torch = import_library('torch', raise_error=False)
tf = import_library('tensorflow', raise_error=False)
T = TypeVar('T', bound='PointsAndColors')
class PointsAndColors(BaseDoc):
"""
Document for handling point clouds tensor data.
A PointsAndColors Document can contain an AnyTensor containing the points in
3D space information (`PointsAndColors.points`), and an AnyTensor containing
the points' color information (`PointsAndColors.colors`).
"""
points: AnyTensor
colors: Optional[AnyTensor]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(points=value)
return super().validate(value)
def display(self) -> None:
"""
Plot point cloud consisting of points in 3D space and optionally colors.
"""
if TYPE_CHECKING:
import trimesh
else:
trimesh = import_library('trimesh', raise_error=True)
from IPython.display import display
colors = (
self.colors
if self.colors is not None
else np.tile(
np.array([0, 0, 0]),
(self.points.get_comp_backend().shape(self.points)[0], 1),
)
)
pc = trimesh.points.PointCloud(vertices=self.points, colors=colors)
s = trimesh.Scene(geometry=pc)
display(s.show())
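# A minimal usage sketch (synthetic data, illustrative only): build a point cloud
# document from numpy arrays; `colors` is optional and may be left as None.
if __name__ == '__main__':
    example_points = np.random.rand(100, 3)  # 100 points in 3D space
    example_colors = np.random.rand(100, 3)  # one RGB triplet per point
    pc_doc = PointsAndColors(points=example_points, colors=example_colors)
    print(pc_doc.points.shape, pc_doc.colors.shape)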
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='PointsAndColors')
class PointsAndColors(BaseDoc):
"""
Document for handling point clouds tensor data.
A PointsAndColors Document can contain an AnyTensor containing the points in
3D space information (`PointsAndColors.points`), and an AnyTensor containing
the points' color information (`PointsAndColors.colors`).
"""
points: AnyTensor
colors: Optional[AnyTensor]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(points=value)
return super().validate(value)
def display(self) -> None:
"""
Plot point cloud consisting of points in 3D space and optionally colors.
To use this you need to install trimesh[easy]: `pip install 'trimesh[easy]'`.
"""
import trimesh
from IPython.display import display
colors = (
self.colors
if self.colors is not None
else np.tile(
np.array([0, 0, 0]),
(self.points.get_comp_backend().shape(self.points)[0], 1),
)
)
pc = trimesh.points.PointCloud(vertices=self.points, colors=colors)
s = trimesh.Scene(geometry=pc)
display(s.show())
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .coco_video_metric import CocoVideoMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dump_det_results import DumpDetResults
from .dump_proposals_metric import DumpProposals
from .lvis_metric import LVISMetric
from .mot_challenge_metric import MOTChallengeMetric
from .openimages_metric import OpenImagesMetric
from .reid_metric import ReIDMetrics
from .voc_metric import VOCMetric
from .youtube_vis_metric import YouTubeVISMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals',
'CocoOccludedSeparatedMetric', 'DumpDetResults', 'BaseVideoMetric',
'MOTChallengeMetric', 'CocoVideoMetric', 'ReIDMetrics', 'YouTubeVISMetric'
]
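# Illustrative note (not part of this module): downstream configs select one of the
# metrics above by its registered class name, e.g.
#   val_evaluator = dict(type='CocoMetric', metric='bbox')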
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .coco_video_metric import CocoVideoMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dump_det_results import DumpDetResults
from .dump_proposals_metric import DumpProposals
from .lvis_metric import LVISMetric
from .mot_challenge_metric import MOTChallengeMetric
from .openimages_metric import OpenImagesMetric
from .reid_metric import ReIDMetrics
from .voc_metric import VOCMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals',
'CocoOccludedSeparatedMetric', 'DumpDetResults', 'BaseVideoMetric',
'MOTChallengeMetric', 'CocoVideoMetric', 'ReIDMetrics'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor import * # noqa: F401, F403
from .bbox import * # noqa: F401, F403
from .data_structures import * # noqa: F401, F403
from .evaluation import * # noqa: F401, F403
from .hook import * # noqa: F401, F403
from .mask import * # noqa: F401, F403
from .post_processing import * # noqa: F401, F403
from .utils import * # noqa: F401, F403
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor import * # noqa: F401, F403
from .bbox import * # noqa: F401, F403
from .evaluation import * # noqa: F401, F403
from .hook import * # noqa: F401, F403
from .mask import * # noqa: F401, F403
from .post_processing import * # noqa: F401, F403
from .utils import * # noqa: F401, F403
|
import pytest
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.workflow.service import ServiceManager, ServiceNotFoundError
from llama_index.core.workflow.workflow import Workflow
class ServiceWorkflow(Workflow):
"""This wokflow is only responsible to generate a number, it knows nothing about the caller."""
def __init__(self, *args, **kwargs) -> None:
self._the_answer = kwargs.pop("the_answer", 42)
super().__init__(*args, **kwargs)
@step
async def generate(self, ev: StartEvent) -> StopEvent:
return StopEvent(result=self._the_answer)
class NumGenerated(Event):
"""To be used in the dummy workflow below."""
num: int
class DummyWorkflow(Workflow):
"""
This workflow needs a number, and it calls another workflow to get one.
A service named "service_workflow" must be added to `DummyWorkflow` for
the step to be able to use it (see below).
    This step knows nothing about the other workflow; it gets an instance
and it only knows it has to call `run` on that instance.
"""
@step
async def get_a_number(
self,
ev: StartEvent,
ctx: Context,
service_workflow: ServiceWorkflow = ServiceWorkflow(),
) -> NumGenerated:
res = await service_workflow.run()
return NumGenerated(num=int(res))
@step
async def multiply(self, ev: NumGenerated) -> StopEvent:
return StopEvent(ev.num * 2)
@pytest.mark.asyncio
async def test_e2e():
wf = DummyWorkflow()
# We are responsible for passing the ServiceWorkflow instances to the dummy workflow
# and give it a name, in this case "service_workflow"
wf.add_workflows(service_workflow=ServiceWorkflow(the_answer=1337))
res = await wf.run()
assert res == 2674
@pytest.mark.asyncio
async def test_default_value_for_service():
wf = DummyWorkflow()
# We don't add any workflow to leverage the default value defined by the user
res = await wf.run()
assert res == 84
def test_service_manager_add(workflow):
s = ServiceManager()
s.add("test_id", workflow)
assert s._services["test_id"] == workflow
def test_service_manager_get(workflow):
s = ServiceManager()
s._services["test_id"] = workflow
assert s.get("test_id") == workflow
with pytest.raises(ServiceNotFoundError):
s.get("not_found")
|
import pytest
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.workflow.service import ServiceManager, ServiceNotFoundError
from llama_index.core.workflow.workflow import Workflow
class ServiceWorkflow(Workflow):
"""This wokflow is only responsible to generate a number, it knows nothing about the caller."""
def __init__(self, *args, **kwargs) -> None:
self._the_answer = kwargs.pop("the_answer", 42)
super().__init__(*args, **kwargs)
@step
async def generate(self, ev: StartEvent) -> StopEvent:
return StopEvent(result=self._the_answer)
class NumGenerated(Event):
"""To be used in the dummy workflow below."""
num: int
class DummyWorkflow(Workflow):
"""
This workflow needs a number, and it calls another workflow to get one.
A service named "service_workflow" must be added to `DummyWorkflow` for
the step to be able to use it (see below).
    This step knows nothing about the other workflow; it gets an instance
and it only knows it has to call `run` on that instance.
"""
@step
async def get_a_number(
self,
ev: StartEvent,
ctx: Context,
service_workflow: ServiceWorkflow = ServiceWorkflow(),
) -> NumGenerated:
res = await service_workflow.run()
return NumGenerated(num=int(res))
@step
async def multiply(self, ev: NumGenerated) -> StopEvent:
return StopEvent(ev.num * 2)
@pytest.mark.asyncio()
async def test_e2e():
wf = DummyWorkflow()
# We are responsible for passing the ServiceWorkflow instance to the dummy workflow
# and giving it a name, in this case "service_workflow"
wf.add_workflows(service_workflow=ServiceWorkflow(the_answer=1337))
res = await wf.run()
assert res == 2674
@pytest.mark.asyncio()
async def test_default_value_for_service():
wf = DummyWorkflow()
# We don't add any workflow, so the default value defined by the user is used
res = await wf.run()
assert res == 84
def test_service_manager_add(workflow):
s = ServiceManager()
s.add("test_id", workflow)
assert s._services["test_id"] == workflow
def test_service_manager_get(workflow):
s = ServiceManager()
s._services["test_id"] = workflow
assert s.get("test_id") == workflow
with pytest.raises(ServiceNotFoundError):
s.get("not_found")
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import VideoDoc, AudioDoc
from docarray.typing import AudioNdArray, NdArray, VideoNdArray
from docarray.utils._internal.misc import is_tf_available
from docarray.utils._internal.pydantic import is_pydantic_v2
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE])
def test_video(file_url):
vid = VideoDoc(url=file_url)
tensor, audio_tensor, key_frame_indices = vid.url.load()
vid.tensor = tensor
vid.audio = AudioDoc(tensor=audio_tensor)
vid.key_frame_indices = key_frame_indices
assert isinstance(vid.tensor, VideoNdArray)
assert isinstance(vid.audio.tensor, AudioNdArray)
assert isinstance(vid.key_frame_indices, NdArray)
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_video_np():
video = parse_obj_as(VideoDoc, np.zeros((10, 10, 3)))
assert (video.tensor == np.zeros((10, 10, 3))).all()
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_video_torch():
video = parse_obj_as(VideoDoc, torch.zeros(10, 10, 3))
assert (video.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
@pytest.mark.tensorflow
def test_video_tensorflow():
video = parse_obj_as(VideoDoc, tf.zeros((10, 10, 3)))
assert tnp.allclose(video.tensor.tensor, tf.zeros((10, 10, 3)))
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_video_shortcut_doc():
class MyDoc(BaseDoc):
video: VideoDoc
video2: VideoDoc
video3: VideoDoc
doc = MyDoc(
video='http://myurl.mp4',
video2=np.zeros((10, 10, 3)),
video3=torch.zeros(10, 10, 3),
)
assert doc.video.url == 'http://myurl.mp4'
assert (doc.video2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.video3.tensor == torch.zeros(10, 10, 3)).all()
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import VideoDoc
from docarray.typing import AudioNdArray, NdArray, VideoNdArray
from docarray.utils._internal.misc import is_tf_available
from docarray.utils._internal.pydantic import is_pydantic_v2
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE])
def test_video(file_url):
vid = VideoDoc(url=file_url)
vid.tensor, vid.audio.tensor, vid.key_frame_indices = vid.url.load()
assert isinstance(vid.tensor, VideoNdArray)
assert isinstance(vid.audio.tensor, AudioNdArray)
assert isinstance(vid.key_frame_indices, NdArray)
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_video_np():
video = parse_obj_as(VideoDoc, np.zeros((10, 10, 3)))
assert (video.tensor == np.zeros((10, 10, 3))).all()
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_video_torch():
video = parse_obj_as(VideoDoc, torch.zeros(10, 10, 3))
assert (video.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
@pytest.mark.tensorflow
def test_video_tensorflow():
video = parse_obj_as(VideoDoc, tf.zeros((10, 10, 3)))
assert tnp.allclose(video.tensor.tensor, tf.zeros((10, 10, 3)))
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_video_shortcut_doc():
class MyDoc(BaseDoc):
video: VideoDoc
video2: VideoDoc
video3: VideoDoc
doc = MyDoc(
video='http://myurl.mp4',
video2=np.zeros((10, 10, 3)),
video3=torch.zeros(10, 10, 3),
)
assert doc.video.url == 'http://myurl.mp4'
assert (doc.video2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.video3.tensor == torch.zeros(10, 10, 3)).all()
|
import numpy as np
import pytest
import torch
from docarray import BaseDoc, DocArray
from docarray.array import DocArrayStacked
from docarray.typing import NdArray, TorchTensor
@pytest.fixture()
def batch():
class Image(BaseDoc):
tensor: TorchTensor[3, 224, 224]
batch = DocArray[Image]([Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)])
return batch.stack()
@pytest.mark.proto
def test_proto_stacked_mode_torch(batch):
batch.from_protobuf(batch.to_protobuf())
@pytest.mark.proto
def test_proto_stacked_mode_numpy():
class MyDoc(BaseDoc):
tensor: NdArray[3, 224, 224]
da = DocArray[MyDoc]([MyDoc(tensor=np.zeros((3, 224, 224))) for _ in range(10)])
da = da.stack()
da.from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_stacked_proto():
class CustomDocument(BaseDoc):
image: NdArray
da = DocArray[CustomDocument](
[CustomDocument(image=np.zeros((3, 224, 224))) for _ in range(10)]
).stack()
da2 = DocArrayStacked.from_protobuf(da.to_protobuf())
assert isinstance(da2, DocArrayStacked)
|
import numpy as np
import pytest
import torch
from docarray import BaseDocument, DocumentArray
from docarray.array import DocumentArrayStacked
from docarray.typing import NdArray, TorchTensor
@pytest.fixture()
def batch():
class Image(BaseDocument):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
return batch.stack()
@pytest.mark.proto
def test_proto_stacked_mode_torch(batch):
batch.from_protobuf(batch.to_protobuf())
@pytest.mark.proto
def test_proto_stacked_mode_numpy():
class MyDoc(BaseDocument):
tensor: NdArray[3, 224, 224]
da = DocumentArray[MyDoc](
[MyDoc(tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
da = da.stack()
da.from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_stacked_proto():
class CustomDocument(BaseDocument):
image: NdArray
da = DocumentArray[CustomDocument](
[CustomDocument(image=np.zeros((3, 224, 224))) for _ in range(10)]
).stack()
da2 = DocumentArrayStacked.from_protobuf(da.to_protobuf())
assert isinstance(da2, DocumentArrayStacked)
|
from __future__ import annotations
import re
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.util import is_datasets_available
from tests.utils import is_ci
if not is_datasets_available():
pytest.skip(
reason="Datasets are not installed. Please install `datasets` with `pip install datasets`",
allow_module_level=True,
)
if is_ci():
pytest.skip(
reason="Skip test in CI to try and avoid 429 Client Error",
allow_module_level=True,
)
def test_nanobeir_evaluator(stsb_bert_tiny_model_reused: SentenceTransformer):
"""Tests that the NanoBERTEvaluator can be loaded and produces expected metrics"""
datasets = ["QuoraRetrieval", "MSMARCO"]
query_prompts = {
"QuoraRetrieval": "Instruct: Given a question, retrieve questions that are semantically equivalent to the given question\\nQuery: ",
"MSMARCO": "Instruct: Given a web search query, retrieve relevant passages that answer the query\\nQuery: ",
}
model = stsb_bert_tiny_model_reused
evaluator = NanoBEIREvaluator(
dataset_names=datasets,
query_prompts=query_prompts,
)
results = evaluator(model)
assert len(results) > 0
assert all(isinstance(results[metric], float) for metric in results)
def test_nanobeir_evaluator_with_invalid_dataset():
"""Test that NanoBEIREvaluator raises an error for invalid dataset names."""
invalid_datasets = ["invalidDataset"]
with pytest.raises(
ValueError,
match=re.escape(
r"Dataset(s) ['invalidDataset'] not found in the NanoBEIR collection. "
r"Valid dataset names are: ['climatefever', 'dbpedia', 'fever', 'fiqa2018', 'hotpotqa', 'msmarco', 'nfcorpus', 'nq', 'quoraretrieval', 'scidocs', 'arguana', 'scifact', 'touche2020']"
),
):
NanoBEIREvaluator(dataset_names=invalid_datasets)
def test_nanobeir_evaluator_empty_inputs():
"""Test that NanoBEIREvaluator behaves correctly with empty datasets."""
with pytest.raises(ValueError, match="dataset_names cannot be empty. Use None to evaluate on all datasets."):
NanoBEIREvaluator(dataset_names=[])
|
from __future__ import annotations
import re
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.util import is_datasets_available
if not is_datasets_available():
pytest.skip(
reason="Datasets are not installed. Please install `datasets` with `pip install datasets`",
allow_module_level=True,
)
def test_nanobeir_evaluator():
"""Tests that the NanoBERTEvaluator can be loaded and produces expected metrics"""
datasets = ["QuoraRetrieval", "MSMARCO"]
query_prompts = {
"QuoraRetrieval": "Instruct: Given a question, retrieve questions that are semantically equivalent to the given question\\nQuery: ",
"MSMARCO": "Instruct: Given a web search query, retrieve relevant passages that answer the query\\nQuery: ",
}
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
evaluator = NanoBEIREvaluator(
dataset_names=datasets,
query_prompts=query_prompts,
)
results = evaluator(model)
assert len(results) > 0
assert all(isinstance(results[metric], float) for metric in results)
def test_nanobeir_evaluator_with_invalid_dataset():
"""Test that NanoBEIREvaluator raises an error for invalid dataset names."""
invalid_datasets = ["invalidDataset"]
with pytest.raises(
ValueError,
match=re.escape(
r"Dataset(s) ['invalidDataset'] not found in the NanoBEIR collection. "
r"Valid dataset names are: ['climatefever', 'dbpedia', 'fever', 'fiqa2018', 'hotpotqa', 'msmarco', 'nfcorpus', 'nq', 'quoraretrieval', 'scidocs', 'arguana', 'scifact', 'touche2020']"
),
):
NanoBEIREvaluator(dataset_names=invalid_datasets)
def test_nanobeir_evaluator_empty_inputs():
"""Test that NanoBEIREvaluator behaves correctly with empty datasets."""
with pytest.raises(ValueError, match="dataset_names cannot be empty. Use None to evaluate on all datasets."):
NanoBEIREvaluator(dataset_names=[])
|
"""Hypothetical Document Embeddings.
https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations
import logging
from typing import Any, Optional
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.runnables import Runnable
from pydantic import ConfigDict
from langchain.chains.base import Chain
from langchain.chains.hyde.prompts import PROMPT_MAP
from langchain.chains.llm import LLMChain
logger = logging.getLogger(__name__)
class HypotheticalDocumentEmbedder(Chain, Embeddings):
"""Generate hypothetical document for query, and then embed that.
Based on https://arxiv.org/abs/2212.10496
"""
base_embeddings: Embeddings
llm_chain: Runnable
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> list[str]:
"""Input keys for Hyde's LLM chain."""
return self.llm_chain.input_schema.model_json_schema()["required"]
@property
def output_keys(self) -> list[str]:
"""Output keys for Hyde's LLM chain."""
if isinstance(self.llm_chain, LLMChain):
return self.llm_chain.output_keys
else:
return ["text"]
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Call the base embeddings."""
return self.base_embeddings.embed_documents(texts)
def combine_embeddings(self, embeddings: list[list[float]]) -> list[float]:
"""Combine embeddings into final embeddings."""
try:
import numpy as np
return list(np.array(embeddings).mean(axis=0))
except ImportError:
logger.warning(
"NumPy not found in the current Python environment. "
"HypotheticalDocumentEmbedder will use a pure Python implementation "
"for internal calculations, which may significantly impact "
"performance, especially for large datasets. For optimal speed and "
"efficiency, consider installing NumPy: pip install numpy"
)
if not embeddings:
return []
num_vectors = len(embeddings)
return [sum(dim_values) / num_vectors for dim_values in zip(*embeddings)]
def embed_query(self, text: str) -> list[float]:
"""Generate a hypothetical document and embedded it."""
var_name = self.input_keys[0]
result = self.llm_chain.invoke({var_name: text})
if isinstance(self.llm_chain, LLMChain):
documents = [result[self.output_keys[0]]]
else:
documents = [result]
embeddings = self.embed_documents(documents)
return self.combine_embeddings(embeddings)
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, str]:
"""Call the internal llm chain."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
return self.llm_chain.invoke(
inputs, config={"callbacks": _run_manager.get_child()}
)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
base_embeddings: Embeddings,
prompt_key: Optional[str] = None,
custom_prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> HypotheticalDocumentEmbedder:
"""Load and use LLMChain with either a specific prompt key or custom prompt."""
if custom_prompt is not None:
prompt = custom_prompt
elif prompt_key is not None and prompt_key in PROMPT_MAP:
prompt = PROMPT_MAP[prompt_key]
else:
msg = (
f"Must specify prompt_key if custom_prompt not provided. Should be one "
f"of {list(PROMPT_MAP.keys())}."
)
raise ValueError(msg)
llm_chain = prompt | llm | StrOutputParser()
return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs)
@property
def _chain_type(self) -> str:
return "hyde_chain"
|
"""Hypothetical Document Embeddings.
https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations
import logging
from typing import Any, Optional
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.runnables import Runnable
from pydantic import ConfigDict
from langchain.chains.base import Chain
from langchain.chains.hyde.prompts import PROMPT_MAP
from langchain.chains.llm import LLMChain
logger = logging.getLogger(__name__)
class HypotheticalDocumentEmbedder(Chain, Embeddings):
"""Generate hypothetical document for query, and then embed that.
Based on https://arxiv.org/abs/2212.10496
"""
base_embeddings: Embeddings
llm_chain: Runnable
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> list[str]:
"""Input keys for Hyde's LLM chain."""
return self.llm_chain.input_schema.model_json_schema()["required"]
@property
def output_keys(self) -> list[str]:
"""Output keys for Hyde's LLM chain."""
if isinstance(self.llm_chain, LLMChain):
return self.llm_chain.output_keys
else:
return ["text"]
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Call the base embeddings."""
return self.base_embeddings.embed_documents(texts)
def combine_embeddings(self, embeddings: list[list[float]]) -> list[float]:
"""Combine embeddings into final embeddings."""
try:
import numpy as np
return list(np.array(embeddings).mean(axis=0))
except ImportError:
logger.warning(
"NumPy not found in the current Python environment. "
"HypotheticalDocumentEmbedder will use a pure Python implementation "
"for internal calculations, which may significantly impact "
"performance, especially for large datasets. For optimal speed and "
"efficiency, consider installing NumPy: pip install numpy"
)
if not embeddings:
return []
num_vectors = len(embeddings)
return [sum(dim_values) / num_vectors for dim_values in zip(*embeddings)]
def embed_query(self, text: str) -> list[float]:
"""Generate a hypothetical document and embedded it."""
var_name = self.input_keys[0]
result = self.llm_chain.invoke({var_name: text})
if isinstance(self.llm_chain, LLMChain):
documents = [result[self.output_keys[0]]]
else:
documents = [result]
embeddings = self.embed_documents(documents)
return self.combine_embeddings(embeddings)
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, str]:
"""Call the internal llm chain."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
return self.llm_chain.invoke(
inputs, config={"callbacks": _run_manager.get_child()}
)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
base_embeddings: Embeddings,
prompt_key: Optional[str] = None,
custom_prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> HypotheticalDocumentEmbedder:
"""Load and use LLMChain with either a specific prompt key or custom prompt."""
if custom_prompt is not None:
prompt = custom_prompt
elif prompt_key is not None and prompt_key in PROMPT_MAP:
prompt = PROMPT_MAP[prompt_key]
else:
raise ValueError(
f"Must specify prompt_key if custom_prompt not provided. Should be one "
f"of {list(PROMPT_MAP.keys())}."
)
llm_chain = prompt | llm | StrOutputParser()
return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs)
@property
def _chain_type(self) -> str:
return "hyde_chain"
|
import warnings
from typing import Any, Dict, Optional, Sequence, Tuple, Type, Union
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import Transform
from torchvision.transforms.v2._utils import _get_defaultdict
from torchvision.transforms.v2.utils import is_simple_tensor
class PermuteDimensions(Transform):
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dims: Union[Sequence[int], Dict[Type, Optional[Sequence[int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [datapoints.Image, datapoints.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `datapoints.Image` or `datapoints.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `datapoints.Image` or `datapoints.Video` is present in the input."
)
self.dims = dims
def _transform(
self, inpt: Union[datapoints._TensorImageType, datapoints._TensorVideoType], params: Dict[str, Any]
) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.permute(*dims)
class TransposeDimensions(Transform):
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dims: Union[Tuple[int, int], Dict[Type, Optional[Tuple[int, int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [datapoints.Image, datapoints.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `datapoints.Image` or `datapoints.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `datapoints.Image` or `datapoints.Video` is present in the input."
)
self.dims = dims
def _transform(
self, inpt: Union[datapoints._TensorImageType, datapoints._TensorVideoType], params: Dict[str, Any]
) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.transpose(*dims)
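# Usage sketch (illustrative only): how these transforms might be configured,
# assuming the prototype datapoints API above. The shapes and dim orders are
# arbitrary examples, not values required by the transforms.
#
#     from torchvision import datapoints
#
#     # Same permutation applied to every supported type.
#     permute = PermuteDimensions(dims=(0, 1, 3, 2))
#     video = datapoints.Video(torch.rand(4, 3, 32, 32))
#     out = permute(video)  # plain tensor with the last two dims swapped
#
#     # dims can also be given per type; None means "return as a plain tensor,
#     # otherwise unchanged" for that type.
#     transpose = TransposeDimensions(dims={datapoints.Image: (-2, -1), datapoints.Video: None})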
|
import warnings
from typing import Any, Dict, Optional, Sequence, Tuple, Type, Union
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import Transform
from torchvision.transforms.v2._utils import _get_defaultdict
from torchvision.transforms.v2.utils import is_simple_tensor
class PermuteDimensions(Transform):
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dims: Union[Sequence[int], Dict[Type, Optional[Sequence[int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [datapoints.Image, datapoints.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `datapoints.Image` or `datapoints.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `datapoints.Image` or `datapoints.Video` is present in the input."
)
self.dims = dims
def _transform(
self, inpt: Union[datapoints.TensorImageType, datapoints.TensorVideoType], params: Dict[str, Any]
) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.permute(*dims)
class TransposeDimensions(Transform):
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dims: Union[Tuple[int, int], Dict[Type, Optional[Tuple[int, int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [datapoints.Image, datapoints.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `datapoints.Image` or `datapoints.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `datapoints.Image` or `datapoints.Video` is present in the input."
)
self.dims = dims
def _transform(
self, inpt: Union[datapoints.TensorImageType, datapoints.TensorVideoType], params: Dict[str, Any]
) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.transpose(*dims)
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import SABLRetinaHead
def test_sabl_retina_head_loss():
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
head = SABLRetinaHead(
num_classes=4,
in_channels=3,
feat_channels=10,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
train_cfg=cfg)
if torch.cuda.is_available():
head.cuda()
# Anchor head expects multiple levels of features per image
feat = [
torch.rand(1, 3, s // (2**(i + 2)), s // (2**(i + 2))).cuda()
for i in range(len(head.approx_anchor_generator.base_anchors))
]
cls_scores, bbox_preds = head.forward(feat)
# Test that empty ground truth encourages the network
# to predict background
gt_bboxes = [torch.empty((0, 4)).cuda()]
gt_labels = [torch.LongTensor([]).cuda()]
gt_bboxes_ignore = None
empty_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_cls_loss = sum(empty_gt_losses['loss_bbox_cls'])
empty_box_reg_loss = sum(empty_gt_losses['loss_bbox_reg'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_cls_loss.item() == 0, (
'there should be no box cls loss when there are no true boxes')
assert empty_box_reg_loss.item() == 0, (
'there should be no box reg loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should
# be nonzero for random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
]
gt_labels = [torch.LongTensor([2]).cuda()]
one_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_cls_loss = sum(one_gt_losses['loss_bbox_cls'])
onegt_box_reg_loss = sum(one_gt_losses['loss_bbox_reg'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_cls_loss.item() > 0, 'box loss cls should be non-zero'
assert onegt_box_reg_loss.item() > 0, 'box loss reg should be non-zero'
|
import mmcv
import torch
from mmdet.models.dense_heads import SABLRetinaHead
def test_sabl_retina_head_loss():
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
head = SABLRetinaHead(
num_classes=4,
in_channels=3,
feat_channels=10,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
train_cfg=cfg)
if torch.cuda.is_available():
head.cuda()
# Anchor head expects multiple levels of features per image
feat = [
torch.rand(1, 3, s // (2**(i + 2)), s // (2**(i + 2))).cuda()
for i in range(len(head.approx_anchor_generator.base_anchors))
]
cls_scores, bbox_preds = head.forward(feat)
# Test that empty ground truth encourages the network
# to predict background
gt_bboxes = [torch.empty((0, 4)).cuda()]
gt_labels = [torch.LongTensor([]).cuda()]
gt_bboxes_ignore = None
empty_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_cls_loss = sum(empty_gt_losses['loss_bbox_cls'])
empty_box_reg_loss = sum(empty_gt_losses['loss_bbox_reg'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_cls_loss.item() == 0, (
'there should be no box cls loss when there are no true boxes')
assert empty_box_reg_loss.item() == 0, (
'there should be no box reg loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should
# be nonzero for random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
]
gt_labels = [torch.LongTensor([2]).cuda()]
one_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_cls_loss = sum(one_gt_losses['loss_bbox_cls'])
onegt_box_reg_loss = sum(one_gt_losses['loss_bbox_reg'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_cls_loss.item() > 0, 'box loss cls should be non-zero'
assert onegt_box_reg_loss.item() > 0, 'box loss reg should be non-zero'
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dist import (all_gather_object, all_reduce, all_gather, all_reduce_dict,
collect_results, gather, broadcast, gather_object,
sync_random_seed, broadcast_object_list,
collect_results_cpu, collect_results_gpu, all_reduce_params)
from .utils import (get_dist_info, init_dist, init_local_group, get_backend,
get_world_size, get_rank, get_local_size, get_local_rank,
is_main_process, master_only, barrier, get_local_group,
is_distributed, get_default_group, get_data_device,
get_comm_device, cast_data_device)
__all__ = [
'all_gather_object', 'all_reduce', 'all_gather', 'all_reduce_dict',
'collect_results', 'collect_results_cpu', 'collect_results_gpu', 'gather',
'broadcast', 'gather_object', 'sync_random_seed', 'broadcast_object_list',
'get_dist_info', 'init_dist', 'init_local_group', 'get_backend',
'get_world_size', 'get_rank', 'get_local_size', 'get_local_group',
'get_local_rank', 'is_main_process', 'master_only', 'barrier',
'is_distributed', 'get_default_group', 'all_reduce_params',
'get_data_device', 'get_comm_device', 'cast_data_device'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dist import (all_gather_object, all_reduce, all_gather, all_reduce_dict,
collect_results, gather, broadcast, gather_object,
sync_random_seed, broadcast_object_list,
collect_results_cpu, collect_results_gpu)
from .utils import (get_dist_info, init_dist, init_local_group, get_backend,
get_world_size, get_rank, get_local_size, get_local_rank,
is_main_process, master_only, barrier, get_local_group,
is_distributed, get_default_group, get_data_device,
get_comm_device, cast_data_device)
__all__ = [
'all_gather_object', 'all_reduce', 'all_gather', 'all_reduce_dict',
'collect_results', 'collect_results_cpu', 'collect_results_gpu', 'gather',
'broadcast', 'gather_object', 'sync_random_seed', 'broadcast_object_list',
'get_dist_info', 'init_dist', 'init_local_group', 'get_backend',
'get_world_size', 'get_rank', 'get_local_size', 'get_local_group',
'get_local_rank', 'is_main_process', 'master_only', 'barrier',
'is_distributed', 'get_default_group', 'get_data_device',
'get_comm_device', 'cast_data_device'
]
|
import os
import fsspec
import pytest
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from datasets.utils._hf_hub_fixes import dataset_info as hf_api_dataset_info
from .utils import require_lz4, require_zstandard
def test_extract_path_from_uri():
mock_bucket = "mock-s3-bucket"
dataset_path = f"s3://{mock_bucket}"
dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path.startswith("s3://") is False
dataset_path = "./local/path"
new_dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
is_remote = is_remote_filesystem(mockfs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = input_paths[compression_fs_class.protocol]
if input_path is None:
reason = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.glob("*") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
compressed_file_path = compressed_file_paths[protocol]
member_file_path = "dataset.jsonl"
path = f"{protocol}://{member_file_path}::{compressed_file_path}"
fs, *_ = fsspec.get_fs_token_paths(path)
assert fs.isfile(member_file_path)
assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
repo_info = hf_api_dataset_info(hf_api, hf_private_dataset_repo_txt_data, use_auth_token=hf_token)
hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
assert hffs.isdir("data")
assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
with open(text_file) as f:
assert hffs.open("data/text_data.txt", "r").read() == f.read()
|
import os
import fsspec
import pytest
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from datasets.utils._hf_hub_fixes import dataset_info as hf_api_dataset_info
from .utils import require_lz4, require_zstandard
def test_extract_path_from_uri():
mock_bucket = "mock-s3-bucket"
dataset_path = f"s3://{mock_bucket}"
dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path.startswith("s3://") is False
dataset_path = "./local/path"
new_dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
is_remote = is_remote_filesystem(mockfs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = input_paths[compression_fs_class.protocol]
if input_path is None:
reason = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.ls("/") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
compressed_file_path = compressed_file_paths[protocol]
member_file_path = "dataset.jsonl"
path = f"{protocol}://{member_file_path}::{compressed_file_path}"
fs, *_ = fsspec.get_fs_token_paths(path)
assert fs.isfile(member_file_path)
assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
repo_info = hf_api_dataset_info(hf_api, hf_private_dataset_repo_txt_data, use_auth_token=hf_token)
hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
assert hffs.isdir("data")
assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
with open(text_file) as f:
assert hffs.open("data/text_data.txt", "r").read() == f.read()
|
from .Asym import Asym
from .BoW import BoW
from .CLIPModel import CLIPModel
from .CNN import CNN
from .Dense import Dense
from .Dropout import Dropout
from .LayerNorm import LayerNorm
from .LSTM import LSTM
from .Normalize import Normalize
from .Pooling import Pooling
from .Transformer import Transformer
from .WeightedLayerPooling import WeightedLayerPooling
from .WordEmbeddings import WordEmbeddings
from .WordWeights import WordWeights
__all__ = [
"Transformer",
"Asym",
"BoW",
"CNN",
"Dense",
"Dropout",
"LayerNorm",
"LSTM",
"Normalize",
"Pooling",
"WeightedLayerPooling",
"WordEmbeddings",
"WordWeights",
"CLIPModel",
]
|
from .Transformer import Transformer
from .Asym import Asym
from .BoW import BoW
from .CNN import CNN
from .Dense import Dense
from .Dropout import Dropout
from .LayerNorm import LayerNorm
from .LSTM import LSTM
from .Normalize import Normalize
from .Pooling import Pooling
from .WeightedLayerPooling import WeightedLayerPooling
from .WordEmbeddings import WordEmbeddings
from .WordWeights import WordWeights
from .CLIPModel import CLIPModel
__all__ = [
"Transformer",
"Asym",
"BoW",
"CNN",
"Dense",
"Dropout",
"LayerNorm",
"LSTM",
"Normalize",
"Pooling",
"WeightedLayerPooling",
"WordEmbeddings",
"WordWeights",
"CLIPModel",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.zapier.toolkit import ZapierToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ZapierToolkit": "langchain_community.agent_toolkits.zapier.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ZapierToolkit",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.zapier.toolkit import ZapierToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ZapierToolkit": "langchain_community.agent_toolkits.zapier.toolkit"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ZapierToolkit",
]
|
from typing import Optional
import pandas as pd
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
return MyDocNested
def test_to_from_pandas_df(nested_doc_cls):
da = DocArray[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
),
nested_doc_cls(text='hello world', image=ImageDoc()),
]
)
df = da.to_pandas()
assert isinstance(df, pd.DataFrame)
assert len(df) == 2
assert (
df.columns
== [
'id',
'count',
'text',
'image__id',
'image__url',
'image__tensor',
'image__embedding',
'image__bytes_',
]
).all()
da_from_df = DocArray[nested_doc_cls].from_pandas(df)
for doc1, doc2 in zip(da, da_from_df):
assert doc1 == doc2
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_pandas_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocArray.from_pandas(df=df)
def test_from_pandas_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocArray[nested_doc.__class__].from_pandas(df=df)
|
from typing import Optional
import pandas as pd
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDocument):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
return MyDocNested
def test_to_from_pandas_df(nested_doc_cls):
da = DocumentArray[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
),
nested_doc_cls(text='hello world', image=ImageDoc()),
]
)
df = da.to_pandas()
assert isinstance(df, pd.DataFrame)
assert len(df) == 2
assert (
df.columns
== [
'id',
'count',
'text',
'image__id',
'image__url',
'image__tensor',
'image__embedding',
'image__bytes_',
]
).all()
da_from_df = DocumentArray[nested_doc_cls].from_pandas(df)
for doc1, doc2 in zip(da, da_from_df):
assert doc1 == doc2
@pytest.fixture()
def nested_doc():
class Inner(BaseDocument):
img: Optional[ImageDoc]
class Middle(BaseDocument):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDocument):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_pandas_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocumentArray.from_pandas(df=df)
def test_from_pandas_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocumentArray[nested_doc.__class__].from_pandas(df=df)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import import_vectors
from ..annoy_searcher import AnnoySearcher
# fix the seed here
np.random.seed(500)
docs = DocumentArray([Document(embedding=np.random.random(10)) for i in range(10)])
search_doc = DocumentArray([Document(embedding=np.random.random(10))])
DUMP_PATH = 'tests/dump1'
TOP_K = 5
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
yield metas
del os.environ['TEST_WORKSPACE']
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[1] / 'config.yml'))
assert ex.metric == 'euclidean'
def test_simple_annoy():
from annoy import AnnoyIndex
_index = AnnoyIndex(5, 'angular')
for j in range(3):
_index.add_item(j, np.random.random((5,)))
_index.build(4)
idx1, _ = _index.get_nns_by_vector(
np.random.random((5,)), 3, include_distances=True
)
assert len(idx1) == 3
@pytest.mark.parametrize(
['metric', 'is_distance'],
[
('angular', True),
('euclidean', True),
('manhattan', True),
('hamming', True),
('dot', True),
('angular', False),
('euclidean', False),
('manhattan', False),
('hamming', False),
('dot', False),
],
)
def test_metric(tmpdir, metric, is_distance):
metas = {
'workspace': str(tmpdir),
'name': 'searcher',
}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = AnnoySearcher(
dump_path=DUMP_PATH,
default_top_k=TOP_K,
metas=metas,
metric=metric,
is_distance=is_distance,
runtime_args=runtime_args,
)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
assert len(docs[0].matches) == TOP_K
for i in range(len(docs[0].matches) - 1):
if not is_distance:
assert (
docs[0].matches[i].scores[metric].value
>= docs[0].matches[i + 1].scores[metric].value
)
else:
assert (
docs[0].matches[i].scores[metric].value
<= docs[0].matches[i + 1].scores[metric].value
)
def test_query_vector(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = AnnoySearcher(
dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas, runtime_args=runtime_args
)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
ids, vecs = import_vectors(DUMP_PATH, str(0))
ids = np.array(list(ids))
vecs = np.array(list(vecs))
assert len(docs) == 1
assert len(docs[0].matches) == TOP_K
assert docs[0].matches[0].id in ids
assert len(docs[0].matches[0].embedding) == 7
assert docs[0].matches[0].embedding in vecs
da = DocumentArray([Document(id=0), Document(id=1), Document(id=2)])
indexer.fill_embedding(da)
for i, doc in enumerate(da):
assert list(doc.embedding)
def test_fill_embeddings(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = AnnoySearcher(
dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas, runtime_args=runtime_args
)
da = DocumentArray([Document(id=0), Document(id=1), Document(id=20)])
indexer.fill_embedding(da)
assert da['0'].embedding is not None
assert da['1'].embedding is not None
assert da['20'].embedding is None
def test_query_vector_empty(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = AnnoySearcher(default_top_k=TOP_K, metas=metas, runtime_args=runtime_args)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
assert len(docs[0].matches) == 0
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import import_vectors
from ..annoy_searcher import AnnoySearcher
# fix the seed here
np.random.seed(500)
docs = DocumentArray([Document(embedding=np.random.random(10)) for i in range(10)])
search_doc = DocumentArray([Document(embedding=np.random.random(10))])
DUMP_PATH = 'tests/dump1'
TOP_K = 5
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
yield metas
del os.environ['TEST_WORKSPACE']
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[1] / 'config.yml'))
assert ex.metric == 'euclidean'
def test_simple_annoy():
from annoy import AnnoyIndex
_index = AnnoyIndex(5, 'angular')
for j in range(3):
_index.add_item(j, np.random.random((5,)))
_index.build(4)
idx1, _ = _index.get_nns_by_vector(
np.random.random((5,)), 3, include_distances=True
)
assert len(idx1) == 3
@pytest.mark.parametrize(
['metric', 'is_distance'],
[
('angular', True),
('euclidean', True),
('manhattan', True),
('hamming', True),
('dot', True),
('angular', False),
('euclidean', False),
('manhattan', False),
('hamming', False),
('dot', False),
],
)
def test_metric(tmpdir, metric, is_distance):
metas = {
'workspace': str(tmpdir),
'name': 'searcher',
}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = AnnoySearcher(
dump_path=DUMP_PATH,
default_top_k=TOP_K,
metas=metas,
metric=metric,
is_distance=is_distance,
runtime_args=runtime_args,
)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
assert len(docs[0].matches) == TOP_K
for i in range(len(docs[0].matches) - 1):
if not is_distance:
assert (
docs[0].matches[i].scores[metric].value
>= docs[0].matches[i + 1].scores[metric].value
)
else:
assert (
docs[0].matches[i].scores[metric].value
<= docs[0].matches[i + 1].scores[metric].value
)
def test_query_vector(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = AnnoySearcher(
dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas, runtime_args=runtime_args
)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
ids, vecs = import_vectors(DUMP_PATH, str(0))
ids = np.array(list(ids))
vecs = np.array(list(vecs))
assert len(docs) == 1
assert len(docs[0].matches) == TOP_K
assert docs[0].matches[0].id in ids
assert len(docs[0].matches[0].embedding) == 7
assert docs[0].matches[0].embedding in vecs
da = DocumentArray([Document(id=0), Document(id=1), Document(id=2)])
indexer.fill_embedding(da)
for i, doc in enumerate(da):
assert list(doc.embedding)
def test_fill_embeddings(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = AnnoySearcher(
dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas, runtime_args=runtime_args
)
da = DocumentArray([Document(id=0), Document(id=1), Document(id=20)])
indexer.fill_embedding(da)
assert da['0'].embedding is not None
assert da['1'].embedding is not None
assert da['20'].embedding is None
def test_query_vector_empty(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = AnnoySearcher(default_top_k=TOP_K, metas=metas, runtime_args=runtime_args)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
assert len(docs[0].matches) == 0
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.meta import convert_messages_to_prompt_llama
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"convert_messages_to_prompt_llama": "langchain_community.chat_models.meta",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"convert_messages_to_prompt_llama",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.meta import convert_messages_to_prompt_llama
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"convert_messages_to_prompt_llama": "langchain_community.chat_models.meta"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"convert_messages_to_prompt_llama",
]
|
from setuptools import find_packages, setup
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="3.0.0.dev0",
author="Nils Reimers",
author_email="info@nils-reimers.de",
description="Multilingual text embeddings",
long_description=readme,
long_description_content_type="text/markdown",
license="Apache License 2.0",
url="https://www.SBERT.net",
download_url="https://github.com/UKPLab/sentence-transformers/",
packages=find_packages(),
include_package_data=True,
python_requires=">=3.8.0",
install_requires=[
"transformers>=4.34.0,<5.0.0",
"tqdm",
"torch>=1.11.0",
"numpy",
"scikit-learn",
"scipy",
"huggingface-hub>=0.15.1",
"Pillow",
],
extras_require={
"train": [
"datasets",
"accelerate>=0.20.3",
],
"dev": [
"datasets",
"accelerate>=0.20.3",
"pre-commit",
"pytest",
"ruff>=0.3.0",
],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning",
)
|
from setuptools import find_packages, setup
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="3.0.0.dev0",
author="Nils Reimers",
author_email="info@nils-reimers.de",
description="Multilingual text embeddings",
long_description=readme,
long_description_content_type="text/markdown",
license="Apache License 2.0",
url="https://www.SBERT.net",
download_url="https://github.com/UKPLab/sentence-transformers/",
packages=find_packages(),
include_package_data=True,
python_requires=">=3.8.0",
install_requires=[
"transformers>=4.34.0,<5.0.0",
"tqdm",
"torch>=1.11.0",
"numpy",
"scikit-learn",
"scipy",
"huggingface-hub>=0.15.1",
"Pillow",
"datasets",
"accelerate>=0.20.3",
],
extras_require={
"dev": [
"pre-commit",
"pytest",
"ruff>=0.3.0",
],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning",
)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import List
import numpy as np
import pytest
from executor.torch_encoder import ImageTorchEncoder
from jina import Document, DocumentArray, Flow
@pytest.mark.parametrize(
'arr_in',
[
(np.ones((224, 224, 3), dtype=np.uint8)),
(np.ones((100, 100, 3), dtype=np.uint8)),
(np.ones((50, 40, 3), dtype=np.uint8)),
],
)
def test_no_batch(arr_in: np.ndarray):
flow = Flow().add(uses=ImageTorchEncoder)
with flow:
resp = flow.post(
on='/test', inputs=[Document(blob=arr_in)], return_results=True
)
results_arr = DocumentArray(resp[0].data.docs)
assert len(results_arr) == 1
assert results_arr[0].embedding is not None
assert results_arr[0].embedding.shape == (512,)
def test_with_batch():
flow = Flow().add(uses=ImageTorchEncoder)
with flow:
resp = flow.post(
on='/test',
inputs=(
Document(blob=np.ones((224, 224, 3), dtype=np.uint8)) for _ in range(25)
),
return_results=True,
)
assert len(resp[0].docs.get_attributes('embedding')) == 25
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_paths'],
[
(pytest.lazy_fixture('docs_with_blobs'), [['r', 11], ['c', 0], ['cc', 0]], 'r'),
(
pytest.lazy_fixture('docs_with_chunk_blobs'),
[['r', 0], ['c', 11], ['cc', 0]],
'c',
),
(
pytest.lazy_fixture('docs_with_chunk_chunk_blobs'),
[['r', 0], ['c', 0], ['cc', 11]],
'cc',
),
],
)
def test_traversal_paths(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_paths: str
):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
for path, count in expected_docs_per_path:
embeddings = (
DocumentArray(res[0].docs)
.traverse_flat([path])
.get_attributes('embedding')
)
return len([em for em in embeddings if em is not None]) == count
return validate
flow = Flow().add(uses=ImageTorchEncoder)
with flow:
resp = flow.post(
on='/test',
inputs=docs,
parameters={'traversal_paths': [traversal_paths]},
return_results=True,
)
assert validate_traversal(docs_per_path)(resp)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import List
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from ...torch_encoder import ImageTorchEncoder
@pytest.mark.parametrize(
'arr_in',
[
(np.ones((224, 224, 3), dtype=np.uint8)),
(np.ones((100, 100, 3), dtype=np.uint8)),
(np.ones((50, 40, 3), dtype=np.uint8)),
],
)
def test_no_batch(arr_in: np.ndarray):
flow = Flow().add(uses=ImageTorchEncoder)
with flow:
resp = flow.post(
on='/test', inputs=[Document(blob=arr_in)], return_results=True
)
results_arr = DocumentArray(resp[0].data.docs)
assert len(results_arr) == 1
assert results_arr[0].embedding is not None
assert results_arr[0].embedding.shape == (512,)
def test_with_batch():
flow = Flow().add(uses=ImageTorchEncoder)
with flow:
resp = flow.post(
on='/test',
inputs=(
Document(blob=np.ones((224, 224, 3), dtype=np.uint8)) for _ in range(25)
),
return_results=True,
)
assert len(resp[0].docs.get_attributes('embedding')) == 25
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_paths'],
[
(pytest.lazy_fixture('docs_with_blobs'), [['r', 11], ['c', 0], ['cc', 0]], 'r'),
(
pytest.lazy_fixture('docs_with_chunk_blobs'),
[['r', 0], ['c', 11], ['cc', 0]],
'c',
),
(
pytest.lazy_fixture('docs_with_chunk_chunk_blobs'),
[['r', 0], ['c', 0], ['cc', 11]],
'cc',
),
],
)
def test_traversal_paths(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_paths: str
):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
            for path, count in expected_docs_per_path:
                embeddings = (
                    DocumentArray(res[0].docs)
                    .traverse_flat([path])
                    .get_attributes('embedding')
                )
                if len([em for em in embeddings if em is not None]) != count:
                    return False
            return True
return validate
flow = Flow().add(uses=ImageTorchEncoder)
with flow:
resp = flow.post(
on='/test',
inputs=docs,
parameters={'traversal_paths': [traversal_paths]},
return_results=True,
)
assert validate_traversal(docs_per_path)(resp)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
from typing import Iterator
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import Blob
class MsWordParser(BaseBlobParser):
"""Parse the Microsoft Word documents from a blob."""
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Parse a Microsoft Word document into the Document iterator.
Args:
blob: The blob to parse.
Returns: An iterator of Documents.
"""
try:
from unstructured.partition.doc import partition_doc
from unstructured.partition.docx import partition_docx
except ImportError as e:
raise ImportError(
"Could not import unstructured, please install with `pip install "
"unstructured`."
) from e
mime_type_parser = {
"application/msword": partition_doc,
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": (
partition_docx
),
}
if blob.mimetype not in (
"application/msword",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
):
raise ValueError("This blob type is not supported for this parser.")
with blob.as_bytes_io() as word_document:
elements = mime_type_parser[blob.mimetype](file=word_document)
text = "\n\n".join([str(el) for el in elements])
metadata = {"source": blob.source}
yield Document(page_content=text, metadata=metadata)
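# --- Usage sketch (illustrative, not part of the parser above) ---
# Driving MsWordParser with a local .docx file; requires `unstructured`, and
# the "example.docx" path below is a placeholder.
if __name__ == "__main__":
    blob = Blob.from_path(
        "example.docx",
        mime_type="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
    )
    for doc in MsWordParser().lazy_parse(blob):
        print(doc.metadata["source"], len(doc.page_content))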
|
from typing import Iterator
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import Blob
class MsWordParser(BaseBlobParser):
"""Parse the Microsoft Word documents from a blob."""
def lazy_parse(self, blob: Blob) -> Iterator[Document]: # type: ignore[valid-type]
"""Parse a Microsoft Word document into the Document iterator.
Args:
blob: The blob to parse.
Returns: An iterator of Documents.
"""
try:
from unstructured.partition.doc import partition_doc
from unstructured.partition.docx import partition_docx
except ImportError as e:
raise ImportError(
"Could not import unstructured, please install with `pip install "
"unstructured`."
) from e
mime_type_parser = {
"application/msword": partition_doc,
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": (
partition_docx
),
}
if blob.mimetype not in ( # type: ignore[attr-defined]
"application/msword",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
):
raise ValueError("This blob type is not supported for this parser.")
with blob.as_bytes_io() as word_document: # type: ignore[attr-defined]
            elements = mime_type_parser[blob.mimetype](file=word_document)  # type: ignore[attr-defined, operator]
text = "\n\n".join([str(el) for el in elements])
metadata = {"source": blob.source} # type: ignore[attr-defined]
yield Document(page_content=text, metadata=metadata)
|
from __future__ import annotations
from typing import Optional, Type
from urllib.parse import urlparse
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from pydantic import BaseModel, Field, model_validator
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class NavigateToolInput(BaseModel):
"""Input for NavigateToolInput."""
url: str = Field(..., description="url to navigate to")
@model_validator(mode="before")
@classmethod
def validate_url_scheme(cls, values: dict) -> dict:
"""Check that the URL scheme is valid."""
url = values.get("url")
parsed_url = urlparse(url)
if parsed_url.scheme not in ("http", "https"):
raise ValueError("URL scheme must be 'http' or 'https'")
return values
class NavigateTool(BaseBrowserTool):
"""Tool for navigating a browser to a URL.
**Security Note**: This tool provides code to control web-browser navigation.
This tool can navigate to any URL, including internal network URLs, and
URLs exposed on the server itself.
However, if exposing this tool to end-users, consider limiting network
access to the server that hosts the agent.
By default, the URL scheme has been limited to 'http' and 'https' to
prevent navigation to local file system URLs (or other schemes).
If access to the local file system is required, consider creating a custom
tool or providing a custom args_schema that allows the desired URL schemes.
See https://python.langchain.com/docs/security for more information.
"""
name: str = "navigate_browser"
description: str = "Navigate a browser to the specified URL"
args_schema: Type[BaseModel] = NavigateToolInput
def _run(
self,
url: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
response = page.goto(url)
status = response.status if response else "unknown"
return f"Navigating to {url} returned status code {status}"
async def _arun(
self,
url: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
response = await page.goto(url)
status = response.status if response else "unknown"
return f"Navigating to {url} returned status code {status}"
|
from __future__ import annotations
from typing import Optional, Type
from urllib.parse import urlparse
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from pydantic import BaseModel, Field, model_validator
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class NavigateToolInput(BaseModel):
"""Input for NavigateToolInput."""
url: str = Field(..., description="url to navigate to")
@model_validator(mode="before")
@classmethod
def validate_url_scheme(cls, values: dict) -> dict:
"""Check that the URL scheme is valid."""
url = values.get("url")
parsed_url = urlparse(url)
if parsed_url.scheme not in ("http", "https"):
raise ValueError("URL scheme must be 'http' or 'https'")
return values
class NavigateTool(BaseBrowserTool):  # type: ignore[override]
"""Tool for navigating a browser to a URL.
**Security Note**: This tool provides code to control web-browser navigation.
This tool can navigate to any URL, including internal network URLs, and
URLs exposed on the server itself.
However, if exposing this tool to end-users, consider limiting network
access to the server that hosts the agent.
By default, the URL scheme has been limited to 'http' and 'https' to
prevent navigation to local file system URLs (or other schemes).
If access to the local file system is required, consider creating a custom
tool or providing a custom args_schema that allows the desired URL schemes.
See https://python.langchain.com/docs/security for more information.
"""
name: str = "navigate_browser"
description: str = "Navigate a browser to the specified URL"
args_schema: Type[BaseModel] = NavigateToolInput
def _run(
self,
url: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
response = page.goto(url)
status = response.status if response else "unknown"
return f"Navigating to {url} returned status code {status}"
async def _arun(
self,
url: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
response = await page.goto(url)
status = response.status if response else "unknown"
return f"Navigating to {url} returned status code {status}"
|
import multiprocessing
import time
import grpc
import pytest
import requests
from jina import __version__
from jina.constants import __jina_env__
from jina.proto import jina_pb2, jina_pb2_grpc
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from tests.helper import _generate_pod_args
from .test_runtimes import _create_gateway_runtime, _create_head_runtime
def _create_worker_runtime(port, name='', executor=None):
args = _generate_pod_args()
args.port = port
args.name = name
if executor:
args.uses = executor
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _create_worker(port):
# create a single worker runtime
p = multiprocessing.Process(target=_create_worker_runtime, args=(port,))
p.start()
time.sleep(0.1)
return p
def _create_gateway(port, graph, pod_addr, protocol):
    # create a single gateway runtime
p = multiprocessing.Process(
target=_create_gateway_runtime,
args=(graph, pod_addr, port, protocol),
)
p.start()
time.sleep(0.1)
return p
def _create_head(port, connection_list_dict, polling='ANY'):
p = multiprocessing.Process(
target=_create_head_runtime, args=(port, connection_list_dict, 'head', polling)
)
p.start()
time.sleep(0.1)
return p
@pytest.mark.parametrize('runtime', ['head', 'worker', 'gateway'])
def test_jina_info_grpc_based_runtimes(runtime, port_generator):
port = port_generator()
connection_list_dict = {}
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{port}"]}}'
if runtime == 'head':
p = _create_head(port, connection_list_dict)
elif runtime == 'gateway':
p = _create_gateway(port, graph_description, pod_addresses, 'grpc')
else:
p = _create_worker(port)
try:
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
channel = grpc.insecure_channel(f'localhost:{port}')
stub = jina_pb2_grpc.JinaInfoRPCStub(channel)
res = stub._status(
jina_pb2.google_dot_protobuf_dot_empty__pb2.Empty(),
)
assert res.jina['jina'] == __version__
for env_var in __jina_env__:
assert env_var in res.envs
except Exception:
assert False
finally:
p.terminate()
p.join()
@pytest.mark.parametrize('protocol', ['http', 'websocket'])
def test_jina_info_gateway_http(protocol, port_generator):
port = port_generator()
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{port}"]}}'
p = _create_gateway(port, graph_description, pod_addresses, protocol)
try:
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
x = requests.get(f'http://localhost:{port}/status')
resp = x.json()
assert 'jina' in resp
assert 'envs' in resp
assert resp['jina']['jina'] == __version__
for env_var in __jina_env__:
assert env_var in resp['envs']
except Exception:
assert False
finally:
p.terminate()
p.join()
|
import multiprocessing
import time
import grpc
import pytest
import requests
from jina import __jina_env__, __version__
from jina.proto import jina_pb2, jina_pb2_grpc
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from tests.helper import _generate_pod_args
from .test_runtimes import _create_gateway_runtime, _create_head_runtime
def _create_worker_runtime(port, name='', executor=None):
args = _generate_pod_args()
args.port = port
args.name = name
if executor:
args.uses = executor
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _create_worker(port):
# create a single worker runtime
p = multiprocessing.Process(target=_create_worker_runtime, args=(port,))
p.start()
time.sleep(0.1)
return p
def _create_gateway(port, graph, pod_addr, protocol):
    # create a single gateway runtime
p = multiprocessing.Process(
target=_create_gateway_runtime,
args=(graph, pod_addr, port, protocol),
)
p.start()
time.sleep(0.1)
return p
def _create_head(port, connection_list_dict, polling='ANY'):
p = multiprocessing.Process(
target=_create_head_runtime, args=(port, connection_list_dict, 'head', polling)
)
p.start()
time.sleep(0.1)
return p
@pytest.mark.parametrize('runtime', ['head', 'worker', 'gateway'])
def test_jina_info_grpc_based_runtimes(runtime, port_generator):
port = port_generator()
connection_list_dict = {}
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{port}"]}}'
if runtime == 'head':
p = _create_head(port, connection_list_dict)
elif runtime == 'gateway':
p = _create_gateway(port, graph_description, pod_addresses, 'grpc')
else:
p = _create_worker(port)
try:
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
channel = grpc.insecure_channel(f'localhost:{port}')
stub = jina_pb2_grpc.JinaInfoRPCStub(channel)
res = stub._status(
jina_pb2.google_dot_protobuf_dot_empty__pb2.Empty(),
)
assert res.jina['jina'] == __version__
for env_var in __jina_env__:
assert env_var in res.envs
except Exception:
assert False
finally:
p.terminate()
p.join()
@pytest.mark.parametrize('protocol', ['http', 'websocket'])
def test_jina_info_gateway_http(protocol, port_generator):
port = port_generator()
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{port}"]}}'
p = _create_gateway(port, graph_description, pod_addresses, protocol)
try:
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
x = requests.get(f'http://localhost:{port}/status')
resp = x.json()
assert 'jina' in resp
assert 'envs' in resp
assert resp['jina']['jina'] == __version__
for env_var in __jina_env__:
assert env_var in resp['envs']
except Exception:
assert False
finally:
p.terminate()
p.join()
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='ATSS',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=128),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
dict(
type='DyHead',
in_channels=256,
out_channels=256,
num_blocks=6,
# disable zero_init_offset to follow official implementation
zero_init_offset=False)
],
bbox_head=dict(
type='ATSSHead',
num_classes=80,
in_channels=256,
pred_kernel_size=1, # follow DyHead official implementation
stacked_convs=0,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128],
center_offset=0.5), # follow DyHead official implementation
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend='pillow'),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend='pillow'),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
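# --- Usage sketch (illustrative, not part of the config above) ---
# One way to inspect this config programmatically with MMEngine; the file
# path below is an assumption about where the config is saved.
if __name__ == '__main__':
    from mmengine.config import Config

    cfg = Config.fromfile('configs/dyhead/atss_r50-caffe_fpn_dyhead_1x_coco.py')
    print(cfg.model.bbox_head.num_classes)  # 80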
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='ATSS',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
dict(
type='DyHead',
in_channels=256,
out_channels=256,
num_blocks=6,
# disable zero_init_offset to follow official implementation
zero_init_offset=False)
],
bbox_head=dict(
type='ATSSHead',
num_classes=80,
in_channels=256,
pred_kernel_size=1, # follow DyHead official implementation
stacked_convs=0,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128],
center_offset=0.5), # follow DyHead official implementation
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
# use caffe img_norm, size_divisor=128, pillow resize
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=(1333, 800),
keep_ratio=True,
backend='pillow'),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=128),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True, backend='pillow'),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=128),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
import functools
import torch
import torch._custom_ops
import torch.library
# Ensure that torch.ops.torchvision is visible
import torchvision.extension # noqa: F401
@functools.lru_cache(None)
def get_meta_lib():
return torch.library.Library("torchvision", "IMPL", "Meta")
def register_meta(op_name, overload_name="default"):
def wrapper(fn):
if torchvision.extension._has_ops():
get_meta_lib().impl(getattr(getattr(torch.ops.torchvision, op_name), overload_name), fn)
return fn
return wrapper
@register_meta("roi_align")
def meta_roi_align(input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
torch._check(rois.size(1) == 5, lambda: "rois must have shape as Tensor[K, 5]")
torch._check(
input.dtype == rois.dtype,
lambda: (
"Expected tensor for input to have the same type as tensor for rois; "
f"but type {input.dtype} does not equal {rois.dtype}"
),
)
num_rois = rois.size(0)
channels = input.size(1)
return input.new_empty((num_rois, channels, pooled_height, pooled_width))
@register_meta("_roi_align_backward")
def meta_roi_align_backward(
grad, rois, spatial_scale, pooled_height, pooled_width, batch_size, channels, height, width, sampling_ratio, aligned
):
torch._check(
grad.dtype == rois.dtype,
lambda: (
"Expected tensor for grad to have the same type as tensor for rois; "
f"but type {grad.dtype} does not equal {rois.dtype}"
),
)
return grad.new_empty((batch_size, channels, height, width))
@register_meta("ps_roi_align")
def meta_ps_roi_align(input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio):
torch._check(rois.size(1) == 5, lambda: "rois must have shape as Tensor[K, 5]")
torch._check(
input.dtype == rois.dtype,
lambda: (
"Expected tensor for input to have the same type as tensor for rois; "
f"but type {input.dtype} does not equal {rois.dtype}"
),
)
channels = input.size(1)
torch._check(
channels % (pooled_height * pooled_width) == 0,
"input channels must be a multiple of pooling height * pooling width",
)
num_rois = rois.size(0)
out_size = (num_rois, channels // (pooled_height * pooled_width), pooled_height, pooled_width)
return input.new_empty(out_size), torch.empty(out_size, dtype=torch.int32, device="meta")
@register_meta("_ps_roi_align_backward")
def meta_ps_roi_align_backward(
grad,
rois,
channel_mapping,
spatial_scale,
pooled_height,
pooled_width,
sampling_ratio,
batch_size,
channels,
height,
width,
):
torch._check(
grad.dtype == rois.dtype,
lambda: (
"Expected tensor for grad to have the same type as tensor for rois; "
f"but type {grad.dtype} does not equal {rois.dtype}"
),
)
return grad.new_empty((batch_size, channels, height, width))
@register_meta("roi_pool")
def meta_roi_pool(input, rois, spatial_scale, pooled_height, pooled_width):
torch._check(rois.size(1) == 5, lambda: "rois must have shape as Tensor[K, 5]")
torch._check(
input.dtype == rois.dtype,
lambda: (
"Expected tensor for input to have the same type as tensor for rois; "
f"but type {input.dtype} does not equal {rois.dtype}"
),
)
num_rois = rois.size(0)
channels = input.size(1)
out_size = (num_rois, channels, pooled_height, pooled_width)
return input.new_empty(out_size), torch.empty(out_size, device="meta", dtype=torch.int32)
@register_meta("_roi_pool_backward")
def meta_roi_pool_backward(
grad, rois, argmax, spatial_scale, pooled_height, pooled_width, batch_size, channels, height, width
):
torch._check(
grad.dtype == rois.dtype,
lambda: (
"Expected tensor for grad to have the same type as tensor for rois; "
f"but type {grad.dtype} does not equal {rois.dtype}"
),
)
return grad.new_empty((batch_size, channels, height, width))
@torch._custom_ops.impl_abstract("torchvision::nms")
def meta_nms(dets, scores, iou_threshold):
torch._check(dets.dim() == 2, lambda: f"boxes should be a 2d tensor, got {dets.dim()}D")
torch._check(dets.size(1) == 4, lambda: f"boxes should have 4 elements in dimension 1, got {dets.size(1)}")
torch._check(scores.dim() == 1, lambda: f"scores should be a 1d tensor, got {scores.dim()}")
torch._check(
dets.size(0) == scores.size(0),
lambda: f"boxes and scores should have same number of elements in dimension 0, got {dets.size(0)} and {scores.size(0)}",
)
ctx = torch._custom_ops.get_ctx()
num_to_keep = ctx.create_unbacked_symint()
return dets.new_empty(num_to_keep, dtype=torch.long)
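# --- Usage sketch (illustrative) ---
# With the meta kernels above registered (they are only installed when the
# compiled torchvision ops are available), shape propagation can run on
# "meta" tensors without allocating real data; the sizes here are made up.
if __name__ == "__main__":
    feat = torch.empty(2, 256, 64, 64, device="meta")
    rois = torch.empty(10, 5, device="meta")
    out = torch.ops.torchvision.roi_align(feat, rois, 0.25, 7, 7, 2, False)
    print(out.shape)  # torch.Size([10, 256, 7, 7])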
|
import functools
import torch
import torch._custom_ops
import torch.library
# Ensure that torch.ops.torchvision is visible
import torchvision.extension # noqa: F401
@functools.lru_cache(None)
def get_meta_lib():
return torch.library.Library("torchvision", "IMPL", "Meta")
def register_meta(op_name, overload_name="default"):
def wrapper(fn):
if torchvision.extension._has_ops():
get_meta_lib().impl(getattr(getattr(torch.ops.torchvision, op_name), overload_name), fn)
return fn
return wrapper
@register_meta("roi_align")
def meta_roi_align(input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
torch._check(rois.size(1) == 5, lambda: "rois must have shape as Tensor[K, 5]")
torch._check(
input.dtype == rois.dtype,
lambda: (
"Expected tensor for input to have the same type as tensor for rois; "
f"but type {input.dtype} does not equal {rois.dtype}"
),
)
num_rois = rois.size(0)
channels = input.size(1)
return input.new_empty((num_rois, channels, pooled_height, pooled_width))
@register_meta("_roi_align_backward")
def meta_roi_align_backward(
grad, rois, spatial_scale, pooled_height, pooled_width, batch_size, channels, height, width, sampling_ratio, aligned
):
torch._check(
grad.dtype == rois.dtype,
lambda: (
"Expected tensor for grad to have the same type as tensor for rois; "
f"but type {grad.dtype} does not equal {rois.dtype}"
),
)
return grad.new_empty((batch_size, channels, height, width))
@register_meta("ps_roi_align")
def meta_ps_roi_align(input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio):
torch._check(rois.size(1) == 5, lambda: "rois must have shape as Tensor[K, 5]")
torch._check(
input.dtype == rois.dtype,
lambda: (
"Expected tensor for input to have the same type as tensor for rois; "
f"but type {input.dtype} does not equal {rois.dtype}"
),
)
channels = input.size(1)
torch._check(
channels % (pooled_height * pooled_width) == 0,
"input channels must be a multiple of pooling height * pooling width",
)
num_rois = rois.size(0)
out_size = (num_rois, channels // (pooled_height * pooled_width), pooled_height, pooled_width)
return input.new_empty(out_size), torch.empty(out_size, dtype=torch.int32, device="meta")
@register_meta("_ps_roi_align_backward")
def meta_ps_roi_align_backward(
grad,
rois,
channel_mapping,
spatial_scale,
pooled_height,
pooled_width,
sampling_ratio,
batch_size,
channels,
height,
width,
):
torch._check(
grad.dtype == rois.dtype,
lambda: (
"Expected tensor for grad to have the same type as tensor for rois; "
f"but type {grad.dtype} does not equal {rois.dtype}"
),
)
return grad.new_empty((batch_size, channels, height, width))
@torch._custom_ops.impl_abstract("torchvision::nms")
def meta_nms(dets, scores, iou_threshold):
torch._check(dets.dim() == 2, lambda: f"boxes should be a 2d tensor, got {dets.dim()}D")
torch._check(dets.size(1) == 4, lambda: f"boxes should have 4 elements in dimension 1, got {dets.size(1)}")
torch._check(scores.dim() == 1, lambda: f"scores should be a 1d tensor, got {scores.dim()}")
torch._check(
dets.size(0) == scores.size(0),
lambda: f"boxes and scores should have same number of elements in dimension 0, got {dets.size(0)} and {scores.size(0)}",
)
ctx = torch._custom_ops.get_ctx()
num_to_keep = ctx.create_unbacked_symint()
return dets.new_empty(num_to_keep, dtype=torch.long)
|
import http.client
import json
from typing import Any, Optional, TypedDict
WRITE_KEY = "310apTK0HUFl4AOv"
class EventDict(TypedDict):
event: str
properties: Optional[dict[str, Any]]
def create_events(events: list[EventDict]) -> Optional[Any]:
try:
data = {
"events": [
{
"write_key": WRITE_KEY,
"name": event["event"],
"properties": event.get("properties"),
}
for event in events
]
}
conn = http.client.HTTPSConnection("app.firstpartyhq.com")
payload = json.dumps(data)
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
}
conn.request("POST", "/events/v1/track/bulk", payload, headers)
res = conn.getresponse()
return json.loads(res.read())
except Exception:
return None
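# --- Usage sketch (illustrative) ---
# A minimal call of create_events; the event name and properties below are
# placeholders. A None return means the request failed and was swallowed.
if __name__ == "__main__":
    resp = create_events(
        [{"event": "cli_started", "properties": {"version": "0.0.1"}}]
    )
    print(resp)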
|
import http.client
import json
from typing import Any, Dict, List, Optional, TypedDict
WRITE_KEY = "310apTK0HUFl4AOv"
class EventDict(TypedDict):
event: str
properties: Optional[Dict[str, Any]]
def create_events(events: List[EventDict]) -> Optional[Any]:
try:
data = {
"events": [
{
"write_key": WRITE_KEY,
"name": event["event"],
"properties": event.get("properties"),
}
for event in events
]
}
conn = http.client.HTTPSConnection("app.firstpartyhq.com")
payload = json.dumps(data)
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
}
conn.request("POST", "/events/v1/track/bulk", payload, headers)
res = conn.getresponse()
return json.loads(res.read())
except Exception:
return None
|
"""**Utility functions** for LangChain.
These functions do not depend on any other LangChain module.
"""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
# for type checking and IDE support, we include the imports here
# but we don't want to eagerly import them at runtime
from langchain_core.utils import image
from langchain_core.utils.aiter import abatch_iterate
from langchain_core.utils.env import get_from_dict_or_env, get_from_env
from langchain_core.utils.formatting import StrictFormatter, formatter
from langchain_core.utils.input import (
get_bolded_text,
get_color_mapping,
get_colored_text,
print_text,
)
from langchain_core.utils.iter import batch_iterate
from langchain_core.utils.loading import try_load_from_hub
from langchain_core.utils.pydantic import pre_init
from langchain_core.utils.strings import comma_list, stringify_dict, stringify_value
from langchain_core.utils.utils import (
build_extra_kwargs,
check_package_version,
convert_to_secret_str,
from_env,
get_pydantic_field_names,
guard_import,
mock_now,
raise_for_status_with_text,
secret_from_env,
xor_args,
)
__all__ = (
"StrictFormatter",
"abatch_iterate",
"batch_iterate",
"build_extra_kwargs",
"check_package_version",
"comma_list",
"convert_to_secret_str",
"formatter",
"from_env",
"get_bolded_text",
"get_color_mapping",
"get_colored_text",
"get_from_dict_or_env",
"get_from_env",
"get_pydantic_field_names",
"guard_import",
"image",
"mock_now",
"pre_init",
"print_text",
"raise_for_status_with_text",
"secret_from_env",
"stringify_dict",
"stringify_value",
"try_load_from_hub",
"xor_args",
)
_dynamic_imports = {
"image": "__module__",
"abatch_iterate": "aiter",
"get_from_dict_or_env": "env",
"get_from_env": "env",
"StrictFormatter": "formatting",
"formatter": "formatting",
"get_bolded_text": "input",
"get_color_mapping": "input",
"get_colored_text": "input",
"print_text": "input",
"batch_iterate": "iter",
"try_load_from_hub": "loading",
"pre_init": "pydantic",
"comma_list": "strings",
"stringify_dict": "strings",
"stringify_value": "strings",
"build_extra_kwargs": "utils",
"check_package_version": "utils",
"convert_to_secret_str": "utils",
"from_env": "utils",
"get_pydantic_field_names": "utils",
"guard_import": "utils",
"mock_now": "utils",
"secret_from_env": "utils",
"xor_args": "utils",
"raise_for_status_with_text": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
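# --- Usage sketch (illustrative) ---
# The module-level __getattr__ above gives PEP 562 lazy imports: the first
# access to a name in __all__ calls import_attr, and the result is cached in
# globals() so later lookups are ordinary attribute access.
if __name__ == "__main__":
    from langchain_core.utils import comma_list  # resolved lazily via __getattr__

    print(comma_list(["a", "b", "c"]))  # "a, b, c"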
|
"""**Utility functions** for LangChain.
These functions do not depend on any other LangChain module.
"""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
# for type checking and IDE support, we include the imports here
# but we don't want to eagerly import them at runtime
from langchain_core.utils import image
from langchain_core.utils.aiter import abatch_iterate
from langchain_core.utils.env import get_from_dict_or_env, get_from_env
from langchain_core.utils.formatting import StrictFormatter, formatter
from langchain_core.utils.input import (
get_bolded_text,
get_color_mapping,
get_colored_text,
print_text,
)
from langchain_core.utils.iter import batch_iterate
from langchain_core.utils.loading import try_load_from_hub
from langchain_core.utils.pydantic import pre_init
from langchain_core.utils.strings import comma_list, stringify_dict, stringify_value
from langchain_core.utils.utils import (
build_extra_kwargs,
check_package_version,
convert_to_secret_str,
from_env,
get_pydantic_field_names,
guard_import,
mock_now,
raise_for_status_with_text,
secret_from_env,
xor_args,
)
__all__ = (
"build_extra_kwargs",
"StrictFormatter",
"check_package_version",
"convert_to_secret_str",
"formatter",
"get_bolded_text",
"get_color_mapping",
"get_colored_text",
"get_pydantic_field_names",
"guard_import",
"mock_now",
"print_text",
"raise_for_status_with_text",
"xor_args",
"try_load_from_hub",
"image",
"get_from_env",
"get_from_dict_or_env",
"stringify_dict",
"comma_list",
"stringify_value",
"pre_init",
"batch_iterate",
"abatch_iterate",
"from_env",
"secret_from_env",
)
_dynamic_imports = {
"image": "__module__",
"abatch_iterate": "aiter",
"get_from_dict_or_env": "env",
"get_from_env": "env",
"StrictFormatter": "formatting",
"formatter": "formatting",
"get_bolded_text": "input",
"get_color_mapping": "input",
"get_colored_text": "input",
"print_text": "input",
"batch_iterate": "iter",
"try_load_from_hub": "loading",
"pre_init": "pydantic",
"comma_list": "strings",
"stringify_dict": "strings",
"stringify_value": "strings",
"build_extra_kwargs": "utils",
"check_package_version": "utils",
"convert_to_secret_str": "utils",
"from_env": "utils",
"get_pydantic_field_names": "utils",
"guard_import": "utils",
"mock_now": "utils",
"secret_from_env": "utils",
"xor_args": "utils",
"raise_for_status_with_text": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
_base_ = './solo_r50_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 800), (1333, 768), (1333, 736), (1333, 704),
(1333, 672), (1333, 640)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 3x
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
|
_base_ = './solo_r50_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 800), (1333, 768), (1333, 736), (1333, 704),
(1333, 672), (1333, 640)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 3x
max_epochs = 36
train_cfg = dict(by_epoch=True, max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
class DropoutTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_dropout_basics(self):
self.run_layer_test(
layers.Dropout,
init_kwargs={
"rate": 0.2,
},
input_shape=(2, 3),
call_kwargs={"training": True},
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
assert_built_after_instantiation=True,
)
def test_dropout_rescaling(self):
inputs = np.ones((20, 500))
layer = layers.Dropout(0.5, seed=1337)
outputs = layer(inputs, training=True)
outputs = backend.convert_to_numpy(outputs)
self.assertAllClose(np.mean(outputs), 1.0, atol=0.02)
self.assertAllClose(np.max(outputs), 2.0)
def test_dropout_partial_noise_shape_dynamic(self):
inputs = np.ones((20, 5, 10))
layer = layers.Dropout(0.5, noise_shape=(None, 1, None))
outputs = layer(inputs, training=True)
self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
def test_dropout_partial_noise_shape_static(self):
inputs = np.ones((20, 5, 10))
layer = layers.Dropout(0.5, noise_shape=(20, 1, 10))
outputs = layer(inputs, training=True)
self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
def test_dropout_negative_rate(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value received for argument `rate`. "
"Expected a float value between 0 and 1.",
):
_ = layers.Dropout(rate=-0.5)
def test_dropout_rate_greater_than_one(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value received for argument `rate`. "
"Expected a float value between 0 and 1.",
):
_ = layers.Dropout(rate=1.5)
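# --- Usage sketch (illustrative) ---
# Inverted dropout rescales surviving activations by 1 / (1 - rate); that is
# what test_dropout_rescaling above relies on: with rate=0.5 the kept values
# of an all-ones input become 2.0 and the overall mean stays close to 1.0.
if __name__ == "__main__":
    layer = layers.Dropout(0.5, seed=0)
    out = backend.convert_to_numpy(layer(np.ones((4, 8)), training=True))
    print(sorted(set(out.flatten().tolist())))  # a subset of {0.0, 2.0}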
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
class DropoutTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_dropout_basics(self):
self.run_layer_test(
layers.Dropout,
init_kwargs={
"rate": 0.2,
},
input_shape=(2, 3),
call_kwargs={"training": True},
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
)
def test_dropout_rescaling(self):
inputs = np.ones((20, 500))
layer = layers.Dropout(0.5, seed=1337)
outputs = layer(inputs, training=True)
outputs = backend.convert_to_numpy(outputs)
self.assertAllClose(np.mean(outputs), 1.0, atol=0.02)
self.assertAllClose(np.max(outputs), 2.0)
def test_dropout_partial_noise_shape_dynamic(self):
inputs = np.ones((20, 5, 10))
layer = layers.Dropout(0.5, noise_shape=(None, 1, None))
outputs = layer(inputs, training=True)
self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
def test_dropout_partial_noise_shape_static(self):
inputs = np.ones((20, 5, 10))
layer = layers.Dropout(0.5, noise_shape=(20, 1, 10))
outputs = layer(inputs, training=True)
self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
def test_dropout_negative_rate(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value received for argument `rate`. "
"Expected a float value between 0 and 1.",
):
_ = layers.Dropout(rate=-0.5)
def test_dropout_rate_greater_than_one(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value received for argument `rate`. "
"Expected a float value between 0 and 1.",
):
_ = layers.Dropout(rate=1.5)
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for tensor_list_set_item."""
import functools
import tensorflow as tf
from tensorflow.lite.python import lite
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
from tensorflow.python.ops import list_ops
def _tflite_convert_verify_op(tflite_convert_function, *args, **kwargs):
"""Verifies that the result of the conversion contains DynamicUpdateSlice op."""
result = tflite_convert_function(*args, **kwargs)
tflite_model_binary = result[0]
if not result[0]:
tf.compat.v1.logging.error(result[1]) # stderr from running tflite_convert.
raise RuntimeError("Failed to build model: \n\n" + result[1])
interpreter = lite.Interpreter(model_content=tflite_model_binary)
interpreter.allocate_tensors()
for op in interpreter._get_ops_details(): # pylint: disable=protected-access
if op["op_name"] == "DYNAMIC_UPDATE_SLICE":
return result
raise RuntimeError(
"Expected to generate DYNAMIC_UPDATE_SLICE op node in graph.")
@register_make_test_function()
def make_dynamic_update_slice_tests(options):
"""Make a set of tests to do TensorListSetItem."""
test_parameters = [
{
"element_dtype": [tf.float32, tf.int32, tf.bool],
"num_elements": [4, 5, 6],
"element_shape": [[], [5], [3, 3]],
"index": [0, 1, 2, 3],
},
]
def build_graph(parameters):
"""Build the TensorListSetItem op testing graph."""
data = tf.compat.v1.placeholder(
dtype=parameters["element_dtype"],
shape=[parameters["num_elements"]] + parameters["element_shape"])
item = tf.compat.v1.placeholder(
dtype=parameters["element_dtype"], shape=parameters["element_shape"])
tensor_list = list_ops.tensor_list_from_tensor(data,
parameters["element_shape"])
tensor_list = list_ops.tensor_list_set_item(tensor_list,
parameters["index"], item)
out = list_ops.tensor_list_stack(
tensor_list,
num_elements=parameters["num_elements"],
element_dtype=parameters["element_dtype"])
return [data, item], [out]
def build_inputs(parameters, sess, inputs, outputs):
data = create_tensor_data(parameters["element_dtype"],
[parameters["num_elements"]] +
parameters["element_shape"])
item = create_tensor_data(parameters["element_dtype"],
parameters["element_shape"])
return [data, item], sess.run(
outputs, feed_dict=dict(zip(inputs, [data, item])))
options.enable_dynamic_update_slice = True
options.tflite_convert_function = functools.partial(
_tflite_convert_verify_op, options.tflite_convert_function)
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for tensor_list_set_item."""
import functools
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
from tensorflow.python.ops import list_ops
def _tflite_convert_verify_op(tflite_convert_function, *args, **kwargs):
"""Verifies that the result of the conversion contains DynamicUpdateSlice op."""
result = tflite_convert_function(*args, **kwargs)
tflite_model_binary = result[0]
if not result[0]:
tf.compat.v1.logging.error(result[1]) # stderr from running tflite_convert.
raise RuntimeError("Failed to build model: \n\n" + result[1])
interpreter = tf.lite.Interpreter(model_content=tflite_model_binary)
interpreter.allocate_tensors()
for op in interpreter._get_ops_details(): # pylint: disable=protected-access
if op["op_name"] == "DYNAMIC_UPDATE_SLICE":
return result
raise RuntimeError(
"Expected to generate DYNAMIC_UPDATE_SLICE op node in graph.")
@register_make_test_function()
def make_dynamic_update_slice_tests(options):
"""Make a set of tests to do TensorListSetItem."""
test_parameters = [
{
"element_dtype": [tf.float32, tf.int32, tf.bool],
"num_elements": [4, 5, 6],
"element_shape": [[], [5], [3, 3]],
"index": [0, 1, 2, 3],
},
]
def build_graph(parameters):
"""Build the TensorListSetItem op testing graph."""
data = tf.compat.v1.placeholder(
dtype=parameters["element_dtype"],
shape=[parameters["num_elements"]] + parameters["element_shape"])
item = tf.compat.v1.placeholder(
dtype=parameters["element_dtype"], shape=parameters["element_shape"])
tensor_list = list_ops.tensor_list_from_tensor(data,
parameters["element_shape"])
tensor_list = list_ops.tensor_list_set_item(tensor_list,
parameters["index"], item)
out = list_ops.tensor_list_stack(
tensor_list,
num_elements=parameters["num_elements"],
element_dtype=parameters["element_dtype"])
return [data, item], [out]
def build_inputs(parameters, sess, inputs, outputs):
data = create_tensor_data(parameters["element_dtype"],
[parameters["num_elements"]] +
parameters["element_shape"])
item = create_tensor_data(parameters["element_dtype"],
parameters["element_shape"])
return [data, item], sess.run(
outputs, feed_dict=dict(zip(inputs, [data, item])))
options.enable_dynamic_update_slice = True
options.tflite_convert_function = functools.partial(
_tflite_convert_verify_op, options.tflite_convert_function)
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
|
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.merging.base_merge import Merge
@keras_export("keras.layers.Multiply")
class Multiply(Merge):
"""Performs elementwise multiplication.
It takes as input a list of tensors, all of the same shape,
and returns a single tensor (also of the same shape).
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.Multiply()([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> # equivalent to `y = keras.layers.multiply([x1, x2])`
>>> y = keras.layers.Multiply()([x1, x2])
>>> out = keras.layers.Dense(4)(y)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
def _merge_function(self, inputs):
masks = [backend.get_keras_mask(x) for x in inputs]
has_output_mask = all(mask is not None for mask in masks)
output = None
output_mask = None
for x, mask in zip(inputs, masks):
if mask is not None:
mask = ops.broadcast_to(ops.expand_dims(mask, -1), ops.shape(x))
# Replace 0s with 1s outside of mask.
x = ops.where(mask, x, ops.cast(1, x.dtype))
if has_output_mask:
output_mask = (
mask
if output_mask is None
else ops.logical_or(output_mask, mask)
)
output = x if output is None else ops.multiply(output, x)
if has_output_mask:
# Replace 1s with 0s outside of mask per standard masking rules.
output = ops.where(output_mask, output, ops.cast(0, output.dtype))
output_mask = ops.any(output_mask, axis=-1, keepdims=False)
backend.set_keras_mask(output, output_mask)
return output
@keras_export("keras.layers.multiply")
def multiply(inputs, **kwargs):
"""Functional interface to the `keras.layers.Multiply` layer.
Args:
        inputs: A list of input tensors, all of the same shape.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor as the elementwise product of the inputs with the same
shape as the inputs.
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.multiply([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> y = keras.layers.multiply([x1, x2])
>>> out = keras.layers.Dense(4)(y)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
return Multiply(**kwargs)(inputs)
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.merging.base_merge import Merge
@keras_export("keras.layers.Multiply")
class Multiply(Merge):
"""Performs elementwise multiplication.
It takes as input a list of tensors, all of the same shape,
and returns a single tensor (also of the same shape).
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.Multiply()([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> # equivalent to `y = keras.layers.multiply([x1, x2])`
>>> y = keras.layers.Multiply()([x1, x2])
>>> out = keras.layers.Dense(4)(y)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
def _merge_function(self, inputs):
masks = [getattr(x, "_keras_mask", None) for x in inputs]
has_output_mask = all(mask is not None for mask in masks)
output = None
output_mask = None
for x, mask in zip(inputs, masks):
if mask is not None:
mask = ops.broadcast_to(ops.expand_dims(mask, -1), ops.shape(x))
# Replace 0s with 1s outside of mask.
x = ops.where(mask, x, ops.cast(1, x.dtype))
if has_output_mask:
output_mask = (
mask
if output_mask is None
else ops.logical_or(output_mask, mask)
)
output = x if output is None else ops.multiply(output, x)
if has_output_mask:
# Replace 1s with 0s outside of mask per standard masking rules.
output = ops.where(output_mask, output, ops.cast(0, output.dtype))
output_mask = ops.any(output_mask, axis=-1, keepdims=False)
output._keras_mask = output_mask
return output
@keras_export("keras.layers.multiply")
def multiply(inputs, **kwargs):
"""Functional interface to the `keras.layers.Multiply` layer.
Args:
        inputs: A list of input tensors, all of the same shape.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor as the elementwise product of the inputs with the same
shape as the inputs.
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.multiply([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> y = keras.layers.multiply([x1, x2])
>>> out = keras.layers.Dense(4)(y)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
return Multiply(**kwargs)(inputs)
|
from datetime import datetime, timedelta, timezone
from typing import Annotated, Union
import jwt
from fastapi import Depends, FastAPI, HTTPException, status
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from jwt.exceptions import InvalidTokenError
from passlib.context import CryptContext
from pydantic import BaseModel
# to get a string like this run:
# openssl rand -hex 32
SECRET_KEY = "09d25e094faa6ca2556c818166b7a9563b93f7099f6f0f4caa6cf63b88e8d3e7"
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30
fake_users_db = {
"johndoe": {
"username": "johndoe",
"full_name": "John Doe",
"email": "johndoe@example.com",
"hashed_password": "$2b$12$EixZaYVK1fsbw1ZfbX3OXePaWxn96p36WQoeG6Lruj3vjPGga31lW",
"disabled": False,
}
}
class Token(BaseModel):
access_token: str
token_type: str
class TokenData(BaseModel):
username: Union[str, None] = None
class User(BaseModel):
username: str
email: Union[str, None] = None
full_name: Union[str, None] = None
disabled: Union[bool, None] = None
class UserInDB(User):
hashed_password: str
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
app = FastAPI()
def verify_password(plain_password, hashed_password):
return pwd_context.verify(plain_password, hashed_password)
def get_password_hash(password):
return pwd_context.hash(password)
def get_user(db, username: str):
if username in db:
user_dict = db[username]
return UserInDB(**user_dict)
def authenticate_user(fake_db, username: str, password: str):
user = get_user(fake_db, username)
if not user:
return False
if not verify_password(password, user.hashed_password):
return False
return user
def create_access_token(data: dict, expires_delta: Union[timedelta, None] = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.now(timezone.utc) + expires_delta
else:
expire = datetime.now(timezone.utc) + timedelta(minutes=15)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
async def get_current_user(token: Annotated[str, Depends(oauth2_scheme)]):
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
username = payload.get("sub")
if username is None:
raise credentials_exception
token_data = TokenData(username=username)
except InvalidTokenError:
raise credentials_exception
user = get_user(fake_users_db, username=token_data.username)
if user is None:
raise credentials_exception
return user
async def get_current_active_user(
current_user: Annotated[User, Depends(get_current_user)],
):
if current_user.disabled:
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
@app.post("/token")
async def login_for_access_token(
form_data: Annotated[OAuth2PasswordRequestForm, Depends()],
) -> Token:
user = authenticate_user(fake_users_db, form_data.username, form_data.password)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(
data={"sub": user.username}, expires_delta=access_token_expires
)
return Token(access_token=access_token, token_type="bearer")
@app.get("/users/me/", response_model=User)
async def read_users_me(
current_user: Annotated[User, Depends(get_current_active_user)],
):
return current_user
@app.get("/users/me/items/")
async def read_own_items(
current_user: Annotated[User, Depends(get_current_active_user)],
):
return [{"item_id": "Foo", "owner": current_user.username}]
|
from datetime import datetime, timedelta, timezone
from typing import Annotated, Union
import jwt
from fastapi import Depends, FastAPI, HTTPException, status
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from jwt.exceptions import InvalidTokenError
from passlib.context import CryptContext
from pydantic import BaseModel
# to get a string like this run:
# openssl rand -hex 32
SECRET_KEY = "09d25e094faa6ca2556c818166b7a9563b93f7099f6f0f4caa6cf63b88e8d3e7"
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30
fake_users_db = {
"johndoe": {
"username": "johndoe",
"full_name": "John Doe",
"email": "johndoe@example.com",
"hashed_password": "$2b$12$EixZaYVK1fsbw1ZfbX3OXePaWxn96p36WQoeG6Lruj3vjPGga31lW",
"disabled": False,
}
}
class Token(BaseModel):
access_token: str
token_type: str
class TokenData(BaseModel):
username: Union[str, None] = None
class User(BaseModel):
username: str
email: Union[str, None] = None
full_name: Union[str, None] = None
disabled: Union[bool, None] = None
class UserInDB(User):
hashed_password: str
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
app = FastAPI()
def verify_password(plain_password, hashed_password):
return pwd_context.verify(plain_password, hashed_password)
def get_password_hash(password):
return pwd_context.hash(password)
def get_user(db, username: str):
if username in db:
user_dict = db[username]
return UserInDB(**user_dict)
def authenticate_user(fake_db, username: str, password: str):
user = get_user(fake_db, username)
if not user:
return False
if not verify_password(password, user.hashed_password):
return False
return user
def create_access_token(data: dict, expires_delta: Union[timedelta, None] = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.now(timezone.utc) + expires_delta
else:
expire = datetime.now(timezone.utc) + timedelta(minutes=15)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
async def get_current_user(token: Annotated[str, Depends(oauth2_scheme)]):
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
username: str = payload.get("sub")
if username is None:
raise credentials_exception
token_data = TokenData(username=username)
except InvalidTokenError:
raise credentials_exception
user = get_user(fake_users_db, username=token_data.username)
if user is None:
raise credentials_exception
return user
async def get_current_active_user(
current_user: Annotated[User, Depends(get_current_user)],
):
if current_user.disabled:
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
@app.post("/token")
async def login_for_access_token(
form_data: Annotated[OAuth2PasswordRequestForm, Depends()],
) -> Token:
user = authenticate_user(fake_users_db, form_data.username, form_data.password)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(
data={"sub": user.username}, expires_delta=access_token_expires
)
return Token(access_token=access_token, token_type="bearer")
@app.get("/users/me/", response_model=User)
async def read_users_me(
current_user: Annotated[User, Depends(get_current_active_user)],
):
return current_user
@app.get("/users/me/items/")
async def read_own_items(
current_user: Annotated[User, Depends(get_current_active_user)],
):
return [{"item_id": "Foo", "owner": current_user.username}]
|
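The token flow above can be exercised from a client once the app is running. A minimal sketch, assuming the app is served locally with `uvicorn main:app` on port 8000 and that the bcrypt hash stored for `johndoe` corresponds to the password "secret" (as in the FastAPI tutorial this example follows); adjust both to your setup.

# Client-side sketch of the OAuth2 password flow above (illustrative only).
import httpx

BASE = "http://localhost:8000"

# Exchange username/password for a bearer token; OAuth2PasswordRequestForm expects form fields.
token_resp = httpx.post(
    f"{BASE}/token",
    data={"username": "johndoe", "password": "secret"},  # "secret" is assumed to match the stored hash
)
token_resp.raise_for_status()
access_token = token_resp.json()["access_token"]

# Call a protected endpoint with the bearer token.
me = httpx.get(f"{BASE}/users/me/", headers={"Authorization": f"Bearer {access_token}"})
print(me.json())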
from __future__ import annotations
from functools import partial
from typing import TYPE_CHECKING, Literal, Optional, Union
from pydantic import BaseModel, Field
from langchain_core.prompts import (
BasePromptTemplate,
PromptTemplate,
aformat_document,
format_document,
)
from langchain_core.tools.simple import Tool
if TYPE_CHECKING:
from langchain_core.callbacks import Callbacks
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class RetrieverInput(BaseModel):
"""Input to the retriever."""
query: str = Field(description="query to look up in retriever")
def _get_relevant_documents(
query: str,
retriever: BaseRetriever,
document_prompt: BasePromptTemplate,
document_separator: str,
callbacks: Callbacks = None,
response_format: Literal["content", "content_and_artifact"] = "content",
) -> Union[str, tuple[str, list[Document]]]:
docs = retriever.invoke(query, config={"callbacks": callbacks})
content = document_separator.join(
format_document(doc, document_prompt) for doc in docs
)
if response_format == "content_and_artifact":
return (content, docs)
return content
async def _aget_relevant_documents(
query: str,
retriever: BaseRetriever,
document_prompt: BasePromptTemplate,
document_separator: str,
callbacks: Callbacks = None,
response_format: Literal["content", "content_and_artifact"] = "content",
) -> Union[str, tuple[str, list[Document]]]:
docs = await retriever.ainvoke(query, config={"callbacks": callbacks})
content = document_separator.join(
[await aformat_document(doc, document_prompt) for doc in docs]
)
if response_format == "content_and_artifact":
return (content, docs)
return content
def create_retriever_tool(
retriever: BaseRetriever,
name: str,
description: str,
*,
document_prompt: Optional[BasePromptTemplate] = None,
document_separator: str = "\n\n",
response_format: Literal["content", "content_and_artifact"] = "content",
) -> Tool:
"""Create a tool to do retrieval of documents.
Args:
retriever: The retriever to use for the retrieval
name: The name for the tool. This will be passed to the language model,
so should be unique and somewhat descriptive.
description: The description for the tool. This will be passed to the language
model, so should be descriptive.
document_prompt: The prompt to use for the document. Defaults to None.
document_separator: The separator to use between documents. Defaults to "\n\n".
response_format: The tool response format. If "content" then the output of
the tool is interpreted as the contents of a ToolMessage. If
"content_and_artifact" then the output is expected to be a two-tuple
corresponding to the (content, artifact) of a ToolMessage (artifact
being a list of documents in this case). Defaults to "content".
Returns:
Tool class to pass to an agent.
"""
document_prompt = document_prompt or PromptTemplate.from_template("{page_content}")
func = partial(
_get_relevant_documents,
retriever=retriever,
document_prompt=document_prompt,
document_separator=document_separator,
response_format=response_format,
)
afunc = partial(
_aget_relevant_documents,
retriever=retriever,
document_prompt=document_prompt,
document_separator=document_separator,
response_format=response_format,
)
return Tool(
name=name,
description=description,
func=func,
coroutine=afunc,
args_schema=RetrieverInput,
response_format=response_format,
)
|
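A minimal usage sketch for create_retriever_tool as defined above. The vector store and embedding classes are stand-ins assumed to be available in recent langchain-core; swap in your own retriever.

# Illustrative wiring of create_retriever_tool to an in-memory retriever (assumed API).
from langchain_core.documents import Document
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.vectorstores import InMemoryVectorStore

store = InMemoryVectorStore(embedding=DeterministicFakeEmbedding(size=32))
store.add_documents(
    [Document(page_content="Retriever tools wrap a retriever so an agent can call it.")]
)

docs_tool = create_retriever_tool(
    store.as_retriever(),
    name="search_docs",
    description="Search project documentation for passages relevant to the query.",
)
print(docs_tool.invoke({"query": "retriever tools"}))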
from __future__ import annotations
from functools import partial
from typing import Literal, Optional, Union
from pydantic import BaseModel, Field
from langchain_core.callbacks import Callbacks
from langchain_core.documents import Document
from langchain_core.prompts import (
BasePromptTemplate,
PromptTemplate,
aformat_document,
format_document,
)
from langchain_core.retrievers import BaseRetriever
from langchain_core.tools.simple import Tool
class RetrieverInput(BaseModel):
"""Input to the retriever."""
query: str = Field(description="query to look up in retriever")
def _get_relevant_documents(
query: str,
retriever: BaseRetriever,
document_prompt: BasePromptTemplate,
document_separator: str,
callbacks: Callbacks = None,
response_format: Literal["content", "content_and_artifact"] = "content",
) -> Union[str, tuple[str, list[Document]]]:
docs = retriever.invoke(query, config={"callbacks": callbacks})
content = document_separator.join(
format_document(doc, document_prompt) for doc in docs
)
if response_format == "content_and_artifact":
return (content, docs)
return content
async def _aget_relevant_documents(
query: str,
retriever: BaseRetriever,
document_prompt: BasePromptTemplate,
document_separator: str,
callbacks: Callbacks = None,
response_format: Literal["content", "content_and_artifact"] = "content",
) -> Union[str, tuple[str, list[Document]]]:
docs = await retriever.ainvoke(query, config={"callbacks": callbacks})
content = document_separator.join(
[await aformat_document(doc, document_prompt) for doc in docs]
)
if response_format == "content_and_artifact":
return (content, docs)
return content
def create_retriever_tool(
retriever: BaseRetriever,
name: str,
description: str,
*,
document_prompt: Optional[BasePromptTemplate] = None,
document_separator: str = "\n\n",
response_format: Literal["content", "content_and_artifact"] = "content",
) -> Tool:
"""Create a tool to do retrieval of documents.
Args:
retriever: The retriever to use for the retrieval
name: The name for the tool. This will be passed to the language model,
so should be unique and somewhat descriptive.
description: The description for the tool. This will be passed to the language
model, so should be descriptive.
document_prompt: The prompt to use for the document. Defaults to None.
document_separator: The separator to use between documents. Defaults to "\n\n".
response_format: The tool response format. If "content" then the output of
the tool is interpreted as the contents of a ToolMessage. If
"content_and_artifact" then the output is expected to be a two-tuple
corresponding to the (content, artifact) of a ToolMessage (artifact
being a list of documents in this case). Defaults to "content".
Returns:
Tool class to pass to an agent.
"""
document_prompt = document_prompt or PromptTemplate.from_template("{page_content}")
func = partial(
_get_relevant_documents,
retriever=retriever,
document_prompt=document_prompt,
document_separator=document_separator,
response_format=response_format,
)
afunc = partial(
_aget_relevant_documents,
retriever=retriever,
document_prompt=document_prompt,
document_separator=document_separator,
response_format=response_format,
)
return Tool(
name=name,
description=description,
func=func,
coroutine=afunc,
args_schema=RetrieverInput,
response_format=response_format,
)
|
"""RSS feed reader for news - processes each article with NewsArticleReader."""
import logging
from typing import Any, List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.web.news.base import NewsArticleReader
logger = logging.getLogger(__name__)
class RssNewsReader(BaseReader):
"""
RSS news reader.
Reads news content from RSS feeds and parses with NewsArticleReader.
"""
def __init__(self, **reader_kwargs: Any) -> None:
"""
Initialize with parameters.
Args:
html_to_text (bool): Whether to convert HTML to text.
Requires `html2text` package.
"""
try:
import feedparser # noqa: F401
except ImportError:
raise ImportError(
"`feedparser` package not found, please run `pip install feedparser`"
)
try:
import listparser # noqa: F401
except ImportError:
raise ImportError(
"`listparser` package not found, please run `pip install listparser`"
)
self.reader_kwargs = reader_kwargs
def load_data(self, urls: List[str] = None, opml: str = None) -> List[Document]:
"""
Load data from either RSS feeds or OPML.
Args:
urls (List[str]): List of RSS URLs to load.
opml (str): URL to OPML file or string or byte OPML content.
Returns:
List[Document]: List of documents.
"""
if (urls is None) == (
opml is None
): # This is True if both are None or neither is None
raise ValueError(
"Provide either the urls or the opml argument, but not both."
)
import feedparser
if urls and not isinstance(urls, list):
raise ValueError("urls must be a list of strings.")
documents = []
if not urls and opml:
try:
import listparser
except ImportError as e:
raise ImportError(
"Package listparser must be installed if the opml arg is used. "
"Please install with 'pip install listparser' or use the "
"urls arg instead."
) from e
rss = listparser.parse(opml)
urls = [feed.url for feed in rss.feeds]
for url in urls:
try:
feed = feedparser.parse(url)
for i, entry in enumerate(feed.entries):
article = NewsArticleReader(**self.reader_kwargs).load_data(
urls=[entry.link],
)[0]
article.metadata["feed"] = url
documents.append(
Document(text=article.text, metadata=article.metadata)
)
except Exception as e:
logger.error(f"Error fetching or processing {url}, exception: {e}")
continue
return documents
if __name__ == "__main__":
reader = RssNewsReader()
logger.info(reader.load_data(urls=["https://www.engadget.com/rss.xml"]))
# Generate keywords and summary for each article
reader = RssNewsReader(use_nlp=True)
logger.info(reader.load_data(urls=["https://www.engadget.com/rss.xml"]))
|
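The load_data docstring above accepts either a list of feed URLs or OPML content. A small sketch of the OPML path; the file name is hypothetical and feedparser/listparser must be installed.

# Illustrative: load articles from an OPML subscription export instead of explicit URLs.
reader = RssNewsReader(html_to_text=True)  # kwargs are forwarded to NewsArticleReader
with open("subscriptions.opml") as f:  # hypothetical OPML file
    documents = reader.load_data(opml=f.read())
print(f"{len(documents)} articles loaded")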
"""RSS feed reader for news - processes each article with NewsArticleReader."""
import logging
from typing import Any, List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.web.news.base import NewsArticleReader
logger = logging.getLogger(__name__)
class RssNewsReader(BaseReader):
"""
RSS news reader.
Reads news content from RSS feeds and parses with NewsArticleReader.
"""
def __init__(self, **reader_kwargs: Any) -> None:
"""
Initialize with parameters.
Args:
html_to_text (bool): Whether to convert HTML to text.
Requires `html2text` package.
"""
try:
import feedparser # noqa: F401
except ImportError:
raise ImportError(
"`feedparser` package not found, please run `pip install feedparser`"
)
try:
import listparser # noqa: F401
except ImportError:
raise ImportError(
"`listparser` package not found, please run `pip install listparser`"
)
self.reader_kwargs = reader_kwargs
def load_data(self, urls: List[str] = None, opml: str = None) -> List[Document]:
"""
Load data from either RSS feeds or OPML.
Args:
urls (List[str]): List of RSS URLs to load.
opml (str): URL to OPML file or string or byte OPML content.
Returns:
List[Document]: List of documents.
"""
if (urls is None) == (
opml is None
): # This is True if both are None or neither is None
raise ValueError(
"Provide either the urls or the opml argument, but not both."
)
import feedparser
if urls and not isinstance(urls, list):
raise ValueError("urls must be a list of strings.")
documents = []
if not urls and opml:
try:
import listparser
except ImportError as e:
raise ImportError(
"Package listparser must be installed if the opml arg is used. "
"Please install with 'pip install listparser' or use the "
"urls arg instead."
) from e
rss = listparser.parse(opml)
urls = [feed.url for feed in rss.feeds]
for url in urls:
try:
feed = feedparser.parse(url)
for i, entry in enumerate(feed.entries):
article = NewsArticleReader(**self.reader_kwargs).load_data(
urls=[entry.link],
)[0]
article.metadata["feed"] = url
documents.append(
Document(text=article.text, metadata=article.metadata)
)
except Exception as e:
logger.error(f"Error fetching or processing {url}, exception: {e}")
continue
return documents
if __name__ == "__main__":
reader = RssNewsReader()
logger.info(reader.load_data(urls=["https://www.engadget.com/rss.xml"]))
# Generate keywords and summary for each article
reader = RssNewsReader(use_nlp=True)
logger.info(reader.load_data(urls=["https://www.engadget.com/rss.xml"]))
|
"""Test chat model integration."""
import json
from collections.abc import Generator
from contextlib import contextmanager
from typing import Any
from unittest.mock import patch
import pytest
from httpx import Client, Request, Response
from langchain_core.messages import ChatMessage
from langchain_tests.unit_tests import ChatModelUnitTests
from langchain_ollama.chat_models import ChatOllama, _parse_arguments_from_tool_call
MODEL_NAME = "llama3.1"
class TestChatOllama(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[ChatOllama]:
return ChatOllama
@property
def chat_model_params(self) -> dict:
return {"model": MODEL_NAME}
def test__parse_arguments_from_tool_call() -> None:
raw_response = '{"model":"sample-model","message":{"role":"assistant","content":"","tool_calls":[{"function":{"name":"get_profile_details","arguments":{"arg_1":"12345678901234567890123456"}}}]},"done":false}' # noqa: E501
raw_tool_calls = json.loads(raw_response)["message"]["tool_calls"]
response = _parse_arguments_from_tool_call(raw_tool_calls[0])
assert response is not None
assert isinstance(response["arg_1"], str)
@contextmanager
def _mock_httpx_client_stream(
*args: Any, **kwargs: Any
) -> Generator[Response, Any, Any]:
yield Response(
status_code=200,
content='{"message": {"role": "assistant", "content": "The meaning ..."}}',
request=Request(method="POST", url="http://whocares:11434"),
)
def test_arbitrary_roles_accepted_in_chatmessages(
monkeypatch: pytest.MonkeyPatch,
) -> None:
monkeypatch.setattr(Client, "stream", _mock_httpx_client_stream)
llm = ChatOllama(
model=MODEL_NAME,
verbose=True,
format=None,
)
messages = [
ChatMessage(
role="somerandomrole",
content="I'm ok with you adding any role message now!",
),
ChatMessage(role="control", content="thinking"),
ChatMessage(role="user", content="What is the meaning of life?"),
]
llm.invoke(messages)
@patch("langchain_ollama.chat_models.validate_model")
def test_validate_model_on_init(mock_validate_model: Any) -> None:
"""Test that the model is validated on initialization when requested."""
# Test that validate_model is called when validate_model_on_init=True
ChatOllama(model=MODEL_NAME, validate_model_on_init=True)
mock_validate_model.assert_called_once()
mock_validate_model.reset_mock()
# Test that validate_model is NOT called when validate_model_on_init=False
ChatOllama(model=MODEL_NAME, validate_model_on_init=False)
mock_validate_model.assert_not_called()
# Test that validate_model is NOT called by default
ChatOllama(model=MODEL_NAME)
mock_validate_model.assert_not_called()
|
"""Test chat model integration."""
import json
from collections.abc import Generator
from contextlib import contextmanager
from typing import Any
from unittest.mock import patch
import pytest
from httpx import Client, Request, Response
from langchain_core.messages import ChatMessage
from langchain_tests.unit_tests import ChatModelUnitTests
from langchain_ollama.chat_models import ChatOllama, _parse_arguments_from_tool_call
MODEL_NAME = "llama3.1"
class TestChatOllama(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[ChatOllama]:
return ChatOllama
@property
def chat_model_params(self) -> dict:
return {"model": "llama3-groq-tool-use"}
def test__parse_arguments_from_tool_call() -> None:
raw_response = '{"model":"sample-model","message":{"role":"assistant","content":"","tool_calls":[{"function":{"name":"get_profile_details","arguments":{"arg_1":"12345678901234567890123456"}}}]},"done":false}' # noqa: E501
raw_tool_calls = json.loads(raw_response)["message"]["tool_calls"]
response = _parse_arguments_from_tool_call(raw_tool_calls[0])
assert response is not None
assert isinstance(response["arg_1"], str)
@contextmanager
def _mock_httpx_client_stream(
*args: Any, **kwargs: Any
) -> Generator[Response, Any, Any]:
yield Response(
status_code=200,
content='{"message": {"role": "assistant", "content": "The meaning ..."}}',
request=Request(method="POST", url="http://whocares:11434"),
)
def test_arbitrary_roles_accepted_in_chatmessages(
monkeypatch: pytest.MonkeyPatch,
) -> None:
monkeypatch.setattr(Client, "stream", _mock_httpx_client_stream)
llm = ChatOllama(
base_url="http://whocares:11434",
model=MODEL_NAME,
verbose=True,
format=None,
)
messages = [
ChatMessage(
role="somerandomrole",
content="I'm ok with you adding any role message now!",
),
ChatMessage(role="control", content="thinking"),
ChatMessage(role="user", content="What is the meaning of life?"),
]
llm.invoke(messages)
@patch("langchain_ollama.chat_models.validate_model")
def test_validate_model_on_init(mock_validate_model: Any) -> None:
"""Test that the model is validated on initialization when requested."""
# Test that validate_model is called when validate_model_on_init=True
ChatOllama(model=MODEL_NAME, validate_model_on_init=True)
mock_validate_model.assert_called_once()
mock_validate_model.reset_mock()
# Test that validate_model is NOT called when validate_model_on_init=False
ChatOllama(model=MODEL_NAME, validate_model_on_init=False)
mock_validate_model.assert_not_called()
# Test that validate_model is NOT called by default
ChatOllama(model=MODEL_NAME)
mock_validate_model.assert_not_called()
|
_base_ = './ms-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
_base_ = './ms_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
# Copyright (c) OpenMMLab. All rights reserved.
import base64
import os
import mmcv
import numpy as np
import torch
from ts.torch_handler.base_handler import BaseHandler
from mmdet.apis import inference_detector, init_detector
from mmdet.utils import register_all_modules
register_all_modules(True)
class MMdetHandler(BaseHandler):
threshold = 0.5
def initialize(self, context):
properties = context.system_properties
self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
self.device = torch.device(self.map_location + ':' +
str(properties.get('gpu_id')) if torch.cuda.
is_available() else self.map_location)
self.manifest = context.manifest
model_dir = properties.get('model_dir')
serialized_file = self.manifest['model']['serializedFile']
checkpoint = os.path.join(model_dir, serialized_file)
self.config_file = os.path.join(model_dir, 'config.py')
self.model = init_detector(self.config_file, checkpoint, self.device)
self.initialized = True
def preprocess(self, data):
images = []
for row in data:
image = row.get('data') or row.get('body')
if isinstance(image, str):
image = base64.b64decode(image)
image = mmcv.imfrombytes(image)
images.append(image)
return images
def inference(self, data, *args, **kwargs):
results = inference_detector(self.model, data)
return results
def postprocess(self, data):
# Format output following the example ObjectDetectionHandler format
output = []
for data_sample in data:
pred_instances = data_sample.pred_instances
bboxes = pred_instances.bboxes.cpu().numpy().astype(
np.float32).tolist()
labels = pred_instances.labels.cpu().numpy().astype(
np.int32).tolist()
scores = pred_instances.scores.cpu().numpy().astype(
np.float32).tolist()
preds = []
for idx in range(len(labels)):
cls_score, bbox, cls_label = scores[idx], bboxes[idx], labels[
idx]
if cls_score >= self.threshold:
class_name = self.model.dataset_meta['classes'][cls_label]
result = dict(
class_label=cls_label,
class_name=class_name,
bbox=bbox,
score=cls_score)
preds.append(result)
output.append(preds)
return output
|
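Once the handler above is packaged into a model archive and served with TorchServe, it accepts raw image bytes on the inference API. A hedged client sketch; the model name and port are assumptions for a default TorchServe setup.

# Illustrative client call against the handler above (model name and port assumed).
import requests

with open("demo.jpg", "rb") as f:
    resp = requests.post("http://127.0.0.1:8080/predictions/mmdet_model", data=f)
# Each prediction keeps class_label, class_name, bbox and score for detections above the 0.5 threshold.
print(resp.json())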
# Copyright (c) OpenMMLab. All rights reserved.
import base64
import os
import mmcv
import numpy as np
import torch
from ts.torch_handler.base_handler import BaseHandler
from mmdet.apis import inference_detector, init_detector
from mmdet.utils import register_all_modules
register_all_modules(True)
class MMdetHandler(BaseHandler):
threshold = 0.5
def initialize(self, context):
properties = context.system_properties
self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
self.device = torch.device(self.map_location + ':' +
str(properties.get('gpu_id')) if torch.cuda.
is_available() else self.map_location)
self.manifest = context.manifest
model_dir = properties.get('model_dir')
serialized_file = self.manifest['model']['serializedFile']
checkpoint = os.path.join(model_dir, serialized_file)
self.config_file = os.path.join(model_dir, 'config.py')
self.model = init_detector(self.config_file, checkpoint, self.device)
self.initialized = True
def preprocess(self, data):
images = []
for row in data:
image = row.get('data') or row.get('body')
if isinstance(image, str):
image = base64.b64decode(image)
image = mmcv.imfrombytes(image)
images.append(image)
return images
def inference(self, data, *args, **kwargs):
results = inference_detector(self.model, data)
return results
def postprocess(self, data):
# Format output following the example ObjectDetectionHandler format
output = []
for data_sample in data:
pred_instances = data_sample.pred_instances
bboxes = pred_instances.bboxes.cpu().numpy().astype(
np.float32).tolist()
labels = pred_instances.labels.cpu().numpy().astype(
np.int32).tolist()
scores = pred_instances.scores.cpu().numpy().astype(
np.float32).tolist()
preds = []
for idx in range(len(labels)):
cls_score, bbox, cls_label = scores[idx], bboxes[idx], labels[
idx]
if cls_score >= self.threshold:
class_name = self.model.dataset_meta['CLASSES'][cls_label]
result = dict(
class_label=cls_label,
class_name=class_name,
bbox=bbox,
score=cls_score)
preds.append(result)
output.append(preds)
return output
|
from llama_index.llms.mistralai import MistralAI
from llama_index.multi_modal_llms.mistralai import MistralAIMultiModal
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in MistralAIMultiModal.__mro__]
assert MistralAI.__name__ in names_of_base_classes
def test_init():
m = MistralAIMultiModal(max_tokens=400, api_key="test")
assert m.max_tokens == 400
|
from llama_index.core.multi_modal_llms.base import MultiModalLLM
from llama_index.multi_modal_llms.mistralai import MistralAIMultiModal
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in MistralAIMultiModal.__mro__]
assert MultiModalLLM.__name__ in names_of_base_classes
def test_init():
m = MistralAIMultiModal(max_tokens=400)
assert m.max_tokens == 400
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from spacy_text_encoder import SpacyTextEncoder
_EMBEDDING_DIM = 96
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=SpacyTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...spacy_text_encoder import SpacyTextEncoder
_EMBEDDING_DIM = 96
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=SpacyTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import Registry, build_from_cfg
IOU_CALCULATORS = Registry('IoU calculator')
def build_iou_calculator(cfg, default_args=None):
"""Builder of IoU calculator."""
return build_from_cfg(cfg, IOU_CALCULATORS, default_args)
|
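A minimal sketch of driving the builder above from a config dict, assuming mmdet's BboxOverlaps2D calculator has been imported so that it is registered under IOU_CALCULATORS.

# Illustrative: build a 2D IoU calculator from config and compare two boxes.
import torch

iou_calculator = build_iou_calculator(dict(type='BboxOverlaps2D'))
bboxes1 = torch.tensor([[0., 0., 10., 10.]])
bboxes2 = torch.tensor([[5., 5., 15., 15.]])
print(iou_calculator(bboxes1, bboxes2))  # pairwise IoU matrix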
from mmcv.utils import Registry, build_from_cfg
IOU_CALCULATORS = Registry('IoU calculator')
def build_iou_calculator(cfg, default_args=None):
"""Builder of IoU calculator."""
return build_from_cfg(cfg, IOU_CALCULATORS, default_args)
|
from typing import Any, Dict, Union
import torch
from torchvision import datapoints, transforms as _transforms
from torchvision.transforms.v2 import functional as F, Transform
from .utils import is_simple_tensor
class ConvertBoundingBoxFormat(Transform):
"""[BETA] Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY".
.. v2betastatus:: ConvertBoundingBoxFormat transform
Args:
format (str or datapoints.BoundingBoxFormat): output bounding box format.
Possible values are defined by :class:`~torchvision.datapoints.BoundingBoxFormat` and
string values match the enums, e.g. "XYXY" or "XYWH" etc.
"""
_transformed_types = (datapoints.BoundingBox,)
def __init__(self, format: Union[str, datapoints.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = datapoints.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.convert_format_bounding_box(inpt, new_format=self.format) # type: ignore[return-value]
class ConvertDtype(Transform):
"""[BETA] Convert input image or video to the given ``dtype`` and scale the values accordingly.
.. v2betastatus:: ConvertDtype transform
This function does not support PIL Image.
Args:
dtype (torch.dtype): Desired data type of the output
.. note::
When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
If converted back and forth, this mismatch has no effect.
Raises:
RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
of the integer ``dtype``.
"""
_v1_transform_cls = _transforms.ConvertImageDtype
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dtype: torch.dtype = torch.float32) -> None:
super().__init__()
self.dtype = dtype
def _transform(
self, inpt: Union[datapoints._TensorImageType, datapoints._TensorVideoType], params: Dict[str, Any]
) -> Union[datapoints._TensorImageType, datapoints._TensorVideoType]:
return F.convert_dtype(inpt, self.dtype)
# We changed the name to align it with the new naming scheme. Still, `ConvertImageDtype` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ConvertImageDtype = ConvertDtype
class ClampBoundingBox(Transform):
"""[BETA] Clamp bounding boxes to their corresponding image dimensions.
The clamping is done according to the bounding boxes' ``spatial_size`` meta-data.
.. v2betastatus:: ClampBoundingBox transform
"""
_transformed_types = (datapoints.BoundingBox,)
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.clamp_bounding_box(inpt) # type: ignore[return-value]
|
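A small sketch of the bounding-box transforms above, assuming the torchvision v2 beta API of this snapshot (datapoints.BoundingBox with a spatial_size argument); coordinates are illustrative.

# Illustrative pipeline: convert XYWH boxes to XYXY, then clamp to the image size.
import torch
from torchvision import datapoints
from torchvision.transforms import v2

boxes = datapoints.BoundingBox(
    torch.tensor([[10.0, 10.0, 30.0, 30.0]]),  # one XYWH box that overflows the image
    format=datapoints.BoundingBoxFormat.XYWH,
    spatial_size=(32, 32),
)
pipeline = v2.Compose([v2.ConvertBoundingBoxFormat("XYXY"), v2.ClampBoundingBox()])
print(pipeline(boxes))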
from typing import Any, Dict, Union
import torch
from torchvision import datapoints, transforms as _transforms
from torchvision.transforms.v2 import functional as F, Transform
from .utils import is_simple_tensor
class ConvertBoundingBoxFormat(Transform):
"""[BETA] Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY".
.. betastatus:: ConvertBoundingBoxFormat transform
Args:
format (str or datapoints.BoundingBoxFormat): output bounding box format.
Possible values are defined by :class:`~torchvision.datapoints.BoundingBoxFormat` and
string values match the enums, e.g. "XYXY" or "XYWH" etc.
"""
_transformed_types = (datapoints.BoundingBox,)
def __init__(self, format: Union[str, datapoints.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = datapoints.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.convert_format_bounding_box(inpt, new_format=self.format) # type: ignore[return-value]
class ConvertDtype(Transform):
"""[BETA] Convert input image or video to the given ``dtype`` and scale the values accordingly.
.. betastatus:: ConvertDtype transform
This function does not support PIL Image.
Args:
dtype (torch.dtype): Desired data type of the output
.. note::
When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
If converted back and forth, this mismatch has no effect.
Raises:
RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
of the integer ``dtype``.
"""
_v1_transform_cls = _transforms.ConvertImageDtype
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dtype: torch.dtype = torch.float32) -> None:
super().__init__()
self.dtype = dtype
def _transform(
self, inpt: Union[datapoints._TensorImageType, datapoints._TensorVideoType], params: Dict[str, Any]
) -> Union[datapoints._TensorImageType, datapoints._TensorVideoType]:
return F.convert_dtype(inpt, self.dtype)
# We changed the name to align it with the new naming scheme. Still, `ConvertImageDtype` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ConvertImageDtype = ConvertDtype
class ClampBoundingBox(Transform):
"""[BETA] Clamp bounding boxes to their corresponding image dimensions.
The clamping is done according to the bounding boxes' ``spatial_size`` meta-data.
.. betastatus:: ClampBoundingBox transform
"""
_transformed_types = (datapoints.BoundingBox,)
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.clamp_bounding_box(inpt) # type: ignore[return-value]
|
OPEN_METEO_DOCS = """BASE URL: https://api.open-meteo.com/
API Documentation
The API endpoint /v1/forecast accepts a geographical coordinate, a list of weather variables and responds with a JSON hourly weather forecast for 7 days. Time always starts at 0:00 today and contains 168 hours. All URL parameters are listed below:
Parameter Format Required Default Description
latitude, longitude Floating point Yes Geographical WGS84 coordinate of the location
hourly String array No A list of weather variables which should be returned. Values can be comma separated, or multiple &hourly= parameter in the URL can be used.
daily String array No A list of daily weather variable aggregations which should be returned. Values can be comma separated, or multiple &daily= parameter in the URL can be used. If daily weather variables are specified, parameter timezone is required.
current_weather Bool No false Include current weather conditions in the JSON output.
temperature_unit String No celsius If fahrenheit is set, all temperature values are converted to Fahrenheit.
windspeed_unit String No kmh Other wind speed units: ms, mph and kn
precipitation_unit String No mm Other precipitation amount units: inch
timeformat String No iso8601 If format unixtime is selected, all time values are returned in UNIX epoch time in seconds. Please note that all timestamp are in GMT+0! For daily values with unix timestamps, please apply utc_offset_seconds again to get the correct date.
timezone String No GMT If timezone is set, all timestamps are returned as local-time and data is returned starting at 00:00 local-time. Any time zone name from the time zone database is supported. If auto is set as a time zone, the coordinates will be automatically resolved to the local time zone.
past_days Integer (0-2) No 0 If past_days is set, yesterday or the day before yesterday data are also returned.
start_date
end_date String (yyyy-mm-dd) No The time interval to get weather data. A day must be specified as an ISO8601 date (e.g. 2022-06-30).
models String array No auto Manually select one or more weather models. Per default, the best suitable weather models will be combined.
Hourly Parameter Definition
The parameter &hourly= accepts the following values. Most weather variables are given as an instantaneous value for the indicated hour. Some variables like precipitation are calculated from the preceding hour as an average or sum.
Variable Valid time Unit Description
temperature_2m Instant °C (°F) Air temperature at 2 meters above ground
snowfall Preceding hour sum cm (inch) Snowfall amount of the preceding hour in centimeters. For the water equivalent in millimeter, divide by 7. E.g. 7 cm snow = 10 mm precipitation water equivalent
rain Preceding hour sum mm (inch) Rain from large scale weather systems of the preceding hour in millimeter
showers Preceding hour sum mm (inch) Showers from convective precipitation in millimeters from the preceding hour
weathercode Instant WMO code Weather condition as a numeric code. Follow WMO weather interpretation codes. See table below for details.
snow_depth Instant meters Snow depth on the ground
freezinglevel_height Instant meters Altitude above sea level of the 0°C level
visibility Instant meters Viewing distance in meters. Influenced by low clouds, humidity and aerosols. Maximum visibility is approximately 24 km.""" # noqa: E501
|
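The documentation string above describes the /v1/forecast endpoint; a minimal request sketch against it, with illustrative coordinates and variables.

# Illustrative call to the endpoint documented above.
import requests

resp = requests.get(
    "https://api.open-meteo.com/v1/forecast",
    params={
        "latitude": 52.52,
        "longitude": 13.41,
        "hourly": "temperature_2m,rain",
        "current_weather": "true",
        "timezone": "auto",
    },
)
resp.raise_for_status()
print(resp.json()["current_weather"])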
# flake8: noqa
OPEN_METEO_DOCS = """BASE URL: https://api.open-meteo.com/
API Documentation
The API endpoint /v1/forecast accepts a geographical coordinate, a list of weather variables and responds with a JSON hourly weather forecast for 7 days. Time always starts at 0:00 today and contains 168 hours. All URL parameters are listed below:
Parameter Format Required Default Description
latitude, longitude Floating point Yes Geographical WGS84 coordinate of the location
hourly String array No A list of weather variables which should be returned. Values can be comma separated, or multiple &hourly= parameter in the URL can be used.
daily String array No A list of daily weather variable aggregations which should be returned. Values can be comma separated, or multiple &daily= parameter in the URL can be used. If daily weather variables are specified, parameter timezone is required.
current_weather Bool No false Include current weather conditions in the JSON output.
temperature_unit String No celsius If fahrenheit is set, all temperature values are converted to Fahrenheit.
windspeed_unit String No kmh Other wind speed units: ms, mph and kn
precipitation_unit String No mm Other precipitation amount units: inch
timeformat String No iso8601 If format unixtime is selected, all time values are returned in UNIX epoch time in seconds. Please note that all timestamp are in GMT+0! For daily values with unix timestamps, please apply utc_offset_seconds again to get the correct date.
timezone String No GMT If timezone is set, all timestamps are returned as local-time and data is returned starting at 00:00 local-time. Any time zone name from the time zone database is supported. If auto is set as a time zone, the coordinates will be automatically resolved to the local time zone.
past_days Integer (0-2) No 0 If past_days is set, yesterday or the day before yesterday data are also returned.
start_date
end_date String (yyyy-mm-dd) No The time interval to get weather data. A day must be specified as an ISO8601 date (e.g. 2022-06-30).
models String array No auto Manually select one or more weather models. Per default, the best suitable weather models will be combined.
Hourly Parameter Definition
The parameter &hourly= accepts the following values. Most weather variables are given as an instantaneous value for the indicated hour. Some variables like precipitation are calculated from the preceding hour as an average or sum.
Variable Valid time Unit Description
temperature_2m Instant °C (°F) Air temperature at 2 meters above ground
snowfall Preceding hour sum cm (inch) Snowfall amount of the preceding hour in centimeters. For the water equivalent in millimeter, divide by 7. E.g. 7 cm snow = 10 mm precipitation water equivalent
rain Preceding hour sum mm (inch) Rain from large scale weather systems of the preceding hour in millimeter
showers Preceding hour sum mm (inch) Showers from convective precipitation in millimeters from the preceding hour
weathercode Instant WMO code Weather condition as a numeric code. Follow WMO weather interpretation codes. See table below for details.
snow_depth Instant meters Snow depth on the ground
freezinglevel_height Instant meters Altitude above sea level of the 0°C level
visibility Instant meters Viewing distance in meters. Influenced by low clouds, humidity and aerosols. Maximum visibility is approximately 24 km."""
|
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
JsonOutputToolsParser,
PydanticToolsParser,
)
__all__ = ["JsonOutputKeyToolsParser", "JsonOutputToolsParser", "PydanticToolsParser"]
|
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
JsonOutputToolsParser,
PydanticToolsParser,
)
__all__ = ["PydanticToolsParser", "JsonOutputToolsParser", "JsonOutputKeyToolsParser"]
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for BLIP."""
from ...image_processing_utils_fast import BaseImageProcessorFast
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, PILImageResampling
from ...utils import auto_docstring
@auto_docstring
class BlipImageProcessorFast(BaseImageProcessorFast):
# To be checked against the slow image processor
# None values left after checking can be removed
resample = PILImageResampling.BICUBIC
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
size = {"height": 384, "width": 384}
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
__all__ = ["BlipImageProcessorFast"]
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for BLIP."""
from ...image_processing_utils_fast import BASE_IMAGE_PROCESSOR_FAST_DOCSTRING, BaseImageProcessorFast
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, PILImageResampling
from ...utils import add_start_docstrings
@add_start_docstrings(
"Constructs a fast BLIP image processor.",
BASE_IMAGE_PROCESSOR_FAST_DOCSTRING,
)
class BlipImageProcessorFast(BaseImageProcessorFast):
# To be checked against the slow image processor
# None values left after checking can be removed
resample = PILImageResampling.BICUBIC
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
size = {"height": 384, "width": 384}
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
__all__ = ["BlipImageProcessorFast"]
|
# coding=utf-8
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import requests
# Configuration
GITHUB_REPO = "huggingface/diffusers"
GITHUB_RUN_ID = os.getenv("GITHUB_RUN_ID")
SLACK_WEBHOOK_URL = os.getenv("SLACK_WEBHOOK_URL")
PATH_IN_REPO = os.getenv("PATH_IN_REPO")
def main(args):
action_url = f"https://github.com/{GITHUB_REPO}/actions/runs/{GITHUB_RUN_ID}"
if args.status == "success":
hub_path = f"https://huggingface.co/datasets/diffusers/community-pipelines-mirror/tree/main/{PATH_IN_REPO}"
message = (
"✅ Community pipelines successfully mirrored.\n"
f"🕸️ GitHub Action URL: {action_url}.\n"
f"🤗 Hub location: {hub_path}."
)
else:
message = f"❌ Something wrong happened. Check out the GitHub Action to know more: {action_url}."
payload = {"text": message}
response = requests.post(SLACK_WEBHOOK_URL, json=payload)
if response.status_code == 200:
print("Notification sent to Slack successfully.")
else:
print("Failed to send notification to Slack.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--status", type=str, default="success", choices=["success", "failure"])
args = parser.parse_args()
main(args)
|
# coding=utf-8
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import requests
# Configuration
GITHUB_REPO = "huggingface/diffusers"
GITHUB_RUN_ID = os.getenv("GITHUB_RUN_ID")
SLACK_WEBHOOK_URL = os.getenv("SLACK_WEBHOOK_URL")
PATH_IN_REPO = os.getenv("PATH_IN_REPO")
def main(args):
action_url = f"https://github.com/{GITHUB_REPO}/actions/runs/{GITHUB_RUN_ID}"
if args.status == "success":
hub_path = f"https://huggingface.co/datasets/diffusers/community-pipelines-mirror/tree/main/{PATH_IN_REPO}"
message = (
"✅ Community pipelines successfully mirrored.\n"
f"🕸️ GitHub Action URL: {action_url}.\n"
f"🤗 Hub location: {hub_path}."
)
else:
message = f"❌ Something wrong happened. Check out the GitHub Action to know more: {action_url}."
payload = {"text": message}
response = requests.post(SLACK_WEBHOOK_URL, json=payload)
if response.status_code == 200:
print("Notification sent to Slack successfully.")
else:
print("Failed to send notification to Slack.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--status", type=str, default="success", choices=["success", "failure"])
args = parser.parse_args()
main(args)
|
"""
Pandas csv structured store.
DEPRECATED: Please use :class:`PandasQueryEngine` in `llama-index-experimental` instead.
"""
from typing import Any
class PandasIndex:
def __init__(
self,
*args: Any,
**kwargs: Any,
) -> None:
raise DeprecationWarning(
"PandasQueryEngine has been moved to `llama-index-experimental`.\n"
"`pip install llama-index-experimental`\n"
"`from llama_index.experimental.query_engine import PandasQueryEngine`\n"
"Note that the PandasQueryEngine allows for arbitrary code execution, \n"
"and should be used in a secure environment."
)
# Legacy
GPTPandasIndex = PandasIndex
|
"""Pandas csv structured store.
DEPRECATED: Please use :class:`PandasQueryEngine` in `llama-index-experimental` instead.
"""
from typing import Any
class PandasIndex:
def __init__(
self,
*args: Any,
**kwargs: Any,
) -> None:
raise DeprecationWarning(
"PandasQueryEngine has been moved to `llama-index-experimental`.\n"
"`pip install llama-index-experimental`\n"
"`from llama_index.experimental.query_engine import PandasQueryEngine`\n"
"Note that the PandasQueryEngine allows for arbitrary code execution, \n"
"and should be used in a secure environment."
)
# Legacy
GPTPandasIndex = PandasIndex
|
import os
import numpy as np
import pytest
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src.saving import load_model
class MaskingTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_masking_basics(self):
self.run_layer_test(
layers.Masking,
init_kwargs={"mask_value": 0.0},
input_shape=(2, 3, 2),
expected_output_shape=(2, 3, 2),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
assert_built_after_instantiation=True,
)
@pytest.mark.requires_trainable_backend
def test_masking_correctness(self):
x = np.array(
[
[[0.0, 0.0], [1.0, 2.0], [0.0, 0.0]],
[[2.0, 2.0], [0.0, 0.0], [2.0, 1.0]],
]
)
expected_mask = [[False, True, False], [True, False, True]]
layer = layers.Masking(mask_value=0.0)
self.assertAllClose(layer.compute_mask(x), expected_mask)
test_obj = self
class TestLayer(layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
def compute_output_shape(self, input_shape):
return input_shape
def call(self, inputs, mask=None):
assert mask is not None
test_obj.assertAllClose(mask, expected_mask)
return inputs
model = models.Sequential(
[
layers.Masking(mask_value=0.0),
TestLayer(),
]
)
model(x)
@pytest.mark.requires_trainable_backend
def test_masking_with_tensor(self):
model = models.Sequential(
[
layers.Masking(mask_value=ops.convert_to_tensor([0.0])),
layers.LSTM(1),
]
)
x = np.array(
[
[[0.0, 0.0], [1.0, 2.0], [0.0, 0.0]],
[[2.0, 2.0], [0.0, 0.0], [2.0, 1.0]],
]
)
model(x)
temp_filepath = os.path.join(self.get_temp_dir(), "model.keras")
model.save(temp_filepath)
reload_model = load_model(temp_filepath)
reload_model(x)
|
import numpy as np
import pytest
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src.saving import load_model
class MaskingTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_masking_basics(self):
self.run_layer_test(
layers.Masking,
init_kwargs={"mask_value": 0.0},
input_shape=(2, 3, 2),
expected_output_shape=(2, 3, 2),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
assert_built_after_instantiation=True,
)
@pytest.mark.requires_trainable_backend
def test_masking_correctness(self):
x = np.array(
[
[[0.0, 0.0], [1.0, 2.0], [0.0, 0.0]],
[[2.0, 2.0], [0.0, 0.0], [2.0, 1.0]],
]
)
expected_mask = [[False, True, False], [True, False, True]]
layer = layers.Masking(mask_value=0.0)
self.assertAllClose(layer.compute_mask(x), expected_mask)
test_obj = self
class TestLayer(layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
def compute_output_shape(self, input_shape):
return input_shape
def call(self, inputs, mask=None):
assert mask is not None
test_obj.assertAllClose(mask, expected_mask)
return inputs
model = models.Sequential(
[
layers.Masking(mask_value=0.0),
TestLayer(),
]
)
model(x)
@pytest.mark.requires_trainable_backend
def test_masking_with_tensor(self):
model = models.Sequential(
[
layers.Masking(mask_value=ops.convert_to_tensor([0.0])),
layers.LSTM(1),
]
)
x = np.array(
[
[[0.0, 0.0], [1.0, 2.0], [0.0, 0.0]],
[[2.0, 2.0], [0.0, 0.0], [2.0, 1.0]],
]
)
model(x)
model.save("model.keras")
reload_model = load_model("model.keras")
reload_model(x)
|
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Identity")
class Identity(Layer):
"""Identity layer.
This layer should be used as a placeholder when no operation is to be
performed. The layer just returns its `inputs` argument as output.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
self._build_at_init()
def call(self, inputs):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs):
return tree.map_structure(
lambda x: KerasTensor(x.shape, dtype=x.dtype, sparse=x.sparse),
inputs,
)
|
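A tiny sketch of Identity used as the placeholder the docstring above describes, e.g. standing in for an optional block in a functional model; shapes are illustrative.

# Illustrative: Identity as a no-op placeholder inside a Keras functional model.
import numpy as np
from keras import layers, models

inputs = layers.Input(shape=(4,))
x = layers.Identity()(inputs)  # stands in for an optional block
outputs = layers.Dense(2)(x)
model = models.Model(inputs, outputs)
print(model(np.zeros((1, 4))).shape)  # (1, 2)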
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Identity")
class Identity(Layer):
"""Identity layer.
This layer should be used as a placeholder when no operation is to be
performed. The layer just returns its `inputs` argument as output.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
self.built = True
def call(self, inputs):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs):
return tree.map_structure(
lambda x: KerasTensor(x.shape, dtype=x.dtype, sparse=x.sparse),
inputs,
)
|
"""Base schema for callback managers."""
import uuid
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from typing import Any, Dict, Optional
# timestamp for callback events
TIMESTAMP_FORMAT = "%m/%d/%Y, %H:%M:%S.%f"
# base trace_id for the tracemap in callback_manager
BASE_TRACE_EVENT = "root"
class CBEventType(str, Enum):
"""
Callback manager event types.
Attributes:
CHUNKING: Logs for the before and after of text splitting.
NODE_PARSING: Logs for the documents and the nodes that they are parsed into.
EMBEDDING: Logs for the number of texts embedded.
LLM: Logs for the template and response of LLM calls.
QUERY: Keeps track of the start and end of each query.
RETRIEVE: Logs for the nodes retrieved for a query.
SYNTHESIZE: Logs for the result for synthesize calls.
TREE: Logs for the summary and level of summaries generated.
SUB_QUESTION: Logs for a generated sub question and answer.
"""
CHUNKING = "chunking"
NODE_PARSING = "node_parsing"
EMBEDDING = "embedding"
LLM = "llm"
QUERY = "query"
RETRIEVE = "retrieve"
SYNTHESIZE = "synthesize"
TREE = "tree"
SUB_QUESTION = "sub_question"
TEMPLATING = "templating"
FUNCTION_CALL = "function_call"
RERANKING = "reranking"
EXCEPTION = "exception"
AGENT_STEP = "agent_step"
class EventPayload(str, Enum):
DOCUMENTS = "documents" # list of documents before parsing
CHUNKS = "chunks" # list of text chunks
NODES = "nodes" # list of nodes
PROMPT = "formatted_prompt" # formatted prompt sent to LLM
MESSAGES = "messages" # list of messages sent to LLM
COMPLETION = "completion" # completion from LLM
RESPONSE = "response" # message response from LLM
QUERY_STR = "query_str" # query used for query engine
SUB_QUESTION = "sub_question" # a sub question & answer + sources
EMBEDDINGS = "embeddings" # list of embeddings
TOP_K = "top_k" # top k nodes retrieved
ADDITIONAL_KWARGS = "additional_kwargs" # additional kwargs for event call
SERIALIZED = "serialized" # serialized object for event caller
FUNCTION_CALL = "function_call" # function call for the LLM
FUNCTION_OUTPUT = "function_call_response" # function call output
TOOL = "tool" # tool used in LLM call
MODEL_NAME = "model_name" # model name used in an event
TEMPLATE = "template" # template used in LLM call
TEMPLATE_VARS = "template_vars" # template variables used in LLM call
SYSTEM_PROMPT = "system_prompt" # system prompt used in LLM call
QUERY_WRAPPER_PROMPT = "query_wrapper_prompt" # query wrapper prompt used in LLM
EXCEPTION = "exception" # exception raised in an event
# events that will never have children events
LEAF_EVENTS = (CBEventType.CHUNKING, CBEventType.LLM, CBEventType.EMBEDDING)
@dataclass
class CBEvent:
"""Generic class to store event information."""
event_type: CBEventType
payload: Optional[Dict[str, Any]] = None
time: str = ""
id_: str = ""
def __post_init__(self) -> None:
"""Init time and id if needed."""
if not self.time:
self.time = datetime.now().strftime(TIMESTAMP_FORMAT)
if not self.id_:
self.id_ = str(uuid.uuid4())
@dataclass
class EventStats:
"""Time-based Statistics for events."""
total_secs: float
average_secs: float
total_count: int
|
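A short sketch constructing an event with the schema above; time and id_ are filled in by __post_init__ when left empty.

# Illustrative: record an LLM event with the callback schema above.
event = CBEvent(
    event_type=CBEventType.LLM,
    payload={EventPayload.PROMPT: "Summarize the document."},
)
print(event.event_type, event.time, event.id_)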
"""Base schema for callback managers."""
import uuid
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from typing import Any, Dict, Optional
# timestamp for callback events
TIMESTAMP_FORMAT = "%m/%d/%Y, %H:%M:%S.%f"
# base trace_id for the tracemap in callback_manager
BASE_TRACE_EVENT = "root"
class CBEventType(str, Enum):
"""
Callback manager event types.
Attributes:
CHUNKING: Logs for the before and after of text splitting.
NODE_PARSING: Logs for the documents and the nodes that they are parsed into.
EMBEDDING: Logs for the number of texts embedded.
LLM: Logs for the template and response of LLM calls.
QUERY: Keeps track of the start and end of each query.
RETRIEVE: Logs for the nodes retrieved for a query.
SYNTHESIZE: Logs for the result for synthesize calls.
TREE: Logs for the summary and level of summaries generated.
SUB_QUESTION: Logs for a generated sub question and answer.
"""
CHUNKING = "chunking"
NODE_PARSING = "node_parsing"
EMBEDDING = "embedding"
LLM = "llm"
QUERY = "query"
RETRIEVE = "retrieve"
SYNTHESIZE = "synthesize"
TREE = "tree"
SUB_QUESTION = "sub_question"
TEMPLATING = "templating"
FUNCTION_CALL = "function_call"
RERANKING = "reranking"
EXCEPTION = "exception"
AGENT_STEP = "agent_step"
class EventPayload(str, Enum):
DOCUMENTS = "documents" # list of documents before parsing
CHUNKS = "chunks" # list of text chunks
NODES = "nodes" # list of nodes
PROMPT = "formatted_prompt" # formatted prompt sent to LLM
MESSAGES = "messages" # list of messages sent to LLM
COMPLETION = "completion" # completion from LLM
RESPONSE = "response" # message response from LLM
QUERY_STR = "query_str" # query used for query engine
SUB_QUESTION = "sub_question" # a sub question & answer + sources
EMBEDDINGS = "embeddings" # list of embeddings
TOP_K = "top_k" # top k nodes retrieved
ADDITIONAL_KWARGS = "additional_kwargs" # additional kwargs for event call
SERIALIZED = "serialized" # serialized object for event caller
FUNCTION_CALL = "function_call" # function call for the LLM
FUNCTION_OUTPUT = "function_call_response" # function call output
TOOL = "tool" # tool used in LLM call
MODEL_NAME = "model_name" # model name used in an event
TEMPLATE = "template" # template used in LLM call
TEMPLATE_VARS = "template_vars" # template variables used in LLM call
SYSTEM_PROMPT = "system_prompt" # system prompt used in LLM call
QUERY_WRAPPER_PROMPT = "query_wrapper_prompt" # query wrapper prompt used in LLM
EXCEPTION = "exception" # exception raised in an event
# events that will never have child events
LEAF_EVENTS = (CBEventType.CHUNKING, CBEventType.LLM, CBEventType.EMBEDDING)
@dataclass
class CBEvent:
"""Generic class to store event information."""
event_type: CBEventType
payload: Optional[Dict[str, Any]] = None
time: str = ""
id_: str = ""
def __post_init__(self) -> None:
"""Init time and id if needed."""
if not self.time:
self.time = datetime.now().strftime(TIMESTAMP_FORMAT)
if not self.id_:
            self.id_ = str(uuid.uuid4())
@dataclass
class EventStats:
"""Time-based Statistics for events."""
total_secs: float
average_secs: float
total_count: int
|
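A minimal usage sketch of the schema above, using only names defined in this module: construct an LLM event, let __post_init__ fill in the timestamp and id, and check the leaf-event tuple.
event = CBEvent(
    event_type=CBEventType.LLM,
    payload={
        EventPayload.PROMPT: "Summarize the document.",
        EventPayload.COMPLETION: "The document describes ...",
    },
)
# __post_init__ filled in the timestamp and a uuid4 id because none were given
print(event.event_type, event.time, event.id_)
# LLM events are leaf events: they never get child events in the trace map
print(CBEventType.LLM in LEAF_EVENTS)  # True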
# Copyright (c) OpenMMLab. All rights reserved.
from .bfp import BFP
from .channel_mapper import ChannelMapper
from .cspnext_pafpn import CSPNeXtPAFPN
from .ct_resnet_neck import CTResNetNeck
from .dilated_encoder import DilatedEncoder
from .dyhead import DyHead
from .fpg import FPG
from .fpn import FPN
from .fpn_carafe import FPN_CARAFE
from .fpn_dropblock import FPN_DropBlock
from .hrfpn import HRFPN
from .nas_fpn import NASFPN
from .nasfcos_fpn import NASFCOS_FPN
from .pafpn import PAFPN
from .rfp import RFP
from .ssd_neck import SSDNeck
from .ssh import SSH
from .yolo_neck import YOLOV3Neck
from .yolox_pafpn import YOLOXPAFPN
__all__ = [
'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN',
'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder',
'CTResNetNeck', 'SSDNeck', 'YOLOXPAFPN', 'DyHead', 'CSPNeXtPAFPN', 'SSH',
'FPN_DropBlock'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bfp import BFP
from .channel_mapper import ChannelMapper
from .cspnext_pafpn import CSPNeXtPAFPN
from .ct_resnet_neck import CTResNetNeck
from .dilated_encoder import DilatedEncoder
from .dyhead import DyHead
from .fpg import FPG
from .fpn import FPN
from .fpn_carafe import FPN_CARAFE
from .hrfpn import HRFPN
from .nas_fpn import NASFPN
from .nasfcos_fpn import NASFCOS_FPN
from .pafpn import PAFPN
from .rfp import RFP
from .ssd_neck import SSDNeck
from .ssh import SSH
from .yolo_neck import YOLOV3Neck
from .yolox_pafpn import YOLOXPAFPN
__all__ = [
'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN',
'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder',
'CTResNetNeck', 'SSDNeck', 'YOLOXPAFPN', 'DyHead', 'CSPNeXtPAFPN', 'SSH'
]
|
from __future__ import annotations
from copy import deepcopy
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture(scope="session")
def _splade_bert_tiny_model() -> SparseEncoder:
model = SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
model.model_card_data.generate_widget_examples = False # Disable widget examples generation for testing
return model
@pytest.fixture()
def splade_bert_tiny_model(_splade_bert_tiny_model: SparseEncoder) -> SparseEncoder:
return deepcopy(_splade_bert_tiny_model)
@pytest.fixture(scope="session")
def _inference_free_splade_bert_tiny_model() -> SparseEncoder:
model = SparseEncoder("sparse-encoder-testing/inference-free-splade-bert-tiny-nq")
model.model_card_data.generate_widget_examples = False # Disable widget examples generation for testing
return model
@pytest.fixture()
def inference_free_splade_bert_tiny_model(_inference_free_splade_bert_tiny_model: SparseEncoder) -> SparseEncoder:
return deepcopy(_inference_free_splade_bert_tiny_model)
@pytest.fixture(scope="session")
def _csr_bert_tiny_model() -> SparseEncoder:
model = SparseEncoder("sentence-transformers-testing/stsb-bert-tiny-safetensors")
model[-1].k = 16
model[-1].k_aux = 32
model.model_card_data.generate_widget_examples = False # Disable widget examples generation for testing
return model
@pytest.fixture()
def csr_bert_tiny_model(_csr_bert_tiny_model: SparseEncoder) -> SparseEncoder:
return deepcopy(_csr_bert_tiny_model)
|
from __future__ import annotations
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture()
def splade_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture(scope="session")
def splade_bert_tiny_model_reused() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture()
def csr_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sentence-transformers-testing/stsb-bert-tiny-safetensors")
|
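The two conftest variants above trade load time against test isolation; here is a minimal, self-contained sketch of the session-plus-deepcopy pattern (the ExpensiveModel class is a hypothetical stand-in for SparseEncoder):
from copy import deepcopy
import pytest

class ExpensiveModel:
    # hypothetical stand-in for a model that is slow to construct
    def __init__(self) -> None:
        self.k = 4  # mutable configuration a test might change

@pytest.fixture(scope="session")
def _shared_model() -> ExpensiveModel:
    # built once for the whole test session
    return ExpensiveModel()

@pytest.fixture()
def model(_shared_model: ExpensiveModel) -> ExpensiveModel:
    # every test gets its own copy, so in-test mutations never leak across tests
    return deepcopy(_shared_model)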
from docarray.typing.bytes import ImageBytes
from docarray.typing.id import ID
from docarray.typing.tensor import ImageNdArray, ImageTensor
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray
from docarray.typing.url import (
AnyUrl,
AudioUrl,
ImageUrl,
Mesh3DUrl,
PointCloud3DUrl,
TextUrl,
VideoUrl,
)
__all__ = [
'NdArray',
'NdArrayEmbedding',
'AudioNdArray',
'VideoNdArray',
'AnyEmbedding',
'ImageUrl',
'AudioUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
'AnyUrl',
'ID',
'AnyTensor',
'ImageBytes',
'ImageTensor',
'ImageNdArray',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor # noqa
__all__.extend(
[
'AudioTorchTensor',
'TorchEmbedding',
'TorchTensor',
'VideoTorchTensor',
'ImageTorchTensor',
]
)
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
pass
else:
from docarray.typing.tensor import TensorFlowTensor # noqa: F401
__all__.extend(['TensorFlowTensor'])
|
from docarray.typing.bytes import ImageBytes
from docarray.typing.id import ID
from docarray.typing.tensor import ImageNdArray, ImageTensor
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray
from docarray.typing.url import (
AnyUrl,
AudioUrl,
ImageUrl,
Mesh3DUrl,
PointCloud3DUrl,
TextUrl,
VideoUrl,
)
__all__ = [
'NdArray',
'NdArrayEmbedding',
'AudioNdArray',
'VideoNdArray',
'AnyEmbedding',
'ImageUrl',
'AudioUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
'AnyUrl',
'ID',
'AnyTensor',
'ImageBytes',
'ImageTensor',
'ImageNdArray',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor # noqa
__all__.extend(
[
'AudioTorchTensor',
'TorchEmbedding',
'TorchTensor',
'VideoTorchTensor',
'ImageTorchTensor',
]
)
|
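Both variants above rely on the same optional-dependency pattern: import inside try/except and extend __all__ only when the backend is importable. A self-contained sketch of that pattern, with numpy standing in for the optional backend:
__all__ = ['always_available']

def always_available() -> str:
    return 'no optional dependency needed'

try:
    import numpy as np  # stand-in for an optional backend such as torch or tensorflow
except ImportError:
    pass
else:
    def numpy_backed() -> 'np.ndarray':
        # only exported when the optional backend is actually importable
        return np.zeros(3)

    __all__.extend(['numpy_backed'])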
_base_ = './faster-rcnn_r50_fpn_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './faster_rcnn_r50_fpn_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
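Both configs above use MMDetection's `_base_` inheritance: the child file is merged key-by-key into its parent, so only overridden fields need to be written out. A hypothetical follow-on config as a sketch (the file name and the 20-class override are assumptions, not part of the originals):
_base_ = './faster-rcnn_r101_fpn_2x_coco.py'
# override only what differs; backbone, datasets and schedule are inherited unchanged
model = dict(
    roi_head=dict(
        bbox_head=dict(num_classes=20)))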
import os
import pytest
from llama_index.embeddings.nvidia import NVIDIAEmbedding as Interface
from typing import Any
from pytest_httpx import HTTPXMock
@pytest.fixture()
def mock_local_models(httpx_mock: HTTPXMock):
mock_response = {
"data": [
{
"id": "model1",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
"root": "model1",
}
]
}
httpx_mock.add_response(
url="https://test_url/v1/models",
method="GET",
json=mock_response,
status_code=200,
)
def get_api_key(instance: Any) -> str:
return instance._client.api_key
def test_create_default_url_without_api_key(masked_env_var: str) -> None:
with pytest.raises(ValueError) as e:
Interface()
assert "API key is required" in str(e.value)
@pytest.mark.usefixtures("mock_local_models")
def test_create_unknown_url_without_api_key(masked_env_var: str) -> None:
Interface(base_url="https://test_url/v1")
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_create_with_api_key(param: str, masked_env_var: str) -> None:
instance = Interface(**{param: "just testing no failure"})
assert get_api_key(instance) == "just testing no failure"
def test_api_key_priority(masked_env_var: str) -> None:
try:
os.environ["NVIDIA_API_KEY"] = "ENV"
assert get_api_key(Interface()) == "ENV"
assert get_api_key(Interface(nvidia_api_key="PARAM")) == "PARAM"
assert get_api_key(Interface(api_key="PARAM")) == "PARAM"
assert get_api_key(Interface(api_key="LOW", nvidia_api_key="HIGH")) == "HIGH"
finally:
# we must clean up environ or it may impact other tests
del os.environ["NVIDIA_API_KEY"]
@pytest.mark.integration
def test_missing_api_key_error(masked_env_var: str) -> None:
with pytest.raises(ValueError) as err_msg:
Interface()
assert "An API key is required" in str(err_msg.value)
@pytest.mark.integration
def test_bogus_api_key_error(masked_env_var: str) -> None:
client = Interface(nvidia_api_key="BOGUS")
with pytest.raises(Exception) as exc_info:
client.get_query_embedding("Hello, world!")
message = str(exc_info.value)
assert "401" in message
@pytest.mark.integration
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_api_key(model: str, mode: dict, param: str, masked_env_var: str) -> None:
client = Interface(model=model, **{**mode, **{param: masked_env_var}})
client.get_query_embedding("Hello, world!")
|
import os
import pytest
from llama_index.embeddings.nvidia import NVIDIAEmbedding as Interface
from typing import Any
from pytest_httpx import HTTPXMock
@pytest.fixture()
def mock_local_models(httpx_mock: HTTPXMock):
mock_response = {
"data": [
{
"id": "model1",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
"root": "model1",
}
]
}
httpx_mock.add_response(
url="https://test_url/v1/models",
method="GET",
json=mock_response,
status_code=200,
)
def get_api_key(instance: Any) -> str:
return instance._client.api_key
def test_create_default_url_without_api_key(masked_env_var: str) -> None:
with pytest.raises(ValueError) as e:
Interface()
assert "API key is required" in str(e.value)
@pytest.mark.usefixtures("mock_local_models")
def test_create_unknown_url_without_api_key(masked_env_var: str) -> None:
Interface(base_url="https://test_url/v1")
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_create_with_api_key(param: str, masked_env_var: str) -> None:
instance = Interface(**{param: "just testing no failure"})
assert get_api_key(instance) == "just testing no failure"
def test_api_key_priority(masked_env_var: str) -> None:
try:
os.environ["NVIDIA_API_KEY"] = "ENV"
assert get_api_key(Interface()) == "ENV"
assert get_api_key(Interface(nvidia_api_key="PARAM")) == "PARAM"
assert get_api_key(Interface(api_key="PARAM")) == "PARAM"
assert get_api_key(Interface(api_key="LOW", nvidia_api_key="HIGH")) == "HIGH"
finally:
# we must clean up environ or it may impact other tests
del os.environ["NVIDIA_API_KEY"]
@pytest.mark.integration()
def test_missing_api_key_error(masked_env_var: str) -> None:
with pytest.raises(ValueError) as err_msg:
Interface()
assert "An API key is required" in str(err_msg.value)
@pytest.mark.integration()
def test_bogus_api_key_error(masked_env_var: str) -> None:
client = Interface(nvidia_api_key="BOGUS")
with pytest.raises(Exception) as exc_info:
client.get_query_embedding("Hello, world!")
message = str(exc_info.value)
assert "401" in message
@pytest.mark.integration()
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_api_key(model: str, mode: dict, param: str, masked_env_var: str) -> None:
client = Interface(model=model, **{**mode, **{param: masked_env_var}})
client.get_query_embedding("Hello, world!")
|
from unittest import mock
# import aiohttp to force Pants to include it in the required dependencies
import aiohttp # noqa
import pytest
from azure.ai.inference.models import EmbeddingItem, EmbeddingsResult
from llama_index.core.schema import TextNode
from llama_index.embeddings.azure_inference import AzureAIEmbeddingsModel
@pytest.fixture()
def test_embed_model():
with mock.patch(
"llama_index.embeddings.azure_inference.base.EmbeddingsClient", autospec=True
):
embed_model = AzureAIEmbeddingsModel(
endpoint="https://my-endpoint.inference.ai.azure.com",
credential="my-api-key",
model_name="my_model_name",
)
embed_model._client.embed.return_value = EmbeddingsResult(
data=[EmbeddingItem(embedding=[1.0, 2.0, 3.0], index=0)]
)
return embed_model
def test_embed(test_embed_model: AzureAIEmbeddingsModel):
"""Test the basic embedding functionality."""
# In case the endpoint being tested serves more than one model
nodes = [
TextNode(
text="Before college the two main things I worked on, "
"outside of school, were writing and programming."
)
]
response = test_embed_model(nodes=nodes)
assert len(response) == len(nodes)
assert response[0].embedding
def test_get_metadata(test_embed_model: AzureAIEmbeddingsModel, caplog):
"""
Tests if we can get model metadata back from the endpoint. If so,
model_name should not be 'unknown'. Some endpoints may not support this
and in those cases a warning should be logged.
"""
assert (
test_embed_model.model_name != "unknown"
or "does not support model metadata retrieval" in caplog.text
)
|
from unittest import mock
# import aiohttp to force Pants to include it in the required dependencies
import aiohttp # noqa
import pytest
from azure.ai.inference.models import EmbeddingItem, EmbeddingsResult
from llama_index.core.schema import TextNode
from llama_index.embeddings.azure_inference import AzureAIEmbeddingsModel
@pytest.fixture()
def test_embed_model():
with mock.patch(
"llama_index.embeddings.azure_inference.base.EmbeddingsClient", autospec=True
):
embed_model = AzureAIEmbeddingsModel(
endpoint="https://my-endpoint.inference.ai.azure.com",
credential="my-api-key",
model_name="my_model_name",
)
embed_model._client.embed.return_value = EmbeddingsResult(
data=[EmbeddingItem(embedding=[1.0, 2.0, 3.0], index=0)]
)
return embed_model
def test_embed(test_embed_model: AzureAIEmbeddingsModel):
"""Test the basic embedding functionality."""
# In case the endpoint being tested serves more than one model
nodes = [
TextNode(
text="Before college the two main things I worked on, "
"outside of school, were writing and programming."
)
]
response = test_embed_model(nodes=nodes)
assert len(response) == len(nodes)
assert response[0].embedding
def test_get_metadata(test_embed_model: AzureAIEmbeddingsModel, caplog):
"""Tests if we can get model metadata back from the endpoint. If so,
model_name should not be 'unknown'. Some endpoints may not support this
and in those cases a warning should be logged.
"""
assert (
test_embed_model.model_name != "unknown"
or "does not support model metadata retrieval" in caplog.text
)
|
from typing import Any, Optional
from llama_index.core.bridge.pydantic import Field, model_serializer
from llama_index.core.tools import ToolSelection, ToolOutput
from llama_index.core.llms import ChatMessage
from llama_index.core.workflow import Event, StartEvent
class AgentInput(Event):
"""LLM input."""
input: list[ChatMessage]
current_agent_name: str
class AgentSetup(Event):
"""Agent setup."""
input: list[ChatMessage]
current_agent_name: str
class AgentStream(Event):
"""Agent stream."""
delta: str
response: str
current_agent_name: str
tool_calls: list[ToolSelection]
raw: Optional[Any] = Field(default=None, exclude=True)
class AgentOutput(Event):
"""LLM output."""
response: ChatMessage
tool_calls: list[ToolSelection]
raw: Optional[Any] = Field(default=None, exclude=True)
current_agent_name: str
def __str__(self) -> str:
return self.response.content or ""
class ToolCall(Event):
"""All tool calls are surfaced."""
tool_name: str
tool_kwargs: dict
tool_id: str
class ToolCallResult(Event):
"""Tool call result."""
tool_name: str
tool_kwargs: dict
tool_id: str
tool_output: ToolOutput
return_direct: bool
class AgentWorkflowStartEvent(StartEvent):
@model_serializer()
def serialize_start_event(self) -> dict:
"""Serialize the start event and exclude the memory."""
return {
"user_msg": self.user_msg,
"chat_history": self.chat_history,
"max_iterations": self.max_iterations,
}
|
from typing import Any, Optional
from llama_index.core.bridge.pydantic import Field, model_serializer
from llama_index.core.tools import ToolSelection, ToolOutput
from llama_index.core.llms import ChatMessage
from llama_index.core.workflow import Event, StartEvent
class AgentInput(Event):
"""LLM input."""
input: list[ChatMessage]
current_agent_name: str
class AgentSetup(Event):
"""Agent setup."""
input: list[ChatMessage]
current_agent_name: str
class AgentStream(Event):
"""Agent stream."""
delta: str
response: str
current_agent_name: str
tool_calls: list[ToolSelection]
raw: Optional[Any] = Field(default=None, exclude=True)
class AgentOutput(Event):
"""LLM output."""
response: ChatMessage
tool_calls: list[ToolSelection]
raw: Optional[Any] = Field(default=None, exclude=True)
current_agent_name: str
def __str__(self) -> str:
return self.response.content or ""
class ToolCall(Event):
"""All tool calls are surfaced."""
tool_name: str
tool_kwargs: dict
tool_id: str
class ToolCallResult(Event):
"""Tool call result."""
tool_name: str
tool_kwargs: dict
tool_id: str
tool_output: ToolOutput
return_direct: bool
class AgentWorkflowStartEvent(StartEvent):
@model_serializer()
def serialize_start_event(self) -> dict:
"""Serialize the start event and exclude the memory."""
return {
"user_msg": self.user_msg,
"chat_history": self.chat_history,
}
|
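A minimal sketch using only the event classes above (and assuming the imported ChatMessage accepts role/content keywords): build an AgentOutput and rely on its __str__ to surface the assistant text.
output = AgentOutput(
    response=ChatMessage(role="assistant", content="The answer is 42."),
    tool_calls=[],
    raw=None,
    current_agent_name="researcher",
)
print(str(output))  # "The answer is 42."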
from typing import Optional
import pandas as pd
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
return MyDocNested
def test_to_from_pandas_df(nested_doc_cls):
da = DocList[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
),
nested_doc_cls(text='hello world', image=ImageDoc()),
]
)
df = da.to_dataframe()
assert isinstance(df, pd.DataFrame)
assert len(df) == 2
assert (
df.columns
== [
'id',
'count',
'text',
'image__id',
'image__url',
'image__tensor',
'image__embedding',
'image__bytes_',
]
).all()
da_from_df = DocList[nested_doc_cls].from_dataframe(df)
for doc1, doc2 in zip(da, da_from_df):
assert doc1 == doc2
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_pandas_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocList.from_dataframe(df=df)
def test_from_pandas_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocList[nested_doc.__class__].from_dataframe(df=df)
|
from typing import Optional
import pandas as pd
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
return MyDocNested
def test_to_from_pandas_df(nested_doc_cls):
da = DocList[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
),
nested_doc_cls(text='hello world', image=ImageDoc()),
]
)
df = da.to_pandas()
assert isinstance(df, pd.DataFrame)
assert len(df) == 2
assert (
df.columns
== [
'id',
'count',
'text',
'image__id',
'image__url',
'image__tensor',
'image__embedding',
'image__bytes_',
]
).all()
da_from_df = DocList[nested_doc_cls].from_pandas(df)
for doc1, doc2 in zip(da, da_from_df):
assert doc1 == doc2
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_pandas_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocList.from_pandas(df=df)
def test_from_pandas_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocList[nested_doc.__class__].from_pandas(df=df)
|
import numpy as np
from docarray.base_doc import AnyDoc, BaseDoc
from docarray.typing import NdArray
def test_any_doc():
class InnerDocument(BaseDoc):
text: str
tensor: NdArray
class CustomDoc(BaseDoc):
inner: InnerDocument
text: str
doc = CustomDoc(
text='bye', inner=InnerDocument(text='hello', tensor=np.zeros((3, 224, 224)))
)
any_doc = AnyDoc(**doc.__dict__)
assert any_doc.text == doc.text
assert any_doc.inner.text == doc.inner.text
assert (any_doc.inner.tensor == doc.inner.tensor).all()
|
import numpy as np
from docarray.base_document import AnyDocument, BaseDocument
from docarray.typing import NdArray
def test_any_doc():
class InnerDocument(BaseDocument):
text: str
tensor: NdArray
class CustomDoc(BaseDocument):
inner: InnerDocument
text: str
doc = CustomDoc(
text='bye', inner=InnerDocument(text='hello', tensor=np.zeros((3, 224, 224)))
)
any_doc = AnyDocument(**doc.__dict__)
assert any_doc.text == doc.text
assert any_doc.inner.text == doc.inner.text
assert (any_doc.inner.tensor == doc.inner.tensor).all()
|
from pathlib import Path
from typing import Union, Optional, Callable, TYPE_CHECKING, Generator
if TYPE_CHECKING:
from docarray import DocumentArray
from docarray.typing import T
from multiprocessing.pool import ThreadPool, Pool
class DataLoaderMixin:
@classmethod
def dataloader(
cls,
path: Union[str, Path],
func: Callable[['DocumentArray'], 'T'],
batch_size: int,
protocol: str = 'protobuf',
compress: Optional[str] = None,
backend: str = 'thread',
num_worker: Optional[int] = None,
pool: Optional[Union['Pool', 'ThreadPool']] = None,
show_progress: bool = False,
) -> Generator['DocumentArray', None, None]:
"""Load array elements, batches and maps them with a function in parallel, finally yield the batch in DocumentArray
:param path: Path or filename where the data is stored.
:param func: a function that takes :class:`DocumentArray` as input and outputs anything. You can either modify elements
in-place (only with `thread` backend) or work later on return elements.
:param batch_size: Size of each generated batch (except the last one, which might be smaller)
:param protocol: protocol to use
:param compress: compress algorithm to use
        :param backend: whether to use multi-`process` or multi-`thread` as the parallelization backend. In general, if your
            ``func`` is IO-bound then `thread` is usually good enough; if your ``func`` is CPU-bound then you may prefer `process`.
            In practice, you should experiment to find the best value. However, if you wish to modify the elements
            in-place, you should always use the `thread` backend, regardless of whether ``func`` is IO- or CPU-bound.
        .. warning::
            When using the `process` backend, you should not expect ``func`` to modify elements in-place. This is because
            the multiprocessing backend passes the variable via pickle and works in another process; the passed object
            and the original object do **not** share the same memory.
:param num_worker: the number of parallel workers. If not given, then the number of CPUs in the system will be used.
:param pool: use an existing/external pool. If given, `backend` is ignored and you will be responsible for closing the pool.
:param show_progress: if set, show a progressbar
:return:
"""
from docarray.array.mixins.dataloader.helper import DocumentArrayLoader
for da in DocumentArrayLoader(
path, protocol=protocol, compress=compress, show_progress=False
).map_batch(
func,
batch_size=batch_size,
backend=backend,
num_worker=num_worker,
pool=pool,
show_progress=show_progress,
):
yield da
|
from pathlib import Path
from typing import Union, Optional, Callable, TYPE_CHECKING, Generator
if TYPE_CHECKING:
from docarray import DocumentArray
from docarray.typing import T
from multiprocessing.pool import ThreadPool, Pool
class DataLoaderMixin:
@classmethod
def dataloader(
cls,
path: Union[str, Path],
func: Callable[['DocumentArray'], 'T'],
batch_size: int,
protocol: str = 'protobuf',
compress: Optional[str] = None,
backend: str = 'thread',
num_worker: Optional[int] = None,
pool: Optional[Union['Pool', 'ThreadPool']] = None,
show_progress: bool = False,
) -> Generator['DocumentArray', None, None]:
"""Load array elements, batches and maps them with a function in parallel, finally yield the batch in DocumentArray
:param path: Path or filename where the data is stored.
:param func: a function that takes :class:`DocumentArray` as input and outputs anything. You can either modify elements
in-place (only with `thread` backend) or work later on return elements.
:param batch_size: Size of each generated batch (except the last one, which might be smaller)
:param protocol: protocol to use
:param compress: compress algorithm to use
        :param backend: whether to use multi-`process` or multi-`thread` as the parallelization backend. In general, if your
            ``func`` is IO-bound then `thread` is usually good enough; if your ``func`` is CPU-bound then you may prefer `process`.
            In practice, you should experiment to find the best value. However, if you wish to modify the elements
            in-place, you should always use the `thread` backend, regardless of whether ``func`` is IO- or CPU-bound.
        .. warning::
            When using the `process` backend, you should not expect ``func`` to modify elements in-place. This is because
            the multiprocessing backend passes the variable via pickle and works in another process; the passed object
            and the original object do **not** share the same memory.
:param num_worker: the number of parallel workers. If not given, then the number of CPUs in the system will be used.
:param pool: use an existing/external pool. If given, `backend` is ignored and you will be responsible for closing the pool.
:param show_progress: if set, show a progressbar
:return:
"""
from .helper import DocumentArrayLoader
for da in DocumentArrayLoader(
path, protocol=protocol, compress=compress, show_progress=False
).map_batch(
func,
batch_size=batch_size,
backend=backend,
num_worker=num_worker,
pool=pool,
show_progress=show_progress,
):
yield da
|
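A hedged usage sketch for the dataloader documented above; the docarray v1 era API (save_binary, tags) is assumed, and the file name and mapping function are purely illustrative:
from docarray import Document, DocumentArray

# persist a small array in a streamable per-document format
DocumentArray([Document(text=f'doc {i}') for i in range(100)]).save_binary(
    'docs.bin', protocol='protobuf'
)

def mark_seen(batch: 'DocumentArray') -> 'DocumentArray':
    # runs in a worker; with the 'thread' backend in-place edits stay visible
    for doc in batch:
        doc.tags['seen'] = True
    return batch

for batch in DocumentArray.dataloader(
    'docs.bin', func=mark_seen, batch_size=32, protocol='protobuf', backend='thread'
):
    print(len(batch))  # up to 32 Documents per yielded DocumentArray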
import os
from pathlib import Path
from jina import __cache_path__
def generate_default_volume_and_workspace(workspace_id=''):
"""automatically generate a docker volume, and an Executor workspace inside it
    :param workspace_id: id that is appended to the fallback workspace path; by default no such id is added
:return: List of volumes and a workspace string
"""
default_workspace = __cache_path__
container_addr = '/app'
    if default_workspace:  # use the default workspace derived from the jina cache path
host_addr = default_workspace
workspace = os.path.relpath(
path=os.path.abspath(default_workspace), start=Path.home()
)
else: # fallback if no custom volume and no default workspace
workspace = os.path.join(__cache_path__, 'executor-workspace')
host_addr = os.path.join(
Path.home(),
workspace,
workspace_id,
)
workspace_in_container = os.path.join(container_addr, workspace)
generated_volumes = [os.path.abspath(host_addr) + f':{container_addr}']
return generated_volumes, workspace_in_container
|
import os
from pathlib import Path
from jina import __cache_path__
def generate_default_volume_and_workspace(workspace_id=''):
"""automatically generate a docker volume, and an Executor workspace inside it
    :param workspace_id: id that is appended to the fallback workspace path; by default no such id is added
:return: List of volumes and a workspace string
"""
default_workspace = os.environ.get('JINA_DEFAULT_WORKSPACE_BASE')
container_addr = '/app'
if default_workspace: # use default workspace provided in env var
host_addr = default_workspace
workspace = os.path.relpath(
path=os.path.abspath(default_workspace), start=Path.home()
)
else: # fallback if no custom volume and no default workspace
workspace = os.path.join(__cache_path__, 'executor-workspace')
host_addr = os.path.join(
Path.home(),
workspace,
workspace_id,
)
workspace_in_container = os.path.join(container_addr, workspace)
generated_volumes = [os.path.abspath(host_addr) + f':{container_addr}']
return generated_volumes, workspace_in_container
|
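A short usage sketch for the helper above, using only the function as defined: the single generated volume maps a host workspace into /app, and the second return value is the matching in-container workspace path.
volumes, workspace_in_container = generate_default_volume_and_workspace(
    workspace_id='my-executor'
)
print(volumes)                 # e.g. ['<host workspace path>:/app']
print(workspace_in_container)  # path under /app that the Executor should use as its workspace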
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import List, Tuple, Union
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.structures import SampleList
from mmdet.utils import InstanceList, OptInstanceList, OptMultiConfig
from ..utils import unpack_gt_instances
class BaseMaskHead(BaseModule, metaclass=ABCMeta):
"""Base class for mask heads used in One-Stage Instance Segmentation."""
def __init__(self, init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
@abstractmethod
def loss_by_feat(self, *args, **kwargs):
"""Calculate the loss based on the features extracted by the mask
head."""
pass
@abstractmethod
def predict_by_feat(self, *args, **kwargs):
"""Transform a batch of output features extracted from the head into
mask results."""
pass
def loss(self,
x: Union[List[Tensor], Tuple[Tensor]],
batch_data_samples: SampleList,
positive_infos: OptInstanceList = None,
**kwargs) -> dict:
"""Perform forward propagation and loss calculation of the mask head on
the features of the upstream network.
Args:
x (list[Tensor] | tuple[Tensor]): Features from FPN.
Each has a shape (B, C, H, W).
batch_data_samples (list[:obj:`DetDataSample`]): Each item contains
the meta information of each image and corresponding
annotations.
positive_infos (list[:obj:`InstanceData`], optional): Information
of positive samples. Used when the label assignment is
done outside the MaskHead, e.g., BboxHead in
YOLACT or CondInst, etc. When the label assignment is done in
MaskHead, it would be None, like SOLO or SOLOv2. All values
in it should have shape (num_positive_samples, *).
Returns:
dict: A dictionary of loss components.
"""
if positive_infos is None:
outs = self(x)
else:
outs = self(x, positive_infos)
assert isinstance(outs, tuple), 'Forward results should be a tuple, ' \
'even if only one item is returned'
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \
= outputs
for gt_instances, img_metas in zip(batch_gt_instances,
batch_img_metas):
img_shape = img_metas['batch_input_shape']
gt_masks = gt_instances.masks.pad(img_shape)
gt_instances.masks = gt_masks
losses = self.loss_by_feat(
*outs,
batch_gt_instances=batch_gt_instances,
batch_img_metas=batch_img_metas,
positive_infos=positive_infos,
batch_gt_instances_ignore=batch_gt_instances_ignore,
**kwargs)
return losses
def predict(self,
x: Tuple[Tensor],
batch_data_samples: SampleList,
rescale: bool = False,
results_list: OptInstanceList = None,
**kwargs) -> InstanceList:
"""Test function without test-time augmentation.
Args:
x (tuple[Tensor]): Multi-level features from the
upstream network, each is a 4D-tensor.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
results_list (list[obj:`InstanceData`], optional): Detection
results of each image after the post process. Only exist
if there is a `bbox_head`, like `YOLACT`, `CondInst`, etc.
Returns:
list[obj:`InstanceData`]: Instance segmentation
results of each image after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance,)
- labels (Tensor): Has a shape (num_instances,).
- masks (Tensor): Processed mask results, has a
shape (num_instances, h, w).
"""
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
if results_list is None:
outs = self(x)
else:
outs = self(x, results_list)
results_list = self.predict_by_feat(
*outs,
batch_img_metas=batch_img_metas,
rescale=rescale,
results_list=results_list,
**kwargs)
return results_list
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import List, Tuple, Union
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.data_elements import SampleList
from mmdet.utils import InstanceList, OptInstanceList, OptMultiConfig
from ..utils import unpack_gt_instances
class BaseMaskHead(BaseModule, metaclass=ABCMeta):
"""Base class for mask heads used in One-Stage Instance Segmentation."""
def __init__(self, init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
@abstractmethod
def loss_by_feat(self, *args, **kwargs):
"""Calculate the loss based on the features extracted by the mask
head."""
pass
@abstractmethod
def predict_by_feat(self, *args, **kwargs):
"""Transform a batch of output features extracted from the head into
mask results."""
pass
def loss(self,
x: Union[List[Tensor], Tuple[Tensor]],
batch_data_samples: SampleList,
positive_infos: OptInstanceList = None,
**kwargs) -> dict:
"""Perform forward propagation and loss calculation of the mask head on
the features of the upstream network.
Args:
x (list[Tensor] | tuple[Tensor]): Features from FPN.
Each has a shape (B, C, H, W).
batch_data_samples (list[:obj:`DetDataSample`]): Each item contains
the meta information of each image and corresponding
annotations.
positive_infos (list[:obj:`InstanceData`], optional): Information
of positive samples. Used when the label assignment is
done outside the MaskHead, e.g., BboxHead in
YOLACT or CondInst, etc. When the label assignment is done in
MaskHead, it would be None, like SOLO or SOLOv2. All values
in it should have shape (num_positive_samples, *).
Returns:
dict: A dictionary of loss components.
"""
if positive_infos is None:
outs = self(x)
else:
outs = self(x, positive_infos)
assert isinstance(outs, tuple), 'Forward results should be a tuple, ' \
'even if only one item is returned'
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \
= outputs
for gt_instances, img_metas in zip(batch_gt_instances,
batch_img_metas):
img_shape = img_metas['batch_input_shape']
gt_masks = gt_instances.masks.pad(img_shape)
gt_instances.masks = gt_masks
losses = self.loss_by_feat(
*outs,
batch_gt_instances=batch_gt_instances,
batch_img_metas=batch_img_metas,
positive_infos=positive_infos,
batch_gt_instances_ignore=batch_gt_instances_ignore,
**kwargs)
return losses
def predict(self,
x: Tuple[Tensor],
batch_data_samples: SampleList,
rescale: bool = False,
results_list: OptInstanceList = None,
**kwargs) -> InstanceList:
"""Test function without test-time augmentation.
Args:
x (tuple[Tensor]): Multi-level features from the
upstream network, each is a 4D-tensor.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
results_list (list[obj:`InstanceData`], optional): Detection
results of each image after the post process. Only exist
if there is a `bbox_head`, like `YOLACT`, `CondInst`, etc.
Returns:
list[obj:`InstanceData`]: Instance segmentation
results of each image after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance,)
- labels (Tensor): Has a shape (num_instances,).
- masks (Tensor): Processed mask results, has a
shape (num_instances, h, w).
"""
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
if results_list is None:
outs = self(x)
else:
outs = self(x, results_list)
results_list = self.predict_by_feat(
*outs,
batch_img_metas=batch_img_metas,
rescale=rescale,
results_list=results_list,
**kwargs)
return results_list
|
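A hypothetical minimal subclass sketch (not a real MMDetection head) showing the hooks a concrete one-stage mask head has to provide on top of BaseMaskHead above; the forward output must be a tuple because loss() and predict() assert on it.
class DummyMaskHead(BaseMaskHead):
    def forward(self, x, positive_infos=None):
        # return a tuple even for a single output, as loss()/predict() expect
        return (x,)

    def loss_by_feat(self, mask_preds, batch_gt_instances, batch_img_metas,
                     positive_infos=None, batch_gt_instances_ignore=None, **kwargs):
        # placeholder: a real head turns predictions plus padded GT masks into losses
        return {'loss_mask': mask_preds[0].sum() * 0}

    def predict_by_feat(self, mask_preds, batch_img_metas, rescale=False,
                        results_list=None, **kwargs):
        # placeholder: a real head converts predictions into per-image InstanceData
        return results_list or []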
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
output = static_embedding(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
assert model.embedding.weight.shape == (29528, 32)
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
@pytest.fixture
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
output = static_embedding(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
assert model.embedding.weight.shape == (29528, 32)
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import hashlib
import pandas as pd
import torch
from transformers import T5EncoderModel
from diffusers import StableDiffusion3Pipeline
PROMPT = "a photo of sks dog"
MAX_SEQ_LENGTH = 77
LOCAL_DATA_DIR = "dog"
OUTPUT_PATH = "sample_embeddings.parquet"
def bytes_to_giga_bytes(bytes):
return bytes / 1024 / 1024 / 1024
def generate_image_hash(image_path):
with open(image_path, "rb") as f:
img_data = f.read()
return hashlib.sha256(img_data).hexdigest()
def load_sd3_pipeline():
id = "stabilityai/stable-diffusion-3-medium-diffusers"
text_encoder = T5EncoderModel.from_pretrained(id, subfolder="text_encoder_3", load_in_8bit=True, device_map="auto")
pipeline = StableDiffusion3Pipeline.from_pretrained(
id, text_encoder_3=text_encoder, transformer=None, vae=None, device_map="balanced"
)
return pipeline
@torch.no_grad()
def compute_embeddings(pipeline, prompt, max_sequence_length):
(
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
) = pipeline.encode_prompt(prompt=prompt, prompt_2=None, prompt_3=None, max_sequence_length=max_sequence_length)
print(
f"{prompt_embeds.shape=}, {negative_prompt_embeds.shape=}, {pooled_prompt_embeds.shape=}, {negative_pooled_prompt_embeds.shape}"
)
max_memory = bytes_to_giga_bytes(torch.cuda.max_memory_allocated())
print(f"Max memory allocated: {max_memory:.3f} GB")
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
def run(args):
pipeline = load_sd3_pipeline()
prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds = compute_embeddings(
pipeline, args.prompt, args.max_sequence_length
)
# Assumes that the images within `args.local_image_dir` have a JPEG extension. Change
# as needed.
image_paths = glob.glob(f"{args.local_data_dir}/*.jpeg")
data = []
for image_path in image_paths:
img_hash = generate_image_hash(image_path)
data.append(
(img_hash, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds)
)
# Create a DataFrame
embedding_cols = [
"prompt_embeds",
"negative_prompt_embeds",
"pooled_prompt_embeds",
"negative_pooled_prompt_embeds",
]
df = pd.DataFrame(
data,
columns=["image_hash"] + embedding_cols,
)
# Convert embedding lists to arrays (for proper storage in parquet)
for col in embedding_cols:
df[col] = df[col].apply(lambda x: x.cpu().numpy().flatten().tolist())
# Save the dataframe to a parquet file
df.to_parquet(args.output_path)
print(f"Data successfully serialized to {args.output_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--prompt", type=str, default=PROMPT, help="The instance prompt.")
parser.add_argument(
"--max_sequence_length",
type=int,
default=MAX_SEQ_LENGTH,
help="Maximum sequence length to use for computing the embeddings. The more the higher computational costs.",
)
parser.add_argument(
"--local_data_dir", type=str, default=LOCAL_DATA_DIR, help="Path to the directory containing instance images."
)
parser.add_argument("--output_path", type=str, default=OUTPUT_PATH, help="Path to serialize the parquet file.")
args = parser.parse_args()
run(args)
|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import hashlib
import pandas as pd
import torch
from transformers import T5EncoderModel
from diffusers import StableDiffusion3Pipeline
PROMPT = "a photo of sks dog"
MAX_SEQ_LENGTH = 77
LOCAL_DATA_DIR = "dog"
OUTPUT_PATH = "sample_embeddings.parquet"
def bytes_to_giga_bytes(bytes):
return bytes / 1024 / 1024 / 1024
def generate_image_hash(image_path):
with open(image_path, "rb") as f:
img_data = f.read()
return hashlib.sha256(img_data).hexdigest()
def load_sd3_pipeline():
id = "stabilityai/stable-diffusion-3-medium-diffusers"
text_encoder = T5EncoderModel.from_pretrained(id, subfolder="text_encoder_3", load_in_8bit=True, device_map="auto")
pipeline = StableDiffusion3Pipeline.from_pretrained(
id, text_encoder_3=text_encoder, transformer=None, vae=None, device_map="balanced"
)
return pipeline
@torch.no_grad()
def compute_embeddings(pipeline, prompt, max_sequence_length):
(
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
) = pipeline.encode_prompt(prompt=prompt, prompt_2=None, prompt_3=None, max_sequence_length=max_sequence_length)
print(
f"{prompt_embeds.shape=}, {negative_prompt_embeds.shape=}, {pooled_prompt_embeds.shape=}, {negative_pooled_prompt_embeds.shape}"
)
max_memory = bytes_to_giga_bytes(torch.cuda.max_memory_allocated())
print(f"Max memory allocated: {max_memory:.3f} GB")
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
def run(args):
pipeline = load_sd3_pipeline()
prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds = compute_embeddings(
pipeline, args.prompt, args.max_sequence_length
)
# Assumes that the images within `args.local_image_dir` have a JPEG extension. Change
# as needed.
image_paths = glob.glob(f"{args.local_data_dir}/*.jpeg")
data = []
for image_path in image_paths:
img_hash = generate_image_hash(image_path)
data.append(
(img_hash, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds)
)
# Create a DataFrame
embedding_cols = [
"prompt_embeds",
"negative_prompt_embeds",
"pooled_prompt_embeds",
"negative_pooled_prompt_embeds",
]
df = pd.DataFrame(
data,
columns=["image_hash"] + embedding_cols,
)
# Convert embedding lists to arrays (for proper storage in parquet)
for col in embedding_cols:
df[col] = df[col].apply(lambda x: x.cpu().numpy().flatten().tolist())
# Save the dataframe to a parquet file
df.to_parquet(args.output_path)
print(f"Data successfully serialized to {args.output_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--prompt", type=str, default=PROMPT, help="The instance prompt.")
parser.add_argument(
"--max_sequence_length",
type=int,
default=MAX_SEQ_LENGTH,
help="Maximum sequence length to use for computing the embeddings. The more the higher computational costs.",
)
parser.add_argument(
"--local_data_dir", type=str, default=LOCAL_DATA_DIR, help="Path to the directory containing instance images."
)
parser.add_argument("--output_path", type=str, default=OUTPUT_PATH, help="Path to serialize the parquet file.")
args = parser.parse_args()
run(args)
|
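A hedged sketch of consuming the parquet written by the script above: the embeddings were serialized as flat Python lists, so they need to be turned back into tensors on load (the reshape target is whatever shape was printed during serialization; only the flattened-list storage follows from the script):
import pandas as pd
import torch

df = pd.read_parquet("sample_embeddings.parquet")
row = df.iloc[0]
prompt_embeds = torch.tensor(row["prompt_embeds"])  # 1-D; reshape to the original shape before use
print(row["image_hash"], prompt_embeds.shape)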
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import re
from typing import Dict, List, Optional
from jina import Document, DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
class Sentencizer(Executor):
"""
    :class:`Sentencizer` splits the text on the doc-level
    into sentences on the chunk-level with a rule-based strategy.
The text is split by the punctuation characters listed in ``punct_chars``.
The sentences that are shorter than the ``min_sent_len``
or longer than the ``max_sent_len`` after stripping will be discarded.
:param min_sent_len: the minimal number of characters,
(including white spaces) of the sentence, by default 1.
:param max_sent_len: the maximal number of characters,
(including white spaces) of the sentence, by default 512.
:param punct_chars: the punctuation characters to split on,
whatever is in the list will be used,
for example ['!', '.', '?'] will use '!', '.' and '?'
    :param uniform_weight: whether every sentence chunk gets a uniform weight of 1.0,
        or a weight proportional to its length relative to the document text
:param args: Additional positional arguments
:param kwargs: Additional keyword arguments
"""
def __init__(
self,
min_sent_len: int = 1,
max_sent_len: int = 512,
punct_chars: Optional[List[str]] = None,
uniform_weight: bool = True,
default_traversal_path: Optional[List[str]] = None,
*args,
**kwargs
):
"""Set constructor."""
super().__init__(*args, **kwargs)
self.min_sent_len = min_sent_len
self.max_sent_len = max_sent_len
self.punct_chars = punct_chars
self.uniform_weight = uniform_weight
self.logger = JinaLogger(self.__class__.__name__)
self.default_traversal_path = default_traversal_path or ['r']
if not punct_chars:
self.punct_chars = [
'!',
'.',
'?',
'։',
'؟',
'۔',
'܀',
'܁',
'܂',
'‼',
'‽',
'⁇',
'⁈',
'⁉',
'⸮',
'﹖',
'﹗',
'!',
'.',
'?',
'。',
'。',
'\n',
]
if self.min_sent_len > self.max_sent_len:
self.logger.warning(
'the min_sent_len (={}) should be smaller or equal to the max_sent_len (={})'.format(
self.min_sent_len, self.max_sent_len
)
)
self._slit_pat = re.compile(
r'\s*([^{0}]+)(?<!\s)[{0}]*'.format(''.join(set(self.punct_chars)))
)
@requests
def segment(self, docs: DocumentArray, parameters: Dict, **kwargs):
"""
Split the text into sentences.
:param docs: Documents that contain the text
:param parameters: Dictionary of parameters
:param kwargs: Additional keyword arguments
        :return: None; the split sentences are appended to each document as chunks
"""
traversal_path = parameters.get('traversal_paths', self.default_traversal_path)
flat_docs = docs.traverse_flat(traversal_path)
for doc in flat_docs:
text = doc.text
ret = [
(m.group(0), m.start(), m.end())
for m in re.finditer(self._slit_pat, text)
]
if not ret:
ret = [(text, 0, len(text))]
for ci, (r, s, e) in enumerate(ret):
f = re.sub('\n+', ' ', r).strip()
f = f[: self.max_sent_len]
if len(f) > self.min_sent_len:
doc.chunks.append(
Document(
text=f,
offset=ci,
weight=1.0 if self.uniform_weight else len(f) / len(text),
location=[s, e],
)
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional, List, Dict
import re
from jina import Executor, DocumentArray, requests, Document
from jina.logging.logger import JinaLogger
class Sentencizer(Executor):
"""
    :class:`Sentencizer` splits the text on the doc-level
    into sentences on the chunk-level with a rule-based strategy.
The text is split by the punctuation characters listed in ``punct_chars``.
The sentences that are shorter than the ``min_sent_len``
or longer than the ``max_sent_len`` after stripping will be discarded.
:param min_sent_len: the minimal number of characters,
(including white spaces) of the sentence, by default 1.
:param max_sent_len: the maximal number of characters,
(including white spaces) of the sentence, by default 512.
:param punct_chars: the punctuation characters to split on,
whatever is in the list will be used,
for example ['!', '.', '?'] will use '!', '.' and '?'
    :param uniform_weight: whether every sentence chunk gets a uniform weight of 1.0,
        or a weight proportional to its length relative to the document text
:param args: Additional positional arguments
:param kwargs: Additional keyword arguments
"""
def __init__(self,
min_sent_len: int = 1,
max_sent_len: int = 512,
punct_chars: Optional[List[str]] = None,
uniform_weight: bool = True,
default_traversal_path: Optional[List[str]] = None,
*args, **kwargs):
"""Set constructor."""
super().__init__(*args, **kwargs)
self.min_sent_len = min_sent_len
self.max_sent_len = max_sent_len
self.punct_chars = punct_chars
self.uniform_weight = uniform_weight
self.logger = JinaLogger(self.__class__.__name__)
self.default_traversal_path = default_traversal_path or ['r']
if not punct_chars:
self.punct_chars = ['!', '.', '?', '։', '؟', '۔', '܀', '܁', '܂', '‼', '‽', '⁇', '⁈', '⁉', '⸮', '﹖', '﹗',
'!', '.', '?', '。', '。', '\n']
if self.min_sent_len > self.max_sent_len:
self.logger.warning('the min_sent_len (={}) should be smaller or equal to the max_sent_len (={})'.format(
self.min_sent_len, self.max_sent_len))
        self._slit_pat = re.compile(r'\s*([^{0}]+)(?<!\s)[{0}]*'.format(''.join(set(self.punct_chars))))
@requests
def segment(self, docs: DocumentArray, parameters: Dict, **kwargs):
"""
Split the text into sentences.
:param docs: Documents that contain the text
:param parameters: Dictionary of parameters
:param kwargs: Additional keyword arguments
        :return: None; the split sentences are appended as chunks to the input Documents in place
"""
traversal_path = parameters.get('traversal_paths', self.default_traversal_path)
flat_docs = docs.traverse_flat(traversal_path)
for doc in flat_docs:
text = doc.text
ret = [(m.group(0), m.start(), m.end()) for m in
re.finditer(self._slit_pat, text)]
if not ret:
ret = [(text, 0, len(text))]
for ci, (r, s, e) in enumerate(ret):
f = re.sub('\n+', ' ', r).strip()
f = f[:self.max_sent_len]
if len(f) > self.min_sent_len:
doc.chunks.append(
Document(
text=f,
offset=ci,
weight=1.0 if self.uniform_weight else len(f) / len(text),
location=[s, e])
)
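# --- Illustrative usage sketch (hedged; not part of the original executor) ---
# A minimal example of how the Sentencizer above splits a document into
# sentence chunks. It only relies on the `jina` Document/DocumentArray API
# already imported at the top of this file.
if __name__ == '__main__':
    example_docs = DocumentArray([Document(text='Hello world. How are you? Fine!')])
    Sentencizer().segment(example_docs, parameters={})
    for chunk in example_docs[0].chunks:
        # each chunk carries the sentence text, its offset index and its [start, end] span
        print(chunk.offset, chunk.text, chunk.location)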
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.16.1"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
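# --- Illustrative usage sketch (hedged; not part of the original __init__) ---
# The names re-exported above form the public `datasets` API. A typical call
# pattern looks like the following; "squad" is only an example dataset id and
# loading it requires network access to the Hugging Face Hub.
#
#   from datasets import load_dataset, concatenate_datasets
#
#   train = load_dataset("squad", split="train")
#   validation = load_dataset("squad", split="validation")
#   combined = concatenate_datasets([train, validation])
#   print(combined.num_rows)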
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.16.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
from docarray.base_document.mixins.plot import PlotMixin
from docarray.base_document.mixins.proto import ProtoMixin
__all__ = ['PlotMixin', 'ProtoMixin']
|
from docarray.base_document.mixins.proto import ProtoMixin
__all__ = ['ProtoMixin']
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import Image
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image():
image = Image(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
def test_image_str():
image = parse_obj_as(Image, 'http://myurl.jpg')
assert image.url == 'http://myurl.jpg'
def test_image_np():
image = parse_obj_as(Image, np.zeros((10, 10, 3)))
assert (image.tensor == np.zeros((10, 10, 3))).all()
def test_image_torch():
image = parse_obj_as(Image, torch.zeros(10, 10, 3))
assert (image.tensor == torch.zeros(10, 10, 3)).all()
def test_image_shortcut_doc():
class MyDoc(BaseDocument):
image: Image
image2: Image
image3: Image
doc = MyDoc(
image='http://myurl.jpg',
image2=np.zeros((10, 10, 3)),
image3=torch.zeros(10, 10, 3),
)
assert doc.image.url == 'http://myurl.jpg'
assert (doc.image2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.image3.tensor == torch.zeros(10, 10, 3)).all()
|
import numpy as np
import pytest
from docarray.documents import Image
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image():
image = Image(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
class WeightedLayerPooling(nn.Module):
"""Token embeddings are weighted mean of their different hidden layer representations"""
def __init__(
self, word_embedding_dimension, num_hidden_layers: int = 12, layer_start: int = 4, layer_weights=None
):
super().__init__()
self.config_keys = ["word_embedding_dimension", "layer_start", "num_hidden_layers"]
self.word_embedding_dimension = word_embedding_dimension
self.layer_start = layer_start
self.num_hidden_layers = num_hidden_layers
self.layer_weights = (
layer_weights
if layer_weights is not None
else nn.Parameter(torch.tensor([1] * (num_hidden_layers + 1 - layer_start), dtype=torch.float))
)
def forward(self, features: dict[str, Tensor]):
ft_all_layers = features["all_layer_embeddings"]
all_layer_embedding = torch.stack(ft_all_layers)
        all_layer_embedding = all_layer_embedding[self.layer_start :, :, :, :]  # keep layers from `layer_start` onwards (the 4th layer by default)
weight_factor = self.layer_weights.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand(all_layer_embedding.size())
weighted_average = (weight_factor * all_layer_embedding).sum(dim=0) / self.layer_weights.sum()
features.update({"token_embeddings": weighted_average})
return features
def get_word_embedding_dimension(self):
return self.word_embedding_dimension
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = WeightedLayerPooling(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
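# --- Illustrative usage sketch (hedged; not part of the original module) ---
# Demonstrates the weighted averaging performed in `forward` on dummy data:
# 13 hidden states (embedding layer + 12 transformer layers) for a batch of 2
# sequences of length 4 and dimension 8. Only layers from `layer_start`
# onwards contribute to the weighted mean.
if __name__ == "__main__":
    pooling = WeightedLayerPooling(word_embedding_dimension=8, num_hidden_layers=12, layer_start=4)
    dummy_layers = [torch.randn(2, 4, 8) for _ in range(13)]  # one tensor per hidden state
    out = pooling({"all_layer_embeddings": dummy_layers})
    print(out["token_embeddings"].shape)  # torch.Size([2, 4, 8])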
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
class WeightedLayerPooling(nn.Module):
"""Token embeddings are weighted mean of their different hidden layer representations"""
def __init__(
self, word_embedding_dimension, num_hidden_layers: int = 12, layer_start: int = 4, layer_weights=None
):
super(WeightedLayerPooling, self).__init__()
self.config_keys = ["word_embedding_dimension", "layer_start", "num_hidden_layers"]
self.word_embedding_dimension = word_embedding_dimension
self.layer_start = layer_start
self.num_hidden_layers = num_hidden_layers
self.layer_weights = (
layer_weights
if layer_weights is not None
else nn.Parameter(torch.tensor([1] * (num_hidden_layers + 1 - layer_start), dtype=torch.float))
)
def forward(self, features: dict[str, Tensor]):
ft_all_layers = features["all_layer_embeddings"]
all_layer_embedding = torch.stack(ft_all_layers)
        all_layer_embedding = all_layer_embedding[self.layer_start :, :, :, :]  # keep layers from `layer_start` onwards (the 4th layer by default)
weight_factor = self.layer_weights.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand(all_layer_embedding.size())
weighted_average = (weight_factor * all_layer_embedding).sum(dim=0) / self.layer_weights.sum()
features.update({"token_embeddings": weighted_average})
return features
def get_word_embedding_dimension(self):
return self.word_embedding_dimension
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = WeightedLayerPooling(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
import pytest
from xgboost import testing as tm
from xgboost.testing.ordinal import run_cat_container, run_cat_container_mixed
pytestmark = pytest.mark.skipif(**tm.no_multiple(tm.no_arrow(), tm.no_pandas()))
def test_cat_container() -> None:
run_cat_container("cpu")
def test_cat_container_mixed() -> None:
run_cat_container_mixed("cpu")
|
import pytest
from xgboost import testing as tm
from xgboost.testing.ordinal import run_cat_container, run_cat_container_mixed
pytestmark = pytest.mark.skipif(**tm.no_multiple(tm.no_arrow(), tm.no_pandas()))
def test_cat_container() -> None:
run_cat_container("cpu")
def test_cat_container_mixed() -> None:
run_cat_container_mixed()
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple, Union
import torch
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataElement]]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
priority = 'NORMAL'
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._do_before_epoch = before_epoch
self._do_after_epoch = after_epoch
self._do_after_iter = after_iter
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Empty cache after an iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_iter:
torch.cuda.empty_cache()
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache before an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_before_epoch:
torch.cuda.empty_cache()
def _after_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache after an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_epoch:
torch.cuda.empty_cache()
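# --- Illustrative usage sketch (hedged; not part of the original hook) ---
# In an MMEngine-style config this hook is typically enabled through the
# `custom_hooks` list; the exact keys below follow the usual convention and
# may differ between MMEngine versions.
#
#   custom_hooks = [
#       dict(type='EmptyCacheHook', before_epoch=False, after_epoch=True, after_iter=False),
#   ]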
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple, Union
import torch
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
priority = 'NORMAL'
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._do_before_epoch = before_epoch
self._do_after_epoch = after_epoch
self._do_after_iter = after_iter
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataSample]]] = None,
mode: str = 'train') -> None:
"""Empty cache after an iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_iter:
torch.cuda.empty_cache()
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache before an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_before_epoch:
torch.cuda.empty_cache()
def _after_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache after an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_epoch:
torch.cuda.empty_cache()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .conditional_detr_layers import (ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer)
from .dab_detr_layers import (DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder)
from .deformable_detr_layers import (DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer)
from .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer)
from .dino_layers import CdnQueryGenerator, DinoTransformerDecoder
from .mask2former_layers import (Mask2FormerTransformerDecoder,
Mask2FormerTransformerDecoderLayer,
Mask2FormerTransformerEncoder)
from .utils import (MLP, AdaptivePadding, ConditionalAttention, DynamicConv,
PatchEmbed, PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'nlc_to_nchw', 'nchw_to_nlc', 'AdaptivePadding', 'PatchEmbed',
'PatchMerging', 'inverse_sigmoid', 'DynamicConv', 'MLP',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'coordinate_to_encoding',
'ConditionalAttention', 'DABDetrTransformerDecoderLayer',
'DABDetrTransformerDecoder', 'DABDetrTransformerEncoder',
'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',
'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .conditional_detr_layers import (ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer)
from .dab_detr_layers import (DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder)
from .deformable_detr_layers import (DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer)
from .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer)
from .dino_layers import CdnQueryGenerator, DinoTransformerDecoder
from .utils import (MLP, AdaptivePadding, ConditionalAttention, DynamicConv,
PatchEmbed, PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'nlc_to_nchw', 'nchw_to_nlc', 'AdaptivePadding', 'PatchEmbed',
'PatchMerging', 'inverse_sigmoid', 'DynamicConv', 'MLP',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'coordinate_to_encoding',
'ConditionalAttention', 'DABDetrTransformerDecoderLayer',
'DABDetrTransformerDecoder', 'DABDetrTransformerEncoder',
'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator'
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.flyte_callback import FlyteCallbackHandler
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"FlyteCallbackHandler": "langchain_community.callbacks.flyte_callback",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"FlyteCallbackHandler",
]
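# --- Illustrative usage note (hedged; not part of the original module) ---
# Accessing `FlyteCallbackHandler` through this shim routes attribute lookup
# via `__getattr__` and the DEPRECATED_LOOKUP table above, emitting a
# deprecation warning and returning the class from
# `langchain_community.callbacks.flyte_callback`, assuming the
# `langchain-community` package is installed. For example (if this shim lives
# at its usual path under `langchain.callbacks`):
#
#   from langchain.callbacks.flyte_callback import FlyteCallbackHandler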
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.flyte_callback import FlyteCallbackHandler
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"FlyteCallbackHandler": "langchain_community.callbacks.flyte_callback"
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"FlyteCallbackHandler",
]
|
"""
This script contains an example how to perform re-ranking with a Cross-Encoder for semantic search.
First, we use an efficient Bi-Encoder to retrieve similar questions from the Quora Duplicate Questions dataset:
https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs
Then, we re-rank the hits from the Bi-Encoder using a Cross-Encoder.
"""
import csv
import os
import pickle
import time
from sentence_transformers import CrossEncoder, SentenceTransformer, util
# We use a BiEncoder (SentenceTransformer) that produces embeddings for questions.
# We then search for similar questions using cosine similarity and identify the `num_candidates` most similar questions
model_name = "all-MiniLM-L6-v2"
model = SentenceTransformer(model_name)
num_candidates = 500
# To refine the results, we use a CrossEncoder. A CrossEncoder gets both inputs (input_question, retrieved_question)
# and outputs a score 0...1 indicating the similarity.
cross_encoder_model = CrossEncoder("cross-encoder/stsb-roberta-base")
# Dataset we want to use
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 20000
# Some local file to cache computed embeddings
embedding_cache_path = "quora-embeddings-{}-size-{}.pkl".format(model_name.replace("/", "_"), max_corpus_size)
# Check if embedding cache path exists
if not os.path.exists(embedding_cache_path):
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row["question1"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences.add(row["question2"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, show_progress_bar=True, convert_to_tensor=True)
print("Store file on disc")
with open(embedding_cache_path, "wb") as fOut:
pickle.dump({"sentences": corpus_sentences, "embeddings": corpus_embeddings}, fOut)
else:
print("Load pre-computed embeddings from disc")
with open(embedding_cache_path, "rb") as fIn:
cache_data = pickle.load(fIn)
corpus_sentences = cache_data["sentences"][0:max_corpus_size]
corpus_embeddings = cache_data["embeddings"][0:max_corpus_size]
###############################
print(f"Corpus loaded with {len(corpus_sentences)} sentences / embeddings")
while True:
inp_question = input("Please enter a question: ")
print("Input question:", inp_question)
# First, retrieve candidates using cosine similarity search
start_time = time.time()
question_embedding = model.encode(inp_question, convert_to_tensor=True)
hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=num_candidates)
hits = hits[0] # Get the hits for the first query
print(f"Cosine-Similarity search took {time.time() - start_time:.3f} seconds")
print("Top 5 hits with cosine-similarity:")
for hit in hits[0:5]:
print("\t{:.3f}\t{}".format(hit["score"], corpus_sentences[hit["corpus_id"]]))
# Now, do the re-ranking with the cross-encoder
start_time = time.time()
sentence_pairs = [[inp_question, corpus_sentences[hit["corpus_id"]]] for hit in hits]
ce_scores = cross_encoder_model.predict(sentence_pairs)
for idx in range(len(hits)):
hits[idx]["cross-encoder_score"] = ce_scores[idx]
# Sort list by CrossEncoder scores
hits = sorted(hits, key=lambda x: x["cross-encoder_score"], reverse=True)
print(f"\nRe-ranking with CrossEncoder took {time.time() - start_time:.3f} seconds")
print("Top 5 hits with CrossEncoder:")
for hit in hits[0:5]:
print("\t{:.3f}\t{}".format(hit["cross-encoder_score"], corpus_sentences[hit["corpus_id"]]))
print("\n\n========\n")
|
"""
This script contains an example how to perform re-ranking with a Cross-Encoder for semantic search.
First, we use an efficient Bi-Encoder to retrieve similar questions from the Quora Duplicate Questions dataset:
https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs
Then, we re-rank the hits from the Bi-Encoder using a Cross-Encoder.
"""
import csv
import os
import pickle
import time
from sentence_transformers import CrossEncoder, SentenceTransformer, util
# We use a BiEncoder (SentenceTransformer) that produces embeddings for questions.
# We then search for similar questions using cosine similarity and identify the `num_candidates` most similar questions
model_name = "all-MiniLM-L6-v2"
model = SentenceTransformer(model_name)
num_candidates = 500
# To refine the results, we use a CrossEncoder. A CrossEncoder gets both inputs (input_question, retrieved_question)
# and outputs a score 0...1 indicating the similarity.
cross_encoder_model = CrossEncoder("cross-encoder/stsb-roberta-base")
# Dataset we want to use
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 20000
# Some local file to cache computed embeddings
embedding_cache_path = "quora-embeddings-{}-size-{}.pkl".format(model_name.replace("/", "_"), max_corpus_size)
# Check if embedding cache path exists
if not os.path.exists(embedding_cache_path):
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row["question1"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences.add(row["question2"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, show_progress_bar=True, convert_to_tensor=True)
print("Store file on disc")
with open(embedding_cache_path, "wb") as fOut:
pickle.dump({"sentences": corpus_sentences, "embeddings": corpus_embeddings}, fOut)
else:
print("Load pre-computed embeddings from disc")
with open(embedding_cache_path, "rb") as fIn:
cache_data = pickle.load(fIn)
corpus_sentences = cache_data["sentences"][0:max_corpus_size]
corpus_embeddings = cache_data["embeddings"][0:max_corpus_size]
###############################
print("Corpus loaded with {} sentences / embeddings".format(len(corpus_sentences)))
while True:
inp_question = input("Please enter a question: ")
print("Input question:", inp_question)
# First, retrieve candidates using cosine similarity search
start_time = time.time()
question_embedding = model.encode(inp_question, convert_to_tensor=True)
hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=num_candidates)
hits = hits[0] # Get the hits for the first query
print("Cosine-Similarity search took {:.3f} seconds".format(time.time() - start_time))
print("Top 5 hits with cosine-similarity:")
for hit in hits[0:5]:
print("\t{:.3f}\t{}".format(hit["score"], corpus_sentences[hit["corpus_id"]]))
# Now, do the re-ranking with the cross-encoder
start_time = time.time()
sentence_pairs = [[inp_question, corpus_sentences[hit["corpus_id"]]] for hit in hits]
ce_scores = cross_encoder_model.predict(sentence_pairs)
for idx in range(len(hits)):
hits[idx]["cross-encoder_score"] = ce_scores[idx]
# Sort list by CrossEncoder scores
hits = sorted(hits, key=lambda x: x["cross-encoder_score"], reverse=True)
print("\nRe-ranking with CrossEncoder took {:.3f} seconds".format(time.time() - start_time))
print("Top 5 hits with CrossEncoder:")
for hit in hits[0:5]:
print("\t{:.3f}\t{}".format(hit["cross-encoder_score"], corpus_sentences[hit["corpus_id"]]))
print("\n\n========\n")
|
# Copyright (c) OpenMMLab. All rights reserved.
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.res2net import Bottle2neck
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from mmdet.models.layers import SimplifiedBasicBlock
def is_block(modules):
"""Check if is ResNet building block."""
if isinstance(modules, (BasicBlock, Bottleneck, BottleneckX, Bottle2neck,
SimplifiedBasicBlock)):
return True
return False
def is_norm(modules):
"""Check if is one of the norms."""
if isinstance(modules, (GroupNorm, _BatchNorm)):
return True
return False
def check_norm_state(modules, train_state):
"""Check if norm layer is in correct train state."""
for mod in modules:
if isinstance(mod, _BatchNorm):
if mod.training != train_state:
return False
return True
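# --- Illustrative usage sketch (hedged; not part of the original helpers) ---
# Quick self-check of the helpers above using a plain torch norm layer.
if __name__ == '__main__':
    import torch.nn as nn
    bn = nn.BatchNorm2d(8)
    print(is_norm(bn))    # True: BatchNorm2d is a _BatchNorm subclass
    print(is_block(bn))   # False: not a ResNet-style building block
    bn.eval()
    print(check_norm_state([bn], train_state=False))  # True: eval() matches train_state=False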
|
# Copyright (c) OpenMMLab. All rights reserved.
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.res2net import Bottle2neck
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from mmdet.models.utils import SimplifiedBasicBlock
def is_block(modules):
"""Check if is ResNet building block."""
if isinstance(modules, (BasicBlock, Bottleneck, BottleneckX, Bottle2neck,
SimplifiedBasicBlock)):
return True
return False
def is_norm(modules):
"""Check if is one of the norms."""
if isinstance(modules, (GroupNorm, _BatchNorm)):
return True
return False
def check_norm_state(modules, train_state):
"""Check if norm layer is in correct train state."""
for mod in modules:
if isinstance(mod, _BatchNorm):
if mod.training != train_state:
return False
return True
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from mmcv.runner import BaseModule
from mmdet.core.utils import OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
@MODELS.register_module()
class BasePanopticFusionHead(BaseModule, metaclass=ABCMeta):
"""Base class for panoptic heads."""
def __init__(self,
num_things_classes: int = 80,
num_stuff_classes: int = 53,
test_cfg: OptConfigType = None,
loss_panoptic: OptConfigType = None,
init_cfg: OptMultiConfig = None,
**kwargs) -> None:
super().__init__(init_cfg=init_cfg)
self.num_things_classes = num_things_classes
self.num_stuff_classes = num_stuff_classes
self.num_classes = num_things_classes + num_stuff_classes
self.test_cfg = test_cfg
if loss_panoptic:
self.loss_panoptic = MODELS.build(loss_panoptic)
else:
self.loss_panoptic = None
@property
def with_loss(self) -> bool:
"""bool: whether the panoptic head contains loss function."""
return self.loss_panoptic is not None
@abstractmethod
def loss(self, **kwargs):
"""Loss function."""
@abstractmethod
def predict(self, **kwargs):
"""Predict function."""
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from mmcv.runner import BaseModule
from ...builder import build_loss
class BasePanopticFusionHead(BaseModule, metaclass=ABCMeta):
"""Base class for panoptic heads."""
def __init__(self,
num_things_classes=80,
num_stuff_classes=53,
test_cfg=None,
loss_panoptic=None,
init_cfg=None,
**kwargs):
super(BasePanopticFusionHead, self).__init__(init_cfg)
self.num_things_classes = num_things_classes
self.num_stuff_classes = num_stuff_classes
self.num_classes = num_things_classes + num_stuff_classes
self.test_cfg = test_cfg
if loss_panoptic:
self.loss_panoptic = build_loss(loss_panoptic)
else:
self.loss_panoptic = None
@property
def with_loss(self):
"""bool: whether the panoptic head contains loss function."""
return self.loss_panoptic is not None
@abstractmethod
def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
"""Forward function during training."""
@abstractmethod
def simple_test(self,
img_metas,
det_labels,
mask_preds,
seg_preds,
det_bboxes,
cfg=None,
**kwargs):
"""Test without augmentation."""
|
import os
from pathlib import Path
from typing import Any, Callable, Optional, Union
from .folder import default_loader, ImageFolder
from .utils import download_and_extract_archive
class EuroSAT(ImageFolder):
"""RGB version of the `EuroSAT <https://github.com/phelber/eurosat>`_ Dataset.
For the MS version of the dataset, see
`TorchGeo <https://torchgeo.readthedocs.io/en/stable/api/datasets.html#eurosat>`__.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where ``root/eurosat`` exists.
        transform (callable, optional): A function/transform that takes in a PIL image or torch.Tensor, depending on the given loader,
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
"""
def __init__(
self,
root: Union[str, Path],
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
loader: Callable[[str], Any] = default_loader,
) -> None:
self.root = os.path.expanduser(root)
self._base_folder = os.path.join(self.root, "eurosat")
self._data_folder = os.path.join(self._base_folder, "2750")
if download:
self.download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
super().__init__(
self._data_folder,
transform=transform,
target_transform=target_transform,
loader=loader,
)
self.root = os.path.expanduser(root)
def __len__(self) -> int:
return len(self.samples)
def _check_exists(self) -> bool:
return os.path.exists(self._data_folder)
def download(self) -> None:
if self._check_exists():
return
os.makedirs(self._base_folder, exist_ok=True)
download_and_extract_archive(
"https://huggingface.co/datasets/torchgeo/eurosat/resolve/c877bcd43f099cd0196738f714544e355477f3fd/EuroSAT.zip",
download_root=self._base_folder,
md5="c8fa014336c82ac7804f0398fcb19387",
)
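# --- Illustrative usage sketch (hedged; not part of the original dataset) ---
# Downloads the RGB EuroSAT archive (roughly 90 MB) into ./data and reads one
# sample; the ToTensor transform is optional and shown only as an example.
if __name__ == "__main__":
    from torchvision import transforms

    dataset = EuroSAT(root="./data", download=True, transform=transforms.ToTensor())
    image, label = dataset[0]
    print(len(dataset), image.shape, label)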
|
import os
from pathlib import Path
from typing import Any, Callable, Optional, Union
from .folder import default_loader, ImageFolder
from .utils import download_and_extract_archive
class EuroSAT(ImageFolder):
"""RGB version of the `EuroSAT <https://github.com/phelber/eurosat>`_ Dataset.
For the MS version of the dataset, see
`TorchGeo <https://torchgeo.readthedocs.io/en/stable/api/datasets.html#eurosat>`__.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where ``root/eurosat`` exists.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
"""
def __init__(
self,
root: Union[str, Path],
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
loader: Callable[[str], Any] = default_loader,
) -> None:
self.root = os.path.expanduser(root)
self._base_folder = os.path.join(self.root, "eurosat")
self._data_folder = os.path.join(self._base_folder, "2750")
if download:
self.download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
super().__init__(
self._data_folder,
transform=transform,
target_transform=target_transform,
loader=loader,
)
self.root = os.path.expanduser(root)
def __len__(self) -> int:
return len(self.samples)
def _check_exists(self) -> bool:
return os.path.exists(self._data_folder)
def download(self) -> None:
if self._check_exists():
return
os.makedirs(self._base_folder, exist_ok=True)
download_and_extract_archive(
"https://huggingface.co/datasets/torchgeo/eurosat/resolve/c877bcd43f099cd0196738f714544e355477f3fd/EuroSAT.zip",
download_root=self._base_folder,
md5="c8fa014336c82ac7804f0398fcb19387",
)
|
_base_ = './cascade-mask-rcnn_r101_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py' # noqa: E501
model = dict(
roi_head=dict(
mask_head=dict(
predictor_cfg=dict(type='NormedConv2d', tempearture=20))))
|
_base_ = './cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py' # noqa: E501
model = dict(
roi_head=dict(
mask_head=dict(
predictor_cfg=dict(type='NormedConv2d', tempearture=20))))
|
# training schedule for 1x
train_cfg = dict(by_epoch=True, max_epochs=12)
val_cfg = dict(interval=1)
test_cfg = dict()
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
|
# training schedule for 1x
train_cfg = dict(by_epoch=True, max_epochs=12)
val_cfg = dict(interval=1)
test_cfg = dict()
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
|