input | output
---|---
import re
import sys
file_name = sys.argv[1]
with open(file_name, 'r') as f:
input = f.read()
# official semver regex: https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string
versions_regex = r'(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)'
output = re.sub(
f'(?P<dep>[a-zA-Z0-9]+)(==|>=)(?P<version>{versions_regex}).*:',
r'\g<dep>==\g<version>:',
input,
)
with open(file_name, 'w') as f:
f.write(output)
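# Illustrative example (assumed input format, not taken from the original script):
# a dependency line such as "torch>=1.13.0 (optional):" would be rewritten by the
# substitution above to "torch==1.13.0:", i.e. the version is pinned with "==" and
# everything between the semver and the trailing colon is dropped.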
|
import re
import sys
file_name = sys.argv[1]
with open(file_name, 'r') as f:
input = f.read()
# official semver regex: https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string
versions_regex = r'(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)'
output = re.sub(f'(?P<dep>[a-zA-Z0-9]+)(==|>=)(?P<version>{versions_regex}).*:', r'\g<dep>==\g<version>:', input)
with open(file_name, 'w') as f:
f.write(output)
|
from __future__ import annotations
from .CSRSparsity import CSRSparsity
from .MLMTransformer import MLMTransformer
from .SpladePooling import SpladePooling
__all__ = ["CSRSparsity", "MLMTransformer", "SpladePooling"]
|
from __future__ import annotations
from .CSRSparsity import CSRSparsity
from .MLMTransformer import MLMTransformer
from .SpladePooling import SpladePooling
from .TopKActivation import TopKActivation
__all__ = ["CSRSparsity", "TopKActivation", "MLMTransformer", "SpladePooling"]
|
"""
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
This example presents how to estimate and visualize the variance of the Receiver
Operating Characteristic (ROC) metric using cross-validation.
ROC curves typically feature true positive rate (TPR) on the Y axis, and false
positive rate (FPR) on the X axis. This means that the top left corner of the
plot is the "ideal" point - a FPR of zero, and a TPR of one. This is not very
realistic, but it does mean that a larger Area Under the Curve (AUC) is usually
better. The "steepness" of ROC curves is also important, since it is ideal to
maximize the TPR while minimizing the FPR.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean AUC, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how different
the splits generated by K-fold cross-validation are from one another.
.. note::
See :ref:`sphx_glr_auto_examples_model_selection_plot_roc.py` for a
complement of the present example explaining the averaging strategies to
generalize the metrics for multiclass classifiers.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Load and prepare data
# =====================
#
# We import the :ref:`iris_dataset` which contains 3 classes, each one
# corresponding to a type of iris plant. One class is linearly separable from
# the other 2; the latter are **not** linearly separable from each other.
#
# In the following we binarize the dataset by dropping the "virginica" class
# (`class_id=2`). This means that the "versicolor" class (`class_id=1`) is
# regarded as the positive class and "setosa" as the negative class
# (`class_id=0`).
import numpy as np
from sklearn.datasets import load_iris
iris = load_iris()
target_names = iris.target_names
X, y = iris.data, iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# %%
# We also add noisy features to make the problem harder.
random_state = np.random.RandomState(0)
X = np.concatenate([X, random_state.randn(n_samples, 200 * n_features)], axis=1)
# %%
# Classification and ROC analysis
# -------------------------------
#
# Here we run :func:`~sklearn.model_selection.cross_validate` on a
# :class:`~sklearn.svm.SVC` classifier, then use the computed cross-validation results
# to plot the ROC curves fold-wise. Notice that the baseline to define the chance
# level (dashed ROC curve) is a classifier that would always predict the most
# frequent class.
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.metrics import RocCurveDisplay, auc
from sklearn.model_selection import StratifiedKFold, cross_validate
n_splits = 6
cv = StratifiedKFold(n_splits=n_splits)
classifier = svm.SVC(kernel="linear", probability=True, random_state=random_state)
cv_results = cross_validate(
classifier, X, y, cv=cv, return_estimator=True, return_indices=True
)
prop_cycle = plt.rcParams["axes.prop_cycle"]
colors = prop_cycle.by_key()["color"]
curve_kwargs_list = [
dict(alpha=0.3, lw=1, color=colors[fold % len(colors)]) for fold in range(n_splits)
]
names = [f"ROC fold {idx}" for idx in range(n_splits)]
mean_fpr = np.linspace(0, 1, 100)
interp_tprs = []
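# Each fold's ROC curve is computed at its own set of thresholds, so the per-fold
# TPR values are interpolated onto the common `mean_fpr` grid defined above before
# they can be averaged point-wise across folds.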
_, ax = plt.subplots(figsize=(6, 6))
viz = RocCurveDisplay.from_cv_results(
cv_results,
X,
y,
ax=ax,
name=names,
curve_kwargs=curve_kwargs_list,
plot_chance_level=True,
)
for idx in range(n_splits):
interp_tpr = np.interp(mean_fpr, viz.fpr[idx], viz.tpr[idx])
interp_tpr[0] = 0.0
interp_tprs.append(interp_tpr)
mean_tpr = np.mean(interp_tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(viz.roc_auc)
ax.plot(
mean_fpr,
mean_tpr,
color="b",
label=r"Mean ROC (AUC = %0.2f $\pm$ %0.2f)" % (mean_auc, std_auc),
lw=2,
alpha=0.8,
)
std_tpr = np.std(interp_tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(
mean_fpr,
tprs_lower,
tprs_upper,
color="grey",
alpha=0.2,
label=r"$\pm$ 1 std. dev.",
)
ax.set(
xlabel="False Positive Rate",
ylabel="True Positive Rate",
title=f"Mean ROC curve with variability\n(Positive label '{target_names[1]}')",
)
ax.legend(loc="lower right")
plt.show()
|
"""
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
This example presents how to estimate and visualize the variance of the Receiver
Operating Characteristic (ROC) metric using cross-validation.
ROC curves typically feature true positive rate (TPR) on the Y axis, and false
positive rate (FPR) on the X axis. This means that the top left corner of the
plot is the "ideal" point - a FPR of zero, and a TPR of one. This is not very
realistic, but it does mean that a larger Area Under the Curve (AUC) is usually
better. The "steepness" of ROC curves is also important, since it is ideal to
maximize the TPR while minimizing the FPR.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean AUC, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how different
the splits generated by K-fold cross-validation are from one another.
.. note::
See :ref:`sphx_glr_auto_examples_model_selection_plot_roc.py` for a
complement of the present example explaining the averaging strategies to
generalize the metrics for multiclass classifiers.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Load and prepare data
# =====================
#
# We import the :ref:`iris_dataset` which contains 3 classes, each one
# corresponding to a type of iris plant. One class is linearly separable from
# the other 2; the latter are **not** linearly separable from each other.
#
# In the following we binarize the dataset by dropping the "virginica" class
# (`class_id=2`). This means that the "versicolor" class (`class_id=1`) is
# regarded as the positive class and "setosa" as the negative class
# (`class_id=0`).
import numpy as np
from sklearn.datasets import load_iris
iris = load_iris()
target_names = iris.target_names
X, y = iris.data, iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# %%
# We also add noisy features to make the problem harder.
random_state = np.random.RandomState(0)
X = np.concatenate([X, random_state.randn(n_samples, 200 * n_features)], axis=1)
# %%
# Classification and ROC analysis
# -------------------------------
#
# Here we run a :class:`~sklearn.svm.SVC` classifier with cross-validation and
# plot the ROC curves fold-wise. Notice that the baseline to define the chance
# level (dashed ROC curve) is a classifier that would always predict the most
# frequent class.
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.metrics import RocCurveDisplay, auc
from sklearn.model_selection import StratifiedKFold
n_splits = 6
cv = StratifiedKFold(n_splits=n_splits)
classifier = svm.SVC(kernel="linear", probability=True, random_state=random_state)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
fig, ax = plt.subplots(figsize=(6, 6))
for fold, (train, test) in enumerate(cv.split(X, y)):
classifier.fit(X[train], y[train])
viz = RocCurveDisplay.from_estimator(
classifier,
X[test],
y[test],
name=f"ROC fold {fold}",
curve_kwargs=dict(alpha=0.3, lw=1),
ax=ax,
plot_chance_level=(fold == n_splits - 1),
)
interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
aucs.append(viz.roc_auc)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(
mean_fpr,
mean_tpr,
color="b",
label=r"Mean ROC (AUC = %0.2f $\pm$ %0.2f)" % (mean_auc, std_auc),
lw=2,
alpha=0.8,
)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(
mean_fpr,
tprs_lower,
tprs_upper,
color="grey",
alpha=0.2,
label=r"$\pm$ 1 std. dev.",
)
ax.set(
xlabel="False Positive Rate",
ylabel="True Positive Rate",
title=f"Mean ROC curve with variability\n(Positive label '{target_names[1]}')",
)
ax.legend(loc="lower right")
plt.show()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .image import imrenormalize
from .make_divisible import make_divisible
from .misc import (aligned_bilinear, center_of_mass, empty_instances,
filter_gt_instances, filter_scores_and_topk, flip_tensor,
generate_coordinate, images_to_levels, interpolate_as,
levels_to_images, mask2ndarray, multi_apply,
relative_coordinate_maps, rename_loss_dict,
reweight_loss_dict, samplelist_boxtype2tensor,
select_single_mlvl, sigmoid_geometric_mean,
unfold_wo_center, unmap, unpack_gt_instances)
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
from .vlfuse_helper import BertEncoderLayer, VLFuse, permute_and_flatten
__all__ = [
'gaussian_radius', 'gen_gaussian_target', 'make_divisible',
'get_local_maximum', 'get_topk_from_heatmap', 'transpose_and_gather_feat',
'interpolate_as', 'sigmoid_geometric_mean', 'gather_feat',
'preprocess_panoptic_gt', 'get_uncertain_point_coords_with_randomness',
'get_uncertainty', 'unpack_gt_instances', 'empty_instances',
'center_of_mass', 'filter_scores_and_topk', 'flip_tensor',
'generate_coordinate', 'levels_to_images', 'mask2ndarray', 'multi_apply',
'select_single_mlvl', 'unmap', 'images_to_levels',
'samplelist_boxtype2tensor', 'filter_gt_instances', 'rename_loss_dict',
'reweight_loss_dict', 'relative_coordinate_maps', 'aligned_bilinear',
'unfold_wo_center', 'imrenormalize', 'VLFuse', 'permute_and_flatten',
'BertEncoderLayer'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .make_divisible import make_divisible
from .misc import (aligned_bilinear, center_of_mass, empty_instances,
filter_gt_instances, filter_scores_and_topk, flip_tensor,
generate_coordinate, images_to_levels, interpolate_as,
levels_to_images, mask2ndarray, multi_apply,
relative_coordinate_maps, rename_loss_dict,
reweight_loss_dict, samplelist_boxtype2tensor,
select_single_mlvl, sigmoid_geometric_mean,
unfold_wo_center, unmap, unpack_gt_instances)
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
__all__ = [
'gaussian_radius', 'gen_gaussian_target', 'make_divisible',
'get_local_maximum', 'get_topk_from_heatmap', 'transpose_and_gather_feat',
'interpolate_as', 'sigmoid_geometric_mean', 'gather_feat',
'preprocess_panoptic_gt', 'get_uncertain_point_coords_with_randomness',
'get_uncertainty', 'unpack_gt_instances', 'empty_instances',
'center_of_mass', 'filter_scores_and_topk', 'flip_tensor',
'generate_coordinate', 'levels_to_images', 'mask2ndarray', 'multi_apply',
'select_single_mlvl', 'unmap', 'images_to_levels',
'samplelist_boxtype2tensor', 'filter_gt_instances', 'rename_loss_dict',
'reweight_loss_dict', 'relative_coordinate_maps', 'aligned_bilinear',
'unfold_wo_center'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from torch.autograd import Function
from torch.nn import functional as F
class SigmoidGeometricMean(Function):
"""Forward and backward function of geometric mean of two sigmoid
functions.
This implementation with an analytical gradient function substitutes
the autograd function of (x.sigmoid() * y.sigmoid()).sqrt(). The
original implementation incurs NaN during gradient backpropagation
if both x and y are very small values.
"""
@staticmethod
def forward(ctx, x, y):
x_sigmoid = x.sigmoid()
y_sigmoid = y.sigmoid()
z = (x_sigmoid * y_sigmoid).sqrt()
ctx.save_for_backward(x_sigmoid, y_sigmoid, z)
return z
@staticmethod
def backward(ctx, grad_output):
x_sigmoid, y_sigmoid, z = ctx.saved_tensors
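# Analytical gradient: with z = sqrt(sigmoid(x) * sigmoid(y)),
# dz/dx = z * (1 - sigmoid(x)) / 2 and dz/dy = z * (1 - sigmoid(y)) / 2,
# which stay finite (they go to 0) even when both sigmoids underflow to 0,
# whereas autograd through sqrt() would divide by z and produce NaN.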
grad_x = grad_output * z * (1 - x_sigmoid) / 2
grad_y = grad_output * z * (1 - y_sigmoid) / 2
return grad_x, grad_y
sigmoid_geometric_mean = SigmoidGeometricMean.apply
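# Usage sketch (illustrative; `cls_score` and `centerness` are hypothetical tensors
# of the same shape): joint = sigmoid_geometric_mean(cls_score, centerness) computes
# sqrt(cls_score.sigmoid() * centerness.sigmoid()) with the stable backward above.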
def interpolate_as(source, target, mode='bilinear', align_corners=False):
"""Interpolate the `source` to the shape of the `target`.
The `source` must be a Tensor, but the `target` can be a Tensor or a
np.ndarray with the shape (..., target_h, target_w).
Args:
source (Tensor): A 3D/4D Tensor with the shape (N, H, W) or
(N, C, H, W).
target (Tensor | np.ndarray): The interpolation target with the shape
(..., target_h, target_w).
mode (str): Algorithm used for interpolation. The options are the
same as those in F.interpolate(). Default: ``'bilinear'``.
align_corners (bool): The same as the argument in F.interpolate().
Returns:
Tensor: The interpolated source Tensor.
"""
assert len(target.shape) >= 2
def _interpolate_as(source, target, mode='bilinear', align_corners=False):
"""Interpolate the `source` (4D) to the shape of the `target`."""
target_h, target_w = target.shape[-2:]
source_h, source_w = source.shape[-2:]
if target_h != source_h or target_w != source_w:
source = F.interpolate(
source,
size=(target_h, target_w),
mode=mode,
align_corners=align_corners)
return source
if len(source.shape) == 3:
source = source[:, None, :, :]
source = _interpolate_as(source, target, mode, align_corners)
return source[:, 0, :, :]
else:
return _interpolate_as(source, target, mode, align_corners)
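# Usage sketch (illustrative; `mask_logits` and `image` are hypothetical tensors):
# given mask_logits of shape (N, H', W') and image of shape (N, 3, H, W),
# interpolate_as(mask_logits, image) returns a tensor of shape (N, H, W), resized
# to the spatial size of `image` via bilinear interpolation.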
|
# Copyright (c) OpenMMLab. All rights reserved.
from torch.nn import functional as F
def interpolate_as(source, target, mode='bilinear', align_corners=False):
"""Interpolate the `source` to the shape of the `target`.
The `source` must be a Tensor, but the `target` can be a Tensor or a
np.ndarray with the shape (..., target_h, target_w).
Args:
source (Tensor): A 3D/4D Tensor with the shape (N, H, W) or
(N, C, H, W).
target (Tensor | np.ndarray): The interpolation target with the shape
(..., target_h, target_w).
mode (str): Algorithm used for interpolation. The options are the
same as those in F.interpolate(). Default: ``'bilinear'``.
align_corners (bool): The same as the argument in F.interpolate().
Returns:
Tensor: The interpolated source Tensor.
"""
assert len(target.shape) >= 2
def _interpolate_as(source, target, mode='bilinear', align_corners=False):
"""Interpolate the `source` (4D) to the shape of the `target`."""
target_h, target_w = target.shape[-2:]
source_h, source_w = source.shape[-2:]
if target_h != source_h or target_w != source_w:
source = F.interpolate(
source,
size=(target_h, target_w),
mode=mode,
align_corners=align_corners)
return source
if len(source.shape) == 3:
source = source[:, None, :, :]
source = _interpolate_as(source, target, mode, align_corners)
return source[:, 0, :, :]
else:
return _interpolate_as(source, target, mode, align_corners)
|
import json
from json import JSONDecodeError
from typing import Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolCall,
)
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import MultiActionAgentOutputParser
class ToolAgentAction(AgentActionMessageLog): # type: ignore[override]
tool_call_id: str
"""Tool call that this message is responding to."""
def parse_ai_message_to_tool_action(
message: BaseMessage,
) -> Union[list[AgentAction], AgentFinish]:
"""Parse an AI message potentially containing tool_calls."""
if not isinstance(message, AIMessage):
raise TypeError(f"Expected an AI message got {type(message)}")
actions: list = []
if message.tool_calls:
tool_calls = message.tool_calls
else:
if not message.additional_kwargs.get("tool_calls"):
return AgentFinish(
return_values={"output": message.content}, log=str(message.content)
)
# Best-effort parsing
tool_calls = []
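# Assumed shape of each raw entry in `additional_kwargs["tool_calls"]`, as accessed
# below: {"id": "<call id>", "function": {"name": "<tool name>", "arguments": "<JSON string>"}}.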
for tool_call in message.additional_kwargs["tool_calls"]:
function = tool_call["function"]
function_name = function["name"]
try:
args = json.loads(function["arguments"] or "{}")
tool_calls.append(
ToolCall(name=function_name, args=args, id=tool_call["id"])
)
except JSONDecodeError:
raise OutputParserException(
f"Could not parse tool input: {function} because "
f"the `arguments` is not valid JSON."
)
for tool_call in tool_calls:
# HACK HACK HACK:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
function_name = tool_call["name"]
_tool_input = tool_call["args"]
if "__arg1" in _tool_input:
tool_input = _tool_input["__arg1"]
else:
tool_input = _tool_input
content_msg = f"responded: {message.content}\n" if message.content else "\n"
log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
actions.append(
ToolAgentAction(
tool=function_name,
tool_input=tool_input,
log=log,
message_log=[message],
tool_call_id=tool_call["id"],
)
)
return actions
class ToolsAgentOutputParser(MultiActionAgentOutputParser):
"""Parses a message into agent actions/finish.
If a tool_calls parameter is passed, then that is used to get
the tool names and tool inputs.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "tools-agent-output-parser"
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Union[list[AgentAction], AgentFinish]:
if not isinstance(result[0], ChatGeneration):
raise ValueError("This output parser only works on ChatGeneration output")
message = result[0].message
return parse_ai_message_to_tool_action(message)
def parse(self, text: str) -> Union[list[AgentAction], AgentFinish]:
raise ValueError("Can only parse messages")
|
import json
from json import JSONDecodeError
from typing import List, Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolCall,
)
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import MultiActionAgentOutputParser
class ToolAgentAction(AgentActionMessageLog): # type: ignore[override]
tool_call_id: str
"""Tool call that this message is responding to."""
def parse_ai_message_to_tool_action(
message: BaseMessage,
) -> Union[List[AgentAction], AgentFinish]:
"""Parse an AI message potentially containing tool_calls."""
if not isinstance(message, AIMessage):
raise TypeError(f"Expected an AI message got {type(message)}")
actions: List = []
if message.tool_calls:
tool_calls = message.tool_calls
else:
if not message.additional_kwargs.get("tool_calls"):
return AgentFinish(
return_values={"output": message.content}, log=str(message.content)
)
# Best-effort parsing
tool_calls = []
for tool_call in message.additional_kwargs["tool_calls"]:
function = tool_call["function"]
function_name = function["name"]
try:
args = json.loads(function["arguments"] or "{}")
tool_calls.append(
ToolCall(name=function_name, args=args, id=tool_call["id"])
)
except JSONDecodeError:
raise OutputParserException(
f"Could not parse tool input: {function} because "
f"the `arguments` is not valid JSON."
)
for tool_call in tool_calls:
# HACK HACK HACK:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
function_name = tool_call["name"]
_tool_input = tool_call["args"]
if "__arg1" in _tool_input:
tool_input = _tool_input["__arg1"]
else:
tool_input = _tool_input
content_msg = f"responded: {message.content}\n" if message.content else "\n"
log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
actions.append(
ToolAgentAction(
tool=function_name,
tool_input=tool_input,
log=log,
message_log=[message],
tool_call_id=tool_call["id"],
)
)
return actions
class ToolsAgentOutputParser(MultiActionAgentOutputParser):
"""Parses a message into agent actions/finish.
If a tool_calls parameter is passed, then that is used to get
the tool names and tool inputs.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "tools-agent-output-parser"
def parse_result(
self, result: List[Generation], *, partial: bool = False
) -> Union[List[AgentAction], AgentFinish]:
if not isinstance(result[0], ChatGeneration):
raise ValueError("This output parser only works on ChatGeneration output")
message = result[0].message
return parse_ai_message_to_tool_action(message)
def parse(self, text: str) -> Union[List[AgentAction], AgentFinish]:
raise ValueError("Can only parse messages")
|
import json
from typing import Dict, List, Union
from docarray.array.abstract_array import AnyDocumentArray
from docarray.array.array.array import DocumentArray
def filter(
docs: AnyDocumentArray,
query: Union[str, Dict, List[Dict]],
) -> AnyDocumentArray:
"""
Filter the Documents in the index according to the given filter query.
EXAMPLE USAGE
.. code-block:: python
from docarray import DocumentArray, BaseDocument
from docarray.documents import Text, Image
from docarray.util.filter import filter
class MyDocument(BaseDocument):
caption: Text
image: Image
price: int
docs = DocumentArray[MyDocument](
[
MyDocument(
caption='A tiger in the jungle',
image=Image(url='tigerphoto.png'),
price=100,
),
MyDocument(
caption='A swimming turtle', image=Image(url='turtlepic.png'), price=50
),
MyDocument(
caption='A couple birdwatching with binoculars',
image=Image(url='binocularsphoto.png'),
price=30,
),
]
)
query = {
'$and': {
'image__url': {'$regex': 'photo'},
'price': {'$lte': 50},
}
}
results = filter(docs, query)
assert len(results) == 1
assert results[0].price == 30
assert results[0].caption == 'A couple birdwatching with binoculars'
assert results[0].image.url == 'binocularsphoto.png'
:param docs: the DocumentArray where to apply the filter
:param query: the query to filter by
:return: A DocumentArray containing the Documents
in `docs` that fulfill the filter conditions in the `query`
"""
from docarray.utils.query_language.query_parser import QueryParser
if query:
query = query if not isinstance(query, str) else json.loads(query)
parser = QueryParser(query)
return DocumentArray.__class_getitem__(docs.document_type)(
d for d in docs if parser.evaluate(d)
)
else:
return docs
|
import json
from typing import Dict, List, Union
from docarray.array.abstract_array import AnyDocumentArray
from docarray.array.array.array import DocumentArray
def filter(
docs: AnyDocumentArray,
query: Union[str, Dict, List[Dict]],
) -> AnyDocumentArray:
"""
Filter the Documents in the index according to the given filter query.
EXAMPLE USAGE
.. code-block:: python
from docarray import DocumentArray, BaseDocument
from docarray.documents import Text, Image
from docarray.util.filter import filter
class MyDocument(BaseDocument):
caption: Text
image: Image
price: int
docs = DocumentArray[MyDocument](
[
MyDocument(
caption='A tiger in the jungle',
image=Image(url='tigerphoto.png'),
price=100,
),
MyDocument(
caption='A swimming turtle', image=Image(url='turtlepic.png'), price=50
),
MyDocument(
caption='A couple birdwatching with binoculars',
image=Image(url='binocularsphoto.png'),
price=30,
),
]
)
query = {
'$and': {
'image.url': {'$regex': 'photo'},
'price': {'$lte': 50},
}
}
results = filter(docs, query)
assert len(results) == 1
assert results[0].price == 30
assert results[0].caption == 'A couple birdwatching with binoculars'
assert results[0].image.url == 'binocularsphoto.png'
:param docs: the DocumentArray where to apply the filter
:param query: the query to filter by
:return: A DocumentArray containing the Documents
in `docs` that fulfill the filter conditions in the `query`
"""
from docarray.utils.query_language.query_parser import QueryParser
if query:
query = query if not isinstance(query, str) else json.loads(query)
parser = QueryParser(query)
return DocumentArray(d for d in docs if parser.evaluate(d))
else:
return docs
|
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.proto
def test_simple_proto():
class CustomDoc(BaseDoc):
text: str
tensor: NdArray
da = DocList(
[CustomDoc(text='hello', tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
new_da = DocList[CustomDoc].from_protobuf(da.to_protobuf())
for doc1, doc2 in zip(da, new_da):
assert doc1.text == doc2.text
assert (doc1.tensor == doc2.tensor).all()
@pytest.mark.proto
def test_nested_proto():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocList[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocList[CustomDocument].from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_nested_proto_any_doc():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocList[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocList.from_protobuf(da.to_protobuf())
|
import numpy as np
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.proto
def test_simple_proto():
class CustomDoc(BaseDoc):
text: str
tensor: NdArray
da = DocArray(
[CustomDoc(text='hello', tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
new_da = DocArray[CustomDoc].from_protobuf(da.to_protobuf())
for doc1, doc2 in zip(da, new_da):
assert doc1.text == doc2.text
assert (doc1.tensor == doc2.tensor).all()
@pytest.mark.proto
def test_nested_proto():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocArray[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocArray[CustomDocument].from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_nested_proto_any_doc():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocArray[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocArray.from_protobuf(da.to_protobuf())
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_caffe')))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_caffe')))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
from docarray.computation.tensorflow_backend import TensorFlowCompBackend
from docarray.typing import TensorFlowTensor
@pytest.mark.tensorflow
def test_top_k_descending_false():
top_k = TensorFlowCompBackend.Retrieval.top_k
a = TensorFlowTensor(tf.constant([1, 4, 2, 7, 4, 9, 2]))
vals, indices = top_k(a, 3, descending=False)
assert vals.tensor.shape == (1, 3)
assert indices.tensor.shape == (1, 3)
assert tnp.allclose(tnp.squeeze(vals.tensor), tf.constant([1, 2, 2]))
assert tnp.allclose(tnp.squeeze(indices.tensor), tf.constant([0, 2, 6])) or tnp.allclose(
tnp.squeeze(indices.tensor), tf.constant([0, 6, 2])
)
a = TensorFlowTensor(tf.constant([[1, 4, 2, 7, 4, 9, 2], [11, 6, 2, 7, 3, 10, 4]]))
vals, indices = top_k(a, 3, descending=False)
assert vals.tensor.shape == (2, 3)
assert indices.tensor.shape == (2, 3)
assert tnp.allclose(vals.tensor[0], tf.constant([1, 2, 2]))
assert tnp.allclose(indices.tensor[0], tf.constant([0, 2, 6])) or tnp.allclose(
indices.tensor[0], tf.constant([0, 6, 2])
)
assert tnp.allclose(vals.tensor[1], tf.constant([2, 3, 4]))
assert tnp.allclose(indices.tensor[1], tf.constant([2, 4, 6]))
@pytest.mark.tensorflow
def test_top_k_descending_true():
top_k = TensorFlowCompBackend.Retrieval.top_k
a = TensorFlowTensor(tf.constant([1, 4, 2, 7, 4, 9, 2]))
vals, indices = top_k(a, 3, descending=True)
assert vals.tensor.shape == (1, 3)
assert indices.tensor.shape == (1, 3)
assert tnp.allclose(tnp.squeeze(vals.tensor), tf.constant([9, 7, 4]))
assert tnp.allclose(tnp.squeeze(indices.tensor), tf.constant([5, 3, 1]))
a = TensorFlowTensor(tf.constant([[1, 4, 2, 7, 4, 9, 2], [11, 6, 2, 7, 3, 10, 4]]))
vals, indices = top_k(a, 3, descending=True)
assert vals.tensor.shape == (2, 3)
assert indices.tensor.shape == (2, 3)
assert tnp.allclose(vals.tensor[0], tf.constant([9, 7, 4]))
assert tnp.allclose(indices.tensor[0], tf.constant([5, 3, 1]))
assert tnp.allclose(vals.tensor[1], tf.constant([11, 10, 7]))
assert tnp.allclose(indices.tensor[1], tf.constant([0, 5, 3]))
|
import pytest
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
from docarray.computation.tensorflow_backend import TensorFlowCompBackend
from docarray.typing import TensorFlowTensor
@pytest.mark.tensorflow
def test_top_k_descending_false():
top_k = TensorFlowCompBackend.Retrieval.top_k
a = TensorFlowTensor(tf.constant([1, 4, 2, 7, 4, 9, 2]))
vals, indices = top_k(a, 3, descending=False)
assert vals.tensor.shape == (1, 3)
assert indices.tensor.shape == (1, 3)
assert tnp.allclose(tnp.squeeze(vals.tensor), tf.constant([1, 2, 2]))
assert tnp.allclose(tnp.squeeze(indices.tensor), tf.constant([0, 2, 6])) or tnp.allclose(
tnp.squeeze(indices.tensor), tf.constant([0, 6, 2])
)
a = TensorFlowTensor(tf.constant([[1, 4, 2, 7, 4, 9, 2], [11, 6, 2, 7, 3, 10, 4]]))
vals, indices = top_k(a, 3, descending=False)
assert vals.tensor.shape == (2, 3)
assert indices.tensor.shape == (2, 3)
assert tnp.allclose(vals.tensor[0], tf.constant([1, 2, 2]))
assert tnp.allclose(indices.tensor[0], tf.constant([0, 2, 6])) or tnp.allclose(
indices.tensor[0], tf.constant([0, 6, 2])
)
assert tnp.allclose(vals.tensor[1], tf.constant([2, 3, 4]))
assert tnp.allclose(indices.tensor[1], tf.constant([2, 4, 6]))
@pytest.mark.tensorflow
def test_top_k_descending_true():
top_k = TensorFlowCompBackend.Retrieval.top_k
a = TensorFlowTensor(tf.constant([1, 4, 2, 7, 4, 9, 2]))
vals, indices = top_k(a, 3, descending=True)
assert vals.tensor.shape == (1, 3)
assert indices.tensor.shape == (1, 3)
assert tnp.allclose(tnp.squeeze(vals.tensor), tf.constant([9, 7, 4]))
assert tnp.allclose(tnp.squeeze(indices.tensor), tf.constant([5, 3, 1]))
a = TensorFlowTensor(tf.constant([[1, 4, 2, 7, 4, 9, 2], [11, 6, 2, 7, 3, 10, 4]]))
vals, indices = top_k(a, 3, descending=True)
assert vals.tensor.shape == (2, 3)
assert indices.tensor.shape == (2, 3)
assert tnp.allclose(vals.tensor[0], tf.constant([9, 7, 4]))
assert tnp.allclose(indices.tensor[0], tf.constant([5, 3, 1]))
assert tnp.allclose(vals.tensor[1], tf.constant([11, 10, 7]))
assert tnp.allclose(indices.tensor[1], tf.constant([0, 5, 3]))
|
"""DocumentFilter that uses an LLM chain to extract the relevant parts of documents."""
from __future__ import annotations
from collections.abc import Sequence
from typing import Any, Callable, Optional, cast
from langchain_core.callbacks import Callbacks
from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import Runnable
from pydantic import ConfigDict
from langchain.chains.llm import LLMChain
from langchain.retrievers.document_compressors.chain_extract_prompt import (
prompt_template,
)
def default_get_input(query: str, doc: Document) -> dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class NoOutputParser(BaseOutputParser[str]):
"""Parse outputs that could return a null string of some sort."""
no_output_str: str = "NO_OUTPUT"
def parse(self, text: str) -> str:
cleaned_text = text.strip()
if cleaned_text == self.no_output_str:
return ""
return cleaned_text
def _get_default_chain_prompt() -> PromptTemplate:
output_parser = NoOutputParser()
template = prompt_template.format(no_output_str=output_parser.no_output_str)
return PromptTemplate(
template=template,
input_variables=["question", "context"],
output_parser=output_parser,
)
class LLMChainExtractor(BaseDocumentCompressor):
"""Document compressor that uses an LLM chain to extract
the relevant parts of documents."""
llm_chain: Runnable
"""LLM wrapper to use for compressing documents."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress page content of raw documents."""
compressed_docs = []
for doc in documents:
_input = self.get_input(query, doc)
output_ = self.llm_chain.invoke(_input, config={"callbacks": callbacks})
if isinstance(self.llm_chain, LLMChain):
output = output_[self.llm_chain.output_key]
if self.llm_chain.prompt.output_parser is not None:
output = self.llm_chain.prompt.output_parser.parse(output)
else:
output = output_
if len(output) == 0:
continue
compressed_docs.append(
Document(page_content=cast(str, output), metadata=doc.metadata)
)
return compressed_docs
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress page content of raw documents asynchronously."""
inputs = [self.get_input(query, doc) for doc in documents]
outputs = await self.llm_chain.abatch(inputs, {"callbacks": callbacks})
compressed_docs = []
for i, doc in enumerate(documents):
if len(outputs[i]) == 0:
continue
compressed_docs.append(
Document(page_content=outputs[i], metadata=doc.metadata)
)
return compressed_docs
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[PromptTemplate] = None,
get_input: Optional[Callable[[str, Document], str]] = None,
llm_chain_kwargs: Optional[dict] = None,
) -> LLMChainExtractor:
"""Initialize from LLM."""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
_get_input = get_input if get_input is not None else default_get_input
if _prompt.output_parser is not None:
parser = _prompt.output_parser
else:
parser = StrOutputParser()
llm_chain = _prompt | llm | parser
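# The pipe operator composes prompt, model and output parser into a single Runnable
# sequence (LangChain Expression Language), replacing the legacy LLMChain wrapper.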
return cls(llm_chain=llm_chain, get_input=_get_input) # type: ignore[arg-type]
|
"""DocumentFilter that uses an LLM chain to extract the relevant parts of documents."""
from __future__ import annotations
from collections.abc import Sequence
from typing import Any, Callable, Optional, cast
from langchain_core.callbacks.manager import Callbacks
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import Runnable
from pydantic import ConfigDict
from langchain.chains.llm import LLMChain
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.retrievers.document_compressors.chain_extract_prompt import (
prompt_template,
)
def default_get_input(query: str, doc: Document) -> dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class NoOutputParser(BaseOutputParser[str]):
"""Parse outputs that could return a null string of some sort."""
no_output_str: str = "NO_OUTPUT"
def parse(self, text: str) -> str:
cleaned_text = text.strip()
if cleaned_text == self.no_output_str:
return ""
return cleaned_text
def _get_default_chain_prompt() -> PromptTemplate:
output_parser = NoOutputParser()
template = prompt_template.format(no_output_str=output_parser.no_output_str)
return PromptTemplate(
template=template,
input_variables=["question", "context"],
output_parser=output_parser,
)
class LLMChainExtractor(BaseDocumentCompressor):
"""Document compressor that uses an LLM chain to extract
the relevant parts of documents."""
llm_chain: Runnable
"""LLM wrapper to use for compressing documents."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress page content of raw documents."""
compressed_docs = []
for doc in documents:
_input = self.get_input(query, doc)
output_ = self.llm_chain.invoke(_input, config={"callbacks": callbacks})
if isinstance(self.llm_chain, LLMChain):
output = output_[self.llm_chain.output_key]
if self.llm_chain.prompt.output_parser is not None:
output = self.llm_chain.prompt.output_parser.parse(output)
else:
output = output_
if len(output) == 0:
continue
compressed_docs.append(
Document(page_content=cast(str, output), metadata=doc.metadata)
)
return compressed_docs
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress page content of raw documents asynchronously."""
inputs = [self.get_input(query, doc) for doc in documents]
outputs = await self.llm_chain.abatch(inputs, {"callbacks": callbacks})
compressed_docs = []
for i, doc in enumerate(documents):
if len(outputs[i]) == 0:
continue
compressed_docs.append(
Document(page_content=outputs[i], metadata=doc.metadata)
)
return compressed_docs
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[PromptTemplate] = None,
get_input: Optional[Callable[[str, Document], str]] = None,
llm_chain_kwargs: Optional[dict] = None,
) -> LLMChainExtractor:
"""Initialize from LLM."""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
_get_input = get_input if get_input is not None else default_get_input
if _prompt.output_parser is not None:
parser = _prompt.output_parser
else:
parser = StrOutputParser()
llm_chain = _prompt | llm | parser
return cls(llm_chain=llm_chain, get_input=_get_input) # type: ignore[arg-type]
|
from typing import Optional
from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.storage.kvstore.gel import GelKVStore
class GelIndexStore(KVIndexStore):
"""
Gel Index store.
Args:
gel_kvstore (GelKVStore): Gel key-value store
namespace (str): namespace for the index store
"""
def __init__(
self,
gel_kvstore: GelKVStore,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> None:
"""Init a GelIndexStore."""
super().__init__(
gel_kvstore, namespace=namespace, collection_suffix=collection_suffix
)
|
from typing import Optional
from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.storage.kvstore.gel import GelKVStore
class GelIndexStore(KVIndexStore):
"""Gel Index store.
Args:
gel_kvstore (GelKVStore): Gel key-value store
namespace (str): namespace for the index store
"""
def __init__(
self,
gel_kvstore: GelKVStore,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> None:
"""Init a GelIndexStore."""
super().__init__(
gel_kvstore, namespace=namespace, collection_suffix=collection_suffix
)
|
from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
SparseEncoderTrainingArguments extends :class:`~SentenceTransformerTrainingArguments`, which itself extends
:class:`~transformers.TrainingArguments` with additional arguments specific to Sentence Transformers.
See :class:`~transformers.TrainingArguments` for the complete list of available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
"""
|
from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
SparseEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
"""
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Type, TypeVar
from docarray.utils._internal.pydantic import is_pydantic_v2
if TYPE_CHECKING:
if is_pydantic_v2:
from pydantic import GetCoreSchemaHandler
from pydantic_core import core_schema
from docarray.base_doc.base_node import BaseNode
T = TypeVar('T')
class AbstractType(BaseNode):
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
@abstractmethod
def _docarray_validate(cls: Type[T], value: Any) -> T:
...
if is_pydantic_v2:
@classmethod
def validate(cls: Type[T], value: Any, _: Any) -> T:
return cls._docarray_validate(value)
else:
@classmethod
def validate(
cls: Type[T],
value: Any,
) -> T:
return cls._docarray_validate(value)
if is_pydantic_v2:
@classmethod
@abstractmethod
def __get_pydantic_core_schema__(
cls, _source_type: Any, _handler: 'GetCoreSchemaHandler'
) -> 'core_schema.CoreSchema':
...
|
from abc import abstractmethod
from typing import Any, Type, TypeVar
from pydantic import BaseConfig
from pydantic.fields import ModelField
from docarray.base_doc.base_node import BaseNode
T = TypeVar('T')
class AbstractType(BaseNode):
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
@abstractmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
...
|
import multiprocessing
from copy import deepcopy
from functools import partial
from typing import TYPE_CHECKING
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import GatewayProtocolType, PodRoleType
from jina.parsers.helper import _set_gateway_uses
if TYPE_CHECKING: # pragma: no cover
from argparse import Namespace
def _get_event(obj) -> multiprocessing.Event:
if isinstance(obj, multiprocessing.Process) or isinstance(
obj, multiprocessing.context.ForkProcess
):
return multiprocessing.Event()
elif isinstance(obj, multiprocessing.context.SpawnProcess):
return multiprocessing.get_context('spawn').Event()
else:
raise TypeError(f'{obj} is not an instance of "multiprocessing.Process"')
class ConditionalEvent:
"""
:class:`ConditionalEvent` provides a common interface to an event (multiprocessing or threading event)
that gets triggered when any of the events provided as input is triggered (OR logic)
:param events_list: The list of events that compose this composable event
"""
def __init__(self, events_list):
super().__init__()
self.event = None
self.event = multiprocessing.synchronize.Event(
ctx=multiprocessing.get_context()
)
self.event_list = events_list
for e in events_list:
self._setup(e, self._state_changed)
self._state_changed()
def _state_changed(self):
bools = [e.is_set() for e in self.event_list]
if any(bools):
self.event.set()
else:
self.event.clear()
def _custom_set(self, e):
e._set()
e._state_changed()
def _custom_clear(self, e):
e._clear()
e._state_changed()
def _setup(self, e, changed_callback):
e._set = e.set
e._clear = e.clear
e._state_changed = changed_callback
e.set = partial(self._custom_set, e)
e.clear = partial(self._custom_clear, e)
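# Usage sketch (illustrative, not part of the original module): given two worker
# events, e1 = multiprocessing.Event() and e2 = multiprocessing.Event(),
# ConditionalEvent([e1, e2]).event becomes set as soon as either e1.set() or
# e2.set() is called, because _setup() wraps their set()/clear() methods to
# re-evaluate the OR of all member events.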
def update_runtime_cls(args, copy=False) -> 'Namespace':
"""Get runtime_cls as a string from args
:param args: pod/deployment namespace args
:param copy: True if args shouldn't be modified in-place
:return: runtime class as a string
"""
_args = deepcopy(args) if copy else args
if _args.runtime_cls == 'WorkerRuntime' and is_valid_huburi(_args.uses):
_hub_args = deepcopy(_args)
_hub_args.uri = _args.uses
_hub_args.no_usage = True
_args.uses = HubIO(_hub_args).pull()
if hasattr(_args, 'protocol'):
_set_gateway_uses(_args)
if _args.pod_role == PodRoleType.HEAD:
_args.runtime_cls = 'HeadRuntime'
return _args
|
import multiprocessing
from copy import deepcopy
from functools import partial
from typing import TYPE_CHECKING
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import GatewayProtocolType, PodRoleType
from jina.parsers.helper import _set_gateway_uses
if TYPE_CHECKING:
from argparse import Namespace
def _get_event(obj) -> multiprocessing.Event:
if isinstance(obj, multiprocessing.Process) or isinstance(
obj, multiprocessing.context.ForkProcess
):
return multiprocessing.Event()
elif isinstance(obj, multiprocessing.context.SpawnProcess):
return multiprocessing.get_context('spawn').Event()
else:
raise TypeError(f'{obj} is not an instance of "multiprocessing.Process"')
class ConditionalEvent:
"""
:class:`ConditionalEvent` provides a common interface to an event (multiprocessing or threading event)
that gets triggered when any of the events provided as input is triggered (OR logic)
:param events_list: The list of events that compose this composable event
"""
def __init__(self, events_list):
super().__init__()
self.event = None
self.event = multiprocessing.synchronize.Event(
ctx=multiprocessing.get_context()
)
self.event_list = events_list
for e in events_list:
self._setup(e, self._state_changed)
self._state_changed()
def _state_changed(self):
bools = [e.is_set() for e in self.event_list]
if any(bools):
self.event.set()
else:
self.event.clear()
def _custom_set(self, e):
e._set()
e._state_changed()
def _custom_clear(self, e):
e._clear()
e._state_changed()
def _setup(self, e, changed_callback):
e._set = e.set
e._clear = e.clear
e._state_changed = changed_callback
e.set = partial(self._custom_set, e)
e.clear = partial(self._custom_clear, e)
def update_runtime_cls(args, copy=False) -> 'Namespace':
"""Get runtime_cls as a string from args
:param args: pod/deployment namespace args
:param copy: True if args shouldn't be modified in-place
:return: runtime class as a string
"""
_args = deepcopy(args) if copy else args
if _args.runtime_cls == 'WorkerRuntime' and is_valid_huburi(_args.uses):
_hub_args = deepcopy(_args)
_hub_args.uri = _args.uses
_hub_args.no_usage = True
_args.uses = HubIO(_hub_args).pull()
if hasattr(_args, 'protocol'):
_set_gateway_uses(_args)
if _args.pod_role == PodRoleType.HEAD:
_args.runtime_cls = 'HeadRuntime'
return _args
|
import warnings
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union
import numpy as np
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AudioUrl')
AUDIO_FILE_FORMATS = ['wav']
@_register_proto(proto_type_name='audio_url')
class AudioUrl(AnyUrl):
"""
URL to a .wav file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config) # basic url validation
has_audio_extension = any(ext in url for ext in AUDIO_FILE_FORMATS)
if not has_audio_extension:
raise ValueError(
f'Audio URL must have one of the following extensions:'
f'{AUDIO_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(self: T) -> np.ndarray:
"""
Load the data from the url into an AudioNdArray.
:return: AudioNdArray representing the audio file content.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
import numpy as np
from docarray.typing import AudioUrl, AudioNdArray
class MyDoc(BaseDocument):
audio_url: AudioUrl
audio_tensor: AudioNdArray
doc = MyDoc(audio_url="toydata/hello.wav")
doc.audio_tensor = doc.audio_url.load()
assert isinstance(doc.audio_tensor, np.ndarray)
"""
bytes_ = AudioBytes(self.load_bytes())
return bytes_.load()
def display(self):
"""
Play the audio sound from url in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
remote_url = True if self.startswith('http') else False
if remote_url:
display(Audio(data=self))
else:
display(Audio(filename=self))
else:
warnings.warn('Display of audio is only possible in a notebook.')
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union
import numpy as np
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AudioUrl')
AUDIO_FILE_FORMATS = ['wav']
@_register_proto(proto_type_name='audio_url')
class AudioUrl(AnyUrl):
"""
URL to a .wav file.
Can be a remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config) # basic url validation
has_audio_extension = any(ext in url for ext in AUDIO_FILE_FORMATS)
if not has_audio_extension:
raise ValueError(
f'Audio URL must have one of the following extensions:'
f'{AUDIO_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(self: T) -> np.ndarray:
"""
Load the data from the url into an AudioNdArray.
:return: AudioNdArray representing the audio file content.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
import numpy as np
from docarray.typing import AudioNdArray, AudioUrl
class MyDoc(BaseDocument):
audio_url: AudioUrl
audio_tensor: AudioNdArray
doc = MyDoc(audio_url="toydata/hello.wav")
doc.audio_tensor = doc.audio_url.load()
assert isinstance(doc.audio_tensor, np.ndarray)
"""
bytes_ = AudioBytes(self.load_bytes())
return bytes_.load()
|
from pathlib import Path
from typing import List
import numpy as np
import pytest
import scipy
from jina import Document, DocumentArray, Executor
from jina.excepts import PretrainedModelFileDoesNotExist
from tfidf_text_executor import TFIDFTextEncoder
_EMBEDDING_DIM = 130107
@pytest.fixture(scope='session')
def basic_encoder() -> TFIDFTextEncoder:
return TFIDFTextEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.__class__.__name__ == 'TFIDFTextEncoder'
def test_error_no_file():
with pytest.raises(PretrainedModelFileDoesNotExist):
TFIDFTextEncoder(path_vectorizer='does/not/exist')
def test_no_document(basic_encoder: TFIDFTextEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: TFIDFTextEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
def test_no_text_documents(basic_encoder: TFIDFTextEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_tfidf_text_encoder(basic_encoder: TFIDFTextEncoder):
doc = Document(text='Han likes eating pizza')
docarray = DocumentArray([doc])
basic_encoder.encode(docarray, parameters={})
embedding = doc.embedding
assert embedding.shape == (1, _EMBEDDING_DIM)
assert embedding.size == 4
def test_tfidf_text_encoder_batch(basic_encoder: TFIDFTextEncoder):
# Input
text_batch = ['Han likes eating pizza', 'Han likes pizza', 'Jina rocks']
# Encoder embedding
docarray = DocumentArray([Document(text=text) for text in text_batch])
basic_encoder.encode(docarray, parameters={})
embedding_batch = scipy.sparse.vstack(docarray.get_attributes('embedding'))
assert embedding_batch.shape == (3, _EMBEDDING_DIM)
assert embedding_batch.size == 8
embs = np.asarray(embedding_batch.todense())
# They overlap in Han
assert (embs[0] * embs[1]).sum() > 0.1
# They do not overlap
assert (embs[0] * embs[2]).sum() == 0
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: TFIDFTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: TFIDFTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (1, _EMBEDDING_DIM)
|
from pathlib import Path
from typing import List
import numpy as np
import pytest
import scipy
from jina import Document, DocumentArray, Executor
from jina.excepts import PretrainedModelFileDoesNotExist
from ...tfidf_text_executor import TFIDFTextEncoder
_EMBEDDING_DIM = 130107
@pytest.fixture(scope='session')
def basic_encoder() -> TFIDFTextEncoder:
return TFIDFTextEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.__class__.__name__ == 'TFIDFTextEncoder'
def test_error_no_file():
with pytest.raises(PretrainedModelFileDoesNotExist):
TFIDFTextEncoder(path_vectorizer='does/not/exist')
def test_no_document(basic_encoder: TFIDFTextEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: TFIDFTextEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
def test_no_text_documents(basic_encoder: TFIDFTextEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_tfidf_text_encoder(basic_encoder: TFIDFTextEncoder):
doc = Document(text='Han likes eating pizza')
docarray = DocumentArray([doc])
basic_encoder.encode(docarray, parameters={})
embedding = doc.embedding
assert embedding.shape == (1, _EMBEDDING_DIM)
assert embedding.size == 4
def test_tfidf_text_encoder_batch(basic_encoder: TFIDFTextEncoder):
# Input
text_batch = ['Han likes eating pizza', 'Han likes pizza', 'Jina rocks']
# Encoder embedding
docarray = DocumentArray([Document(text=text) for text in text_batch])
basic_encoder.encode(docarray, parameters={})
embedding_batch = scipy.sparse.vstack(docarray.get_attributes('embedding'))
assert embedding_batch.shape == (3, _EMBEDDING_DIM)
assert embedding_batch.size == 8
embs = np.asarray(embedding_batch.todense())
# They overlap in Han
assert (embs[0] * embs[1]).sum() > 0.1
# They do not overlap
assert (embs[0] * embs[2]).sum() == 0
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: TFIDFTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: TFIDFTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (1, _EMBEDDING_DIM)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_root_logger
from .misc import find_latest_checkpoint
from .setup_env import setup_multi_processes
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'setup_multi_processes'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_root_logger
from .misc import find_latest_checkpoint
__all__ = [
'get_root_logger',
'collect_env',
'find_latest_checkpoint',
]
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; it seems to have no effect, so the variable must still be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.21.1'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn with
many parallel plot generators versus the Ubuntu default of ulimit -n 1024 or the
OS X El Capitan default of 256; the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
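# Behaviour sketch: on a POSIX system
#     soft, hard = _set_nofile(4096)  # soft is raised to >= 4096 when the hard limit allows it
# while on Windows (no `resource` module) the function simply returns (None, None).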
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; it seems to have no effect, so the variable must still be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.21.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn with
many parallel plot generators versus the Ubuntu default of ulimit -n 1024 or the
OS X El Capitan default of 256; the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, JPEG, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomChannelPermutation,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
RGB,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ClampKeyPoints, ConvertBoundingBoxFormat, SetClampingMode
from ._misc import (
ConvertImageDtype,
GaussianBlur,
GaussianNoise,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImage, ToPILImage, ToPureTensor
from ._utils import check_type, get_bounding_boxes, has_all, has_any, query_chw, query_size
from ._deprecated import ToTensor # usort: skip
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, JPEG, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomChannelPermutation,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
RGB,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ClampKeyPoints, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
GaussianNoise,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImage, ToPILImage, ToPureTensor
from ._utils import check_type, get_bounding_boxes, has_all, has_any, query_chw, query_size
from ._deprecated import ToTensor # usort: skip
|
import sys
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
sys.path.append("tests/python")
import test_monotone_constraints as tmc
rng = np.random.RandomState(1994)
def non_decreasing(L):
return all((x - y) < 0.001 for x, y in zip(L, L[1:]))
def non_increasing(L):
return all((y - x) < 0.001 for x, y in zip(L, L[1:]))
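# e.g. non_decreasing([0.1, 0.1, 0.2]) and non_increasing([0.3, 0.2, 0.2]) are both True;
# the 0.001 slack tolerates tiny numerical noise in the monotone predictions.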
def assert_constraint(constraint, tree_method):
from sklearn.datasets import make_regression
n = 1000
X, y = make_regression(n, random_state=rng, n_features=1, n_informative=1)
dtrain = xgb.DMatrix(X, y)
param = {}
param["tree_method"] = tree_method
param["monotone_constraints"] = "(" + str(constraint) + ")"
bst = xgb.train(param, dtrain)
dpredict = xgb.DMatrix(X[X[:, 0].argsort()])
pred = bst.predict(dpredict)
if constraint > 0:
assert non_decreasing(pred)
elif constraint < 0:
assert non_increasing(pred)
@pytest.mark.skipif(**tm.no_sklearn())
def test_gpu_hist_basic():
assert_constraint(1, "gpu_hist")
assert_constraint(-1, "gpu_hist")
def test_gpu_hist_depthwise():
params = {
"tree_method": "gpu_hist",
"grow_policy": "depthwise",
"monotone_constraints": "(1, -1)",
}
model = xgb.train(params, tmc.training_dset)
tmc.is_correctly_constrained(model)
def test_gpu_hist_lossguide():
params = {
"tree_method": "gpu_hist",
"grow_policy": "lossguide",
"monotone_constraints": "(1, -1)",
}
model = xgb.train(params, tmc.training_dset)
tmc.is_correctly_constrained(model)
|
import sys
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
sys.path.append("tests/python")
import test_monotone_constraints as tmc
rng = np.random.RandomState(1994)
def non_decreasing(L):
return all((x - y) < 0.001 for x, y in zip(L, L[1:]))
def non_increasing(L):
return all((y - x) < 0.001 for x, y in zip(L, L[1:]))
def assert_constraint(constraint, tree_method):
from sklearn.datasets import make_regression
n = 1000
X, y = make_regression(n, random_state=rng, n_features=1, n_informative=1)
dtrain = xgb.DMatrix(X, y)
param = {}
param['tree_method'] = tree_method
param['monotone_constraints'] = "(" + str(constraint) + ")"
bst = xgb.train(param, dtrain)
dpredict = xgb.DMatrix(X[X[:, 0].argsort()])
pred = bst.predict(dpredict)
if constraint > 0:
assert non_decreasing(pred)
elif constraint < 0:
assert non_increasing(pred)
@pytest.mark.skipif(**tm.no_sklearn())
def test_gpu_hist_basic():
assert_constraint(1, 'gpu_hist')
assert_constraint(-1, 'gpu_hist')
def test_gpu_hist_depthwise():
params = {
'tree_method': 'gpu_hist',
'grow_policy': 'depthwise',
'monotone_constraints': '(1, -1)'
}
model = xgb.train(params, tmc.training_dset)
tmc.is_correctly_constrained(model)
def test_gpu_hist_lossguide():
params = {
'tree_method': 'gpu_hist',
'grow_policy': 'lossguide',
'monotone_constraints': '(1, -1)'
}
model = xgb.train(params, tmc.training_dset)
tmc.is_correctly_constrained(model)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.losses import deserialize as deserialize
from keras.src.losses import get as get
from keras.src.losses import serialize as serialize
from keras.src.losses.loss import Loss as Loss
from keras.src.losses.losses import CTC as CTC
from keras.src.losses.losses import BinaryCrossentropy as BinaryCrossentropy
from keras.src.losses.losses import (
BinaryFocalCrossentropy as BinaryFocalCrossentropy,
)
from keras.src.losses.losses import (
CategoricalCrossentropy as CategoricalCrossentropy,
)
from keras.src.losses.losses import (
CategoricalFocalCrossentropy as CategoricalFocalCrossentropy,
)
from keras.src.losses.losses import (
CategoricalGeneralizedCrossEntropy as CategoricalGeneralizedCrossEntropy,
)
from keras.src.losses.losses import CategoricalHinge as CategoricalHinge
from keras.src.losses.losses import Circle as Circle
from keras.src.losses.losses import CosineSimilarity as CosineSimilarity
from keras.src.losses.losses import Dice as Dice
from keras.src.losses.losses import Hinge as Hinge
from keras.src.losses.losses import Huber as Huber
from keras.src.losses.losses import KLDivergence as KLDivergence
from keras.src.losses.losses import LogCosh as LogCosh
from keras.src.losses.losses import MeanAbsoluteError as MeanAbsoluteError
from keras.src.losses.losses import (
MeanAbsolutePercentageError as MeanAbsolutePercentageError,
)
from keras.src.losses.losses import MeanSquaredError as MeanSquaredError
from keras.src.losses.losses import (
MeanSquaredLogarithmicError as MeanSquaredLogarithmicError,
)
from keras.src.losses.losses import Poisson as Poisson
from keras.src.losses.losses import (
SparseCategoricalCrossentropy as SparseCategoricalCrossentropy,
)
from keras.src.losses.losses import SquaredHinge as SquaredHinge
from keras.src.losses.losses import Tversky as Tversky
from keras.src.losses.losses import binary_crossentropy as binary_crossentropy
from keras.src.losses.losses import (
binary_focal_crossentropy as binary_focal_crossentropy,
)
from keras.src.losses.losses import (
categorical_crossentropy as categorical_crossentropy,
)
from keras.src.losses.losses import (
categorical_focal_crossentropy as categorical_focal_crossentropy,
)
from keras.src.losses.losses import (
categorical_generalized_cross_entropy as categorical_generalized_cross_entropy,
)
from keras.src.losses.losses import categorical_hinge as categorical_hinge
from keras.src.losses.losses import circle as circle
from keras.src.losses.losses import cosine_similarity as cosine_similarity
from keras.src.losses.losses import ctc as ctc
from keras.src.losses.losses import dice as dice
from keras.src.losses.losses import hinge as hinge
from keras.src.losses.losses import huber as huber
from keras.src.losses.losses import kl_divergence as kl_divergence
from keras.src.losses.losses import log_cosh as log_cosh
from keras.src.losses.losses import mean_absolute_error as mean_absolute_error
from keras.src.losses.losses import (
mean_absolute_percentage_error as mean_absolute_percentage_error,
)
from keras.src.losses.losses import mean_squared_error as mean_squared_error
from keras.src.losses.losses import (
mean_squared_logarithmic_error as mean_squared_logarithmic_error,
)
from keras.src.losses.losses import poisson as poisson
from keras.src.losses.losses import (
sparse_categorical_crossentropy as sparse_categorical_crossentropy,
)
from keras.src.losses.losses import squared_hinge as squared_hinge
from keras.src.losses.losses import tversky as tversky
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.losses import deserialize
from keras.src.losses import get
from keras.src.losses import serialize
from keras.src.losses.loss import Loss
from keras.src.losses.losses import CTC
from keras.src.losses.losses import BinaryCrossentropy
from keras.src.losses.losses import BinaryFocalCrossentropy
from keras.src.losses.losses import CategoricalCrossentropy
from keras.src.losses.losses import CategoricalFocalCrossentropy
from keras.src.losses.losses import CategoricalGeneralizedCrossEntropy
from keras.src.losses.losses import CategoricalHinge
from keras.src.losses.losses import Circle
from keras.src.losses.losses import CosineSimilarity
from keras.src.losses.losses import Dice
from keras.src.losses.losses import Hinge
from keras.src.losses.losses import Huber
from keras.src.losses.losses import KLDivergence
from keras.src.losses.losses import LogCosh
from keras.src.losses.losses import MeanAbsoluteError
from keras.src.losses.losses import MeanAbsolutePercentageError
from keras.src.losses.losses import MeanSquaredError
from keras.src.losses.losses import MeanSquaredLogarithmicError
from keras.src.losses.losses import Poisson
from keras.src.losses.losses import SparseCategoricalCrossentropy
from keras.src.losses.losses import SquaredHinge
from keras.src.losses.losses import Tversky
from keras.src.losses.losses import binary_crossentropy
from keras.src.losses.losses import binary_focal_crossentropy
from keras.src.losses.losses import categorical_crossentropy
from keras.src.losses.losses import categorical_focal_crossentropy
from keras.src.losses.losses import categorical_generalized_cross_entropy
from keras.src.losses.losses import categorical_hinge
from keras.src.losses.losses import circle
from keras.src.losses.losses import cosine_similarity
from keras.src.losses.losses import ctc
from keras.src.losses.losses import dice
from keras.src.losses.losses import hinge
from keras.src.losses.losses import huber
from keras.src.losses.losses import kl_divergence
from keras.src.losses.losses import log_cosh
from keras.src.losses.losses import mean_absolute_error
from keras.src.losses.losses import mean_absolute_percentage_error
from keras.src.losses.losses import mean_squared_error
from keras.src.losses.losses import mean_squared_logarithmic_error
from keras.src.losses.losses import poisson
from keras.src.losses.losses import sparse_categorical_crossentropy
from keras.src.losses.losses import squared_hinge
from keras.src.losses.losses import tversky
|
from typing import Any, AsyncGenerator, Coroutine, Dict, List, Optional, Sequence, Union
import pytest
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseGen,
CompletionResponse,
LLMMetadata,
)
from llama_index.core.llms.function_calling import FunctionCallingLLM
from llama_index.core.llms.llm import ToolSelection
from llama_index.core.program.function_program import FunctionTool, get_function_tool
from llama_index.core.tools.types import BaseTool
from pydantic import BaseModel, Field
class MockFunctionCallingLLM(FunctionCallingLLM):
def __init__(self, tool_selection: List[ToolSelection]):
super().__init__()
self._tool_selection = tool_selection
async def achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> Coroutine[Any, Any, ChatResponse]:
return ChatResponse(message=ChatMessage(role="user", content=""))
def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> Coroutine[Any, Any, CompletionResponse]:
pass
def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> Coroutine[Any, Any, AsyncGenerator[ChatResponse, None]]:
pass
def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> Coroutine[Any, Any, AsyncGenerator[CompletionResponse, None]]:
pass
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
return ChatResponse(message=ChatMessage(role="user", content=""))
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
pass
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
pass
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> ChatResponseGen:
pass
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(is_function_calling_model=True)
def _prepare_chat_with_tools(
self,
tools: Sequence["BaseTool"],
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
verbose: bool = False,
allow_parallel_tool_calls: bool = False,
**kwargs: Any,
) -> Dict[str, Any]:
return {"messages": []}
def get_tool_calls_from_response(
self,
response: ChatResponse,
error_on_no_tool_call: bool = True,
**kwargs: Any,
) -> List[ToolSelection]:
return self._tool_selection
class Person(BaseModel):
name: str = Field(description="Person name")
@pytest.fixture()
def person_tool() -> FunctionTool:
return get_function_tool(Person)
@pytest.fixture()
def person_tool_selection(person_tool: FunctionTool) -> ToolSelection:
return ToolSelection(
tool_id="",
tool_name=person_tool.metadata.name,
tool_kwargs={},
)
def test_predict_and_call(
person_tool: FunctionTool, person_tool_selection: ToolSelection
) -> None:
"""Test predict_and_call will return ToolOutput with error rather than raising one."""
llm = MockFunctionCallingLLM([person_tool_selection])
response = llm.predict_and_call(tools=[person_tool])
assert all(tool_output.is_error for tool_output in response.sources)
def test_predict_and_call_throws_if_error_on_tool(
person_tool: FunctionTool, person_tool_selection: ToolSelection
) -> None:
"""Test predict_and_call will raise an error."""
llm = MockFunctionCallingLLM([person_tool_selection])
with pytest.raises(ValueError):
llm.predict_and_call(tools=[person_tool], error_on_tool_error=True)
@pytest.mark.asyncio
async def test_apredict_and_call(
person_tool: FunctionTool, person_tool_selection: ToolSelection
) -> None:
"""Test apredict_and_call will return ToolOutput with error rather than raising one."""
llm = MockFunctionCallingLLM([person_tool_selection])
response = await llm.apredict_and_call(tools=[person_tool])
assert all(tool_output.is_error for tool_output in response.sources)
@pytest.mark.asyncio
async def test_apredict_and_call_throws_if_error_on_tool(
person_tool: FunctionTool, person_tool_selection: ToolSelection
) -> None:
"""Test apredict_and_call will raise an error."""
llm = MockFunctionCallingLLM([person_tool_selection])
with pytest.raises(ValueError):
await llm.apredict_and_call(tools=[person_tool], error_on_tool_error=True)
|
from typing import Any, AsyncGenerator, Coroutine, Dict, List, Optional, Sequence, Union
import pytest
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseGen,
CompletionResponse,
LLMMetadata,
)
from llama_index.core.llms.function_calling import FunctionCallingLLM
from llama_index.core.llms.llm import ToolSelection
from llama_index.core.program.function_program import FunctionTool, get_function_tool
from llama_index.core.tools.types import BaseTool
from pydantic import BaseModel, Field
class MockFunctionCallingLLM(FunctionCallingLLM):
def __init__(self, tool_selection: List[ToolSelection]):
super().__init__()
self._tool_selection = tool_selection
async def achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> Coroutine[Any, Any, ChatResponse]:
return ChatResponse(message=ChatMessage(role="user", content=""))
def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> Coroutine[Any, Any, CompletionResponse]:
pass
def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> Coroutine[Any, Any, AsyncGenerator[ChatResponse, None]]:
pass
def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> Coroutine[Any, Any, AsyncGenerator[CompletionResponse, None]]:
pass
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
return ChatResponse(message=ChatMessage(role="user", content=""))
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
pass
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
pass
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> ChatResponseGen:
pass
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(is_function_calling_model=True)
def _prepare_chat_with_tools(
self,
tools: Sequence["BaseTool"],
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
verbose: bool = False,
allow_parallel_tool_calls: bool = False,
**kwargs: Any,
) -> Dict[str, Any]:
return {"messages": []}
def get_tool_calls_from_response(
self,
response: ChatResponse,
error_on_no_tool_call: bool = True,
**kwargs: Any,
) -> List[ToolSelection]:
return self._tool_selection
class Person(BaseModel):
name: str = Field(description="Person name")
@pytest.fixture()
def person_tool() -> FunctionTool:
return get_function_tool(Person)
@pytest.fixture()
def person_tool_selection(person_tool: FunctionTool) -> ToolSelection:
return ToolSelection(
tool_id="",
tool_name=person_tool.metadata.name,
tool_kwargs={},
)
def test_predict_and_call(
person_tool: FunctionTool, person_tool_selection: ToolSelection
) -> None:
"""Test predict_and_call will return ToolOutput with error rather than raising one."""
llm = MockFunctionCallingLLM([person_tool_selection])
response = llm.predict_and_call(tools=[person_tool])
assert all(tool_output.is_error for tool_output in response.sources)
def test_predict_and_call_throws_if_error_on_tool(
person_tool: FunctionTool, person_tool_selection: ToolSelection
) -> None:
"""Test predict_and_call will raise an error."""
llm = MockFunctionCallingLLM([person_tool_selection])
with pytest.raises(ValueError):
llm.predict_and_call(tools=[person_tool], error_on_tool_error=True)
@pytest.mark.asyncio()
async def test_apredict_and_call(
person_tool: FunctionTool, person_tool_selection: ToolSelection
) -> None:
"""Test apredict_and_call will return ToolOutput with error rather than raising one."""
llm = MockFunctionCallingLLM([person_tool_selection])
response = await llm.apredict_and_call(tools=[person_tool])
assert all(tool_output.is_error for tool_output in response.sources)
@pytest.mark.asyncio()
async def test_apredict_and_call_throws_if_error_on_tool(
person_tool: FunctionTool, person_tool_selection: ToolSelection
) -> None:
"""Test apredict_and_call will raise an error."""
llm = MockFunctionCallingLLM([person_tool_selection])
with pytest.raises(ValueError):
await llm.apredict_and_call(tools=[person_tool], error_on_tool_error=True)
|
# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from mmengine.evaluator import Evaluator
from mmengine.model import BaseModel
from mmengine.optim import OptimWrapper
from mmengine.runner import Runner
from torch.utils.data import Dataset
from mmdet.registry import DATASETS
from mmdet.utils import register_all_modules
register_all_modules()
class ToyModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, batch_inputs, labels, mode='tensor'):
labels = torch.stack(labels)
outputs = self.linear(batch_inputs)
if mode == 'tensor':
return outputs
elif mode == 'loss':
loss = (labels - outputs).sum()
outputs = dict(loss=loss)
return outputs
else:
return outputs
class ToyModel1(BaseModel, ToyModel):
def __init__(self):
super().__init__()
def forward(self, *args, **kwargs):
return super(BaseModel, self).forward(*args, **kwargs)
class ToyModel2(BaseModel):
def __init__(self):
super().__init__()
self.teacher = ToyModel1()
self.student = ToyModel1()
self.semi_test_cfg = dict(predict_on='teacher')
def forward(self, *args, **kwargs):
return self.student(*args, **kwargs)
@DATASETS.register_module(force=True)
class DummyDataset(Dataset):
METAINFO = dict() # type: ignore
data = torch.randn(12, 2)
label = torch.ones(12)
@property
def metainfo(self):
return self.METAINFO
def __len__(self):
return self.data.size(0)
def __getitem__(self, index):
return dict(inputs=self.data[index], data_sample=self.label[index])
class TestTeacherStudentValLoop(TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_mean_teacher_hook(self):
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = ToyModel2().to(device)
evaluator = Mock()
evaluator.evaluate = Mock(return_value=dict(acc=0.5))
evaluator.__class__ = Evaluator
runner = Runner(
model=model,
train_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
val_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=False),
batch_size=3,
num_workers=0),
val_evaluator=evaluator,
work_dir=self.temp_dir.name,
default_scope='mmdet',
optim_wrapper=OptimWrapper(
torch.optim.Adam(ToyModel().parameters())),
train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1),
val_cfg=dict(type='TeacherStudentValLoop'),
default_hooks=dict(logger=dict(type='LoggerHook', interval=1)),
experiment_name='test1')
runner.train()
|
# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from mmengine.model import BaseModel
from mmengine.optim import OptimWrapper
from mmengine.runner import Runner
from torch.utils.data import Dataset
from mmdet.utils import register_all_modules
register_all_modules()
class ToyModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, batch_inputs, labels, mode='tensor'):
labels = torch.stack(labels)
outputs = self.linear(batch_inputs)
if mode == 'tensor':
return outputs
elif mode == 'loss':
loss = (labels - outputs).sum()
outputs = dict(loss=loss)
return outputs
else:
return outputs
class ToyModel1(BaseModel, ToyModel):
def __init__(self):
super().__init__()
def forward(self, *args, **kwargs):
return super(BaseModel, self).forward(*args, **kwargs)
class ToyModel2(BaseModel):
def __init__(self):
super().__init__()
self.teacher = ToyModel1()
self.student = ToyModel1()
self.semi_test_cfg = dict(predict_on='teacher')
def forward(self, *args, **kwargs):
return self.student(*args, **kwargs)
class DummyDataset(Dataset):
METAINFO = dict() # type: ignore
data = torch.randn(12, 2)
label = torch.ones(12)
@property
def metainfo(self):
return self.METAINFO
def __len__(self):
return self.data.size(0)
def __getitem__(self, index):
return dict(inputs=self.data[index], data_sample=self.label[index])
class TestTeacherStudentValLoop(TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_mean_teacher_hook(self):
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = ToyModel2().to(device)
evaluator = Mock()
evaluator.evaluate = Mock(return_value=dict(acc=0.5))
runner = Runner(
model=model,
train_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
val_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=False),
batch_size=3,
num_workers=0),
val_evaluator=evaluator,
work_dir=self.temp_dir.name,
default_scope='mmdet',
optim_wrapper=OptimWrapper(
torch.optim.Adam(ToyModel().parameters())),
train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1),
val_cfg=dict(type='TeacherStudentValLoop'),
default_hooks=dict(logger=dict(type='LoggerHook', interval=1)),
experiment_name='test1')
runner.train()
|
from typing import Dict, List
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from langchain_community.tools.jira.prompt import (
JIRA_CATCH_ALL_PROMPT,
JIRA_CONFLUENCE_PAGE_CREATE_PROMPT,
JIRA_GET_ALL_PROJECTS_PROMPT,
JIRA_ISSUE_CREATE_PROMPT,
JIRA_JQL_PROMPT,
)
from langchain_community.tools.jira.tool import JiraAction
from langchain_community.utilities.jira import JiraAPIWrapper
class JiraToolkit(BaseToolkit):
"""Jira Toolkit.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by creating, deleting, updating, or
reading underlying data.
See https://python.langchain.com/docs/security for more information.
Parameters:
tools: List[BaseTool]. The tools in the toolkit. Default is an empty list.
"""
tools: List[BaseTool] = []
@classmethod
def from_jira_api_wrapper(cls, jira_api_wrapper: JiraAPIWrapper) -> "JiraToolkit":
"""Create a JiraToolkit from a JiraAPIWrapper.
Args:
jira_api_wrapper: JiraAPIWrapper. The Jira API wrapper.
Returns:
JiraToolkit. The Jira toolkit.
"""
operations: List[Dict] = [
{
"mode": "jql",
"name": "jql_query",
"description": JIRA_JQL_PROMPT,
},
{
"mode": "get_projects",
"name": "get_projects",
"description": JIRA_GET_ALL_PROJECTS_PROMPT,
},
{
"mode": "create_issue",
"name": "create_issue",
"description": JIRA_ISSUE_CREATE_PROMPT,
},
{
"mode": "other",
"name": "catch_all_jira_api",
"description": JIRA_CATCH_ALL_PROMPT,
},
{
"mode": "create_page",
"name": "create_confluence_page",
"description": JIRA_CONFLUENCE_PAGE_CREATE_PROMPT,
},
]
tools = [
JiraAction(
name=action["name"],
description=action["description"],
mode=action["mode"],
api_wrapper=jira_api_wrapper,
)
for action in operations
]
return cls(tools=tools) # type: ignore[arg-type]
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return self.tools
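# Hedged usage sketch (assumes the Jira credentials/environment variables that
# JiraAPIWrapper expects are already configured):
#
#     from langchain_community.utilities.jira import JiraAPIWrapper
#     toolkit = JiraToolkit.from_jira_api_wrapper(JiraAPIWrapper())
#     [tool.name for tool in toolkit.get_tools()]
#     # -> ['jql_query', 'get_projects', 'create_issue',
#     #     'catch_all_jira_api', 'create_confluence_page']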
|
from typing import Dict, List
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from langchain_community.tools.jira.prompt import (
JIRA_CATCH_ALL_PROMPT,
JIRA_CONFLUENCE_PAGE_CREATE_PROMPT,
JIRA_GET_ALL_PROJECTS_PROMPT,
JIRA_ISSUE_CREATE_PROMPT,
JIRA_JQL_PROMPT,
)
from langchain_community.tools.jira.tool import JiraAction
from langchain_community.utilities.jira import JiraAPIWrapper
class JiraToolkit(BaseToolkit):
"""Jira Toolkit.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by creating, deleting, updating, or
reading underlying data.
See https://python.langchain.com/docs/security for more information.
Parameters:
tools: List[BaseTool]. The tools in the toolkit. Default is an empty list.
"""
tools: List[BaseTool] = []
@classmethod
def from_jira_api_wrapper(cls, jira_api_wrapper: JiraAPIWrapper) -> "JiraToolkit":
"""Create a JiraToolkit from a JiraAPIWrapper.
Args:
jira_api_wrapper: JiraAPIWrapper. The Jira API wrapper.
Returns:
JiraToolkit. The Jira toolkit.
"""
operations: List[Dict] = [
{
"mode": "jql",
"name": "JQL Query",
"description": JIRA_JQL_PROMPT,
},
{
"mode": "get_projects",
"name": "Get Projects",
"description": JIRA_GET_ALL_PROJECTS_PROMPT,
},
{
"mode": "create_issue",
"name": "Create Issue",
"description": JIRA_ISSUE_CREATE_PROMPT,
},
{
"mode": "other",
"name": "Catch all Jira API call",
"description": JIRA_CATCH_ALL_PROMPT,
},
{
"mode": "create_page",
"name": "Create confluence page",
"description": JIRA_CONFLUENCE_PAGE_CREATE_PROMPT,
},
]
tools = [
JiraAction(
name=action["name"],
description=action["description"],
mode=action["mode"],
api_wrapper=jira_api_wrapper,
)
for action in operations
]
return cls(tools=tools) # type: ignore[arg-type]
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return self.tools
|
import os
import pytest
from jina import Document, Flow
from jinahub.indexers.searcher.compound.FaissPostgresIndexer import FaissPostgresIndexer
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.join(cur_dir, 'docker-compose.yml')
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_integration_parallel(docker_compose):
# test issue reported by @florian
SHARDS = 3
with Flow().add(
uses='FaissPostgresIndexer', shards=SHARDS, uses_with={'total_shards': 3}
) as f:
f.index(Document())
|
import os
import pytest
from jina import Document, Flow
from jinahub.indexers.searcher.compound.FaissPostgresSearcher import (
FaissPostgresSearcher,
)
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.join(cur_dir, 'docker-compose.yml')
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_integration_parallel(docker_compose):
# test issue reported by @florian
SHARDS = 3
with Flow().add(
uses='FaissPostgresSearcher', shards=SHARDS, uses_with={'total_shards': 3}
) as f:
f.index(Document())
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from jina.clients.request import request_generator
from jina.serve.runtimes.gateway.http.fastapi import FastAPIBaseGateway
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyFastAPIGateway(FastAPIBaseGateway):
def __init__(
self,
arg1: str = None,
arg2: str = None,
arg3: str = 'default-arg3',
default_health_check: bool = False,
**kwargs
):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
self.default_health_check = default_health_check
@property
def app(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
if not self.default_health_check:
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.rpc_stream(
request_generator(
exec_endpoint='/',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
return app
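# Hedged note on the endpoints above: with this gateway serving, a request such as
#     GET /stream?text=hello
# sends one Document(text='hello') through the Flow via streamer.rpc_stream and
# returns JSON shaped like {"text": "...", "tags": {...}}; GET / simply echoes
# arg1/arg2/arg3 unless default_health_check is enabled.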
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from jina.clients.request import request_generator
from jina.serve.runtimes.gateway.http.fastapi import FastAPIBaseGateway
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyFastAPIGateway(FastAPIBaseGateway):
def __init__(
self,
arg1: str = None,
arg2: str = None,
arg3: str = 'default-arg3',
default_health_check: bool = False,
**kwargs
):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
self.default_health_check = default_health_check
@property
def app(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
if not self.default_health_check:
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
return app
|
import re
from typing import Any
from langchain.evaluation.schema import StringEvaluator
class RegexMatchStringEvaluator(StringEvaluator):
"""Compute a regex match between the prediction and the reference.
Examples
----------
>>> evaluator = RegexMatchStringEvaluator(flags=re.IGNORECASE)
>>> evaluator.evaluate_strings(
prediction="Mindy is the CTO",
reference="^mindy.*cto$",
) # This will return {'score': 1.0} due to the IGNORECASE flag
>>> evaluator = RegexMatchStringEvaluator()
>>> evaluator.evaluate_strings(
prediction="Mindy is the CTO",
reference="^Mike.*CEO$",
) # This will return {'score': 0.0}
>>> evaluator.evaluate_strings(
prediction="Mindy is the CTO",
reference="^Mike.*CEO$|^Mindy.*CTO$",
) # This will return {'score': 1.0} as the prediction matches the second pattern in the union
""" # noqa: E501
def __init__(self, *, flags: int = 0, **kwargs: Any): # Default is no flags
super().__init__()
self.flags = flags
@property
def requires_input(self) -> bool:
"""
This evaluator does not require input.
"""
return False
@property
def requires_reference(self) -> bool:
"""
This evaluator requires a reference.
"""
return True
@property
def input_keys(self) -> list[str]:
"""
Get the input keys.
Returns:
List[str]: The input keys.
"""
return ["reference", "prediction"]
@property
def evaluation_name(self) -> str:
"""
Get the evaluation name.
Returns:
str: The evaluation name.
"""
return "regex_match"
def _evaluate_strings( # type: ignore[arg-type,override]
self,
*,
prediction: str,
reference: str,
**kwargs: Any,
) -> dict:
"""
Evaluate the regex match between the prediction and the reference.
Args:
prediction (str): The prediction string.
reference (Optional[str], optional): The reference regex pattern.
Returns:
dict: The evaluation results containing the score.
"""
match = re.match(reference, prediction, flags=self.flags)
return {"score": int(bool(match))}
|
import re
from typing import Any, List
from langchain.evaluation.schema import StringEvaluator
class RegexMatchStringEvaluator(StringEvaluator):
"""Compute a regex match between the prediction and the reference.
Examples
----------
>>> evaluator = RegexMatchStringEvaluator(flags=re.IGNORECASE)
>>> evaluator.evaluate_strings(
prediction="Mindy is the CTO",
reference="^mindy.*cto$",
) # This will return {'score': 1.0} due to the IGNORECASE flag
>>> evaluator = RegexMatchStringEvaluator()
>>> evaluator.evaluate_strings(
prediction="Mindy is the CTO",
reference="^Mike.*CEO$",
) # This will return {'score': 0.0}
>>> evaluator.evaluate_strings(
prediction="Mindy is the CTO",
reference="^Mike.*CEO$|^Mindy.*CTO$",
) # This will return {'score': 1.0} as the prediction matches the second pattern in the union
""" # noqa: E501
def __init__(self, *, flags: int = 0, **kwargs: Any): # Default is no flags
super().__init__()
self.flags = flags
@property
def requires_input(self) -> bool:
"""
This evaluator does not require input.
"""
return False
@property
def requires_reference(self) -> bool:
"""
This evaluator requires a reference.
"""
return True
@property
def input_keys(self) -> List[str]:
"""
Get the input keys.
Returns:
List[str]: The input keys.
"""
return ["reference", "prediction"]
@property
def evaluation_name(self) -> str:
"""
Get the evaluation name.
Returns:
str: The evaluation name.
"""
return "regex_match"
def _evaluate_strings( # type: ignore[arg-type,override]
self,
*,
prediction: str,
reference: str,
**kwargs: Any,
) -> dict:
"""
Evaluate the regex match between the prediction and the reference.
Args:
prediction (str): The prediction string.
reference (Optional[str], optional): The reference regex pattern.
Returns:
dict: The evaluation results containing the score.
"""
match = re.match(reference, prediction, flags=self.flags)
return {"score": int(bool(match))}
|
"""**Index** is used to avoid writing duplicated content
into the vectorstore and to avoid overwriting content if it's unchanged.
Indexes also:
* Create knowledge graphs from data.
* Support indexing workflows from LangChain data loaders to vectorstores.
Importantly, Index keeps on working even if the content being written is derived
via a set of transformations from some source content (e.g., indexing children
documents that were derived from parent documents by chunking.)
"""
from typing import TYPE_CHECKING, Any
from langchain_core.indexing.api import IndexingResult, aindex, index
from langchain._api import create_importer
from langchain.indexes._sql_record_manager import SQLRecordManager
from langchain.indexes.vectorstore import VectorstoreIndexCreator
if TYPE_CHECKING:
from langchain_community.graphs.index_creator import GraphIndexCreator
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GraphIndexCreator": "langchain_community.graphs.index_creator",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GraphIndexCreator",
"IndexingResult",
"SQLRecordManager",
"VectorstoreIndexCreator",
# Keep sorted
"aindex",
"index",
]
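# Hedged usage sketch of the indexing API re-exported above (de-duplicating writes
# into a vector store); the SQLite URL and "source" metadata key are assumptions:
#
#     from langchain.indexes import SQLRecordManager, index
#     record_manager = SQLRecordManager("my_namespace", db_url="sqlite:///records.sql")
#     record_manager.create_schema()
#     index(docs, record_manager, vectorstore, cleanup="incremental", source_id_key="source")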
|
"""**Index** is used to avoid writing duplicated content
into the vectorstore and to avoid overwriting content if it's unchanged.
Indexes also:
* Create knowledge graphs from data.
* Support indexing workflows from LangChain data loaders to vectorstores.
Importantly, Index keeps on working even if the content being written is derived
via a set of transformations from some source content (e.g., indexing children
documents that were derived from parent documents by chunking.)
"""
from typing import TYPE_CHECKING, Any
from langchain_core.indexing.api import IndexingResult, aindex, index
from langchain._api import create_importer
from langchain.indexes._sql_record_manager import SQLRecordManager
from langchain.indexes.vectorstore import VectorstoreIndexCreator
if TYPE_CHECKING:
from langchain_community.graphs.index_creator import GraphIndexCreator
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GraphIndexCreator": "langchain_community.graphs.index_creator",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
# Keep sorted
"aindex",
"GraphIndexCreator",
"index",
"IndexingResult",
"SQLRecordManager",
"VectorstoreIndexCreator",
]
|
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import TextDoc
def test_simple_init():
t = TextDoc(text='hello')
assert t.text == 'hello'
def test_str_init():
t = parse_obj_as(TextDoc, 'hello')
assert t.text == 'hello'
def test_doc():
class MyDoc(BaseDoc):
text1: TextDoc
text2: TextDoc
doc = MyDoc(text1='hello', text2=TextDoc(text='world'))
assert doc.text1.text == 'hello'
assert doc.text2.text == 'world'
|
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import TextDoc
def test_simple_init():
t = TextDoc(text='hello')
assert t.text == 'hello'
def test_str_init():
t = parse_obj_as(TextDoc, 'hello')
assert t.text == 'hello'
def test_doc():
class MyDoc(BaseDocument):
text1: TextDoc
text2: TextDoc
doc = MyDoc(text1='hello', text2=TextDoc(text='world'))
assert doc.text1.text == 'hello'
assert doc.text2.text == 'world'
|
import pytest
from llama_index.postprocessor.nvidia_rerank import NVIDIARerank
import respx
@pytest.fixture(autouse=True)
def mock_local_models(respx_mock: respx.MockRouter) -> None:
respx_mock.get(
"https://test_url/v1/models",
json={
"data": [
{"id": "model1"},
]
},
)
@pytest.mark.integration
def test_available_models(mode: dict) -> None:
models = NVIDIARerank(**mode).available_models
assert models
assert isinstance(models, list)
assert all(isinstance(model.id, str) for model in models)
|
import pytest
from llama_index.postprocessor.nvidia_rerank import NVIDIARerank
import respx
@pytest.fixture(autouse=True)
def mock_local_models(respx_mock: respx.MockRouter) -> None:
respx_mock.get(
"https://test_url/v1/models",
json={
"data": [
{"id": "model1"},
]
},
)
@pytest.mark.integration()
def test_available_models(mode: dict) -> None:
models = NVIDIARerank(**mode).available_models
assert models
assert isinstance(models, list)
assert all(isinstance(model.id, str) for model in models)
|
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
preprocess_cfg=preprocess_cfg,
type='MaskRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
type='RPNHead',
in_channels=1024,
feat_channels=1024,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
shared_head=dict(
type='ResLayer',
depth=50,
stage=3,
stride=2,
dilation=1,
style='caffe',
norm_cfg=norm_cfg,
norm_eval=True),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=1024,
featmap_strides=[16]),
bbox_head=dict(
type='BBoxHead',
with_avg_pool=True,
roi_feat_size=7,
in_channels=2048,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
mask_roi_extractor=None,
mask_head=dict(
type='FCNMaskHead',
num_convs=0,
in_channels=2048,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=14,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=6000,
nms=dict(type='nms', iou_threshold=0.7),
max_per_img=1000,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)))
|
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='MaskRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
type='RPNHead',
in_channels=1024,
feat_channels=1024,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
shared_head=dict(
type='ResLayer',
depth=50,
stage=3,
stride=2,
dilation=1,
style='caffe',
norm_cfg=norm_cfg,
norm_eval=True),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=1024,
featmap_strides=[16]),
bbox_head=dict(
type='BBoxHead',
with_avg_pool=True,
roi_feat_size=7,
in_channels=2048,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
mask_roi_extractor=None,
mask_head=dict(
type='FCNMaskHead',
num_convs=0,
in_channels=2048,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=14,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=6000,
nms=dict(type='nms', iou_threshold=0.7),
max_per_img=1000,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)))
|
"""**Utility functions** for LangChain.
These functions do not depend on any other LangChain module.
"""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
# for type checking and IDE support, we include the imports here
# but we don't want to eagerly import them at runtime
from langchain_core.utils import image
from langchain_core.utils.aiter import abatch_iterate
from langchain_core.utils.env import get_from_dict_or_env, get_from_env
from langchain_core.utils.formatting import StrictFormatter, formatter
from langchain_core.utils.input import (
get_bolded_text,
get_color_mapping,
get_colored_text,
print_text,
)
from langchain_core.utils.iter import batch_iterate
from langchain_core.utils.loading import try_load_from_hub
from langchain_core.utils.pydantic import pre_init
from langchain_core.utils.strings import comma_list, stringify_dict, stringify_value
from langchain_core.utils.utils import (
build_extra_kwargs,
check_package_version,
convert_to_secret_str,
from_env,
get_pydantic_field_names,
guard_import,
mock_now,
raise_for_status_with_text,
secret_from_env,
xor_args,
)
__all__ = (
"build_extra_kwargs",
"StrictFormatter",
"check_package_version",
"convert_to_secret_str",
"formatter",
"get_bolded_text",
"get_color_mapping",
"get_colored_text",
"get_pydantic_field_names",
"guard_import",
"mock_now",
"print_text",
"raise_for_status_with_text",
"xor_args",
"try_load_from_hub",
"image",
"get_from_env",
"get_from_dict_or_env",
"stringify_dict",
"comma_list",
"stringify_value",
"pre_init",
"batch_iterate",
"abatch_iterate",
"from_env",
"secret_from_env",
)
_dynamic_imports = {
"image": "__module__",
"abatch_iterate": "aiter",
"get_from_dict_or_env": "env",
"get_from_env": "env",
"StrictFormatter": "formatting",
"formatter": "formatting",
"get_bolded_text": "input",
"get_color_mapping": "input",
"get_colored_text": "input",
"print_text": "input",
"batch_iterate": "iter",
"try_load_from_hub": "loading",
"pre_init": "pydantic",
"comma_list": "strings",
"stringify_dict": "strings",
"stringify_value": "strings",
"build_extra_kwargs": "utils",
"check_package_version": "utils",
"convert_to_secret_str": "utils",
"from_env": "utils",
"get_pydantic_field_names": "utils",
"guard_import": "utils",
"mock_now": "utils",
"secret_from_env": "utils",
"xor_args": "utils",
"raise_for_status_with_text": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
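The `_dynamic_imports` table and the module-level `__getattr__` above (repeated with minor differences in the next variant) implement lazy imports: a submodule of `langchain_core.utils` is only loaded the first time one of its attributes is requested, and the result is cached in the module globals. A small illustrative snippet, assuming `langchain_core` is installed:

import langchain_core.utils as lc_utils

# First attribute access goes through __getattr__, which maps
# "batch_iterate" -> the "iter" submodule and imports it on demand.
chunks = list(lc_utils.batch_iterate(2, [1, 2, 3, 4, 5]))
print(chunks)  # [[1, 2], [3, 4], [5]]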
"""**Utility functions** for LangChain.
These functions do not depend on any other LangChain module.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
# for type checking and IDE support, we include the imports here
# but we don't want to eagerly import them at runtime
from langchain_core.utils import image
from langchain_core.utils.aiter import abatch_iterate
from langchain_core.utils.env import get_from_dict_or_env, get_from_env
from langchain_core.utils.formatting import StrictFormatter, formatter
from langchain_core.utils.input import (
get_bolded_text,
get_color_mapping,
get_colored_text,
print_text,
)
from langchain_core.utils.iter import batch_iterate
from langchain_core.utils.loading import try_load_from_hub
from langchain_core.utils.pydantic import pre_init
from langchain_core.utils.strings import comma_list, stringify_dict, stringify_value
from langchain_core.utils.utils import (
build_extra_kwargs,
check_package_version,
convert_to_secret_str,
from_env,
get_pydantic_field_names,
guard_import,
mock_now,
raise_for_status_with_text,
secret_from_env,
xor_args,
)
__all__ = [
"build_extra_kwargs",
"StrictFormatter",
"check_package_version",
"convert_to_secret_str",
"formatter",
"get_bolded_text",
"get_color_mapping",
"get_colored_text",
"get_pydantic_field_names",
"guard_import",
"mock_now",
"print_text",
"raise_for_status_with_text",
"xor_args",
"try_load_from_hub",
"image",
"get_from_env",
"get_from_dict_or_env",
"stringify_dict",
"comma_list",
"stringify_value",
"pre_init",
"batch_iterate",
"abatch_iterate",
"from_env",
"secret_from_env",
]
_dynamic_imports = {
"image": "__module__",
"abatch_iterate": "aiter",
"get_from_dict_or_env": "env",
"get_from_env": "env",
"StrictFormatter": "formatting",
"formatter": "formatting",
"get_bolded_text": "input",
"get_color_mapping": "input",
"get_colored_text": "input",
"print_text": "input",
"batch_iterate": "iter",
"try_load_from_hub": "loading",
"pre_init": "pydantic",
"comma_list": "strings",
"stringify_dict": "strings",
"stringify_value": "strings",
"build_extra_kwargs": "utils",
"check_package_version": "utils",
"convert_to_secret_str": "utils",
"from_env": "utils",
"get_pydantic_field_names": "utils",
"guard_import": "utils",
"mock_now": "utils",
"secret_from_env": "utils",
"xor_args": "utils",
"raise_for_status_with_text": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
import logging
import tqdm
class LoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super().__init__(level)
def emit(self, record):
try:
msg = self.format(record)
tqdm.tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.handleError(record)
def install_logger(given_logger, level=logging.WARNING, fmt="%(levelname)s:%(name)s:%(message)s"):
"""Configures the given logger; format, logging level, style, etc"""
import coloredlogs
def add_notice_log_level():
"""Creates a new 'notice' logging level"""
# inspired by:
# https://stackoverflow.com/questions/2183233/how-to-add-a-custom-loglevel-to-pythons-logging-facility
NOTICE_LEVEL_NUM = 25
logging.addLevelName(NOTICE_LEVEL_NUM, "NOTICE")
def notice(self, message, *args, **kws):
if self.isEnabledFor(NOTICE_LEVEL_NUM):
self._log(NOTICE_LEVEL_NUM, message, args, **kws)
logging.Logger.notice = notice
# Add an extra logging level above INFO and below WARNING
add_notice_log_level()
# More style info at:
# https://coloredlogs.readthedocs.io/en/latest/api.html
field_styles = coloredlogs.DEFAULT_FIELD_STYLES.copy()
field_styles["asctime"] = {}
level_styles = coloredlogs.DEFAULT_LEVEL_STYLES.copy()
level_styles["debug"] = {"color": "white", "faint": True}
level_styles["notice"] = {"color": "cyan", "bold": True}
coloredlogs.install(
logger=given_logger,
level=level,
use_chroot=False,
fmt=fmt,
level_styles=level_styles,
field_styles=field_styles,
)
|
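A brief, hedged usage sketch for the two helpers above. The `logging_utils` import path is a placeholder for wherever this file lives; `tqdm` and `coloredlogs` must be installed, as the module itself assumes.

import logging

from logging_utils import LoggingHandler, install_logger  # placeholder module name

# tqdm-friendly plain logging: records go through tqdm.tqdm.write, so they
# do not clobber progress bars.
logging.basicConfig(level=logging.INFO, handlers=[LoggingHandler()])

# Colored output plus the extra NOTICE level on a specific logger.
logger = logging.getLogger(__name__)
install_logger(logger, level=logging.INFO)
logger.notice("between INFO and WARNING")  # 'notice' is added by install_logger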
import logging
import tqdm
class LoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super().__init__(level)
def emit(self, record):
try:
msg = self.format(record)
tqdm.tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.handleError(record)
def install_logger(given_logger, level=logging.WARNING, fmt="%(levelname)s:%(name)s:%(message)s"):
"""Configures the given logger; format, logging level, style, etc"""
import coloredlogs
def add_notice_log_level():
"""Creates a new 'notice' logging level"""
# inspired by:
# https://stackoverflow.com/questions/2183233/how-to-add-a-custom-loglevel-to-pythons-logging-facility
NOTICE_LEVEL_NUM = 25
logging.addLevelName(NOTICE_LEVEL_NUM, "NOTICE")
def notice(self, message, *args, **kws):
if self.isEnabledFor(NOTICE_LEVEL_NUM):
self._log(NOTICE_LEVEL_NUM, message, args, **kws)
logging.Logger.notice = notice
# Add an extra logging level above INFO and below WARNING
add_notice_log_level()
# More style info at:
# https://coloredlogs.readthedocs.io/en/latest/api.html
field_styles = coloredlogs.DEFAULT_FIELD_STYLES.copy()
field_styles["asctime"] = {}
level_styles = coloredlogs.DEFAULT_LEVEL_STYLES.copy()
level_styles["debug"] = {"color": "white", "faint": True}
level_styles["notice"] = {"color": "cyan", "bold": True}
coloredlogs.install(
logger=given_logger,
level=level,
use_chroot=False,
fmt=fmt,
level_styles=level_styles,
field_styles=field_styles,
)
|
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TorchEmbedding, TorchTensor
def test_proto_tensor():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
tensor._to_node_protobuf()
def test_json_schema():
schema_json_of(TorchTensor)
def test_dump_json():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
orjson_dumps(tensor)
def test_unwrap():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
ndarray = tensor.unwrap()
assert not isinstance(ndarray, TorchTensor)
assert isinstance(tensor, TorchTensor)
assert isinstance(ndarray, torch.Tensor)
assert tensor.data_ptr() == ndarray.data_ptr()
assert (ndarray == torch.zeros(3, 224, 224)).all()
def test_parametrized():
# correct shape, single axis
tensor = parse_obj_as(TorchTensor[128], torch.zeros(128))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (128,)
# correct shape, multiple axis
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(3, 224, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
# wrong but reshapable shape
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 3, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
# wrong and not reshapable shape
with pytest.raises(ValueError):
parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 224))
# test independent variable dimensions
tensor = parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(3, 224, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
tensor = parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(3, 60, 128))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 60, 128)
with pytest.raises(ValueError):
parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(4, 224, 224))
with pytest.raises(ValueError):
parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(100, 1))
# test dependent variable dimensions
tensor = parse_obj_as(TorchTensor[3, 'x', 'x'], torch.zeros(3, 224, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
with pytest.raises(ValueError):
_ = parse_obj_as(TorchTensor[3, 'x', 'x'], torch.zeros(3, 60, 128))
with pytest.raises(ValueError):
_ = parse_obj_as(TorchTensor[3, 'x', 'x'], torch.zeros(3, 60))
@pytest.mark.parametrize('shape', [(3, 224, 224), (224, 224, 3)])
def test_parameterized_tensor_class_name(shape):
MyTT = TorchTensor[3, 224, 224]
tensor = parse_obj_as(MyTT, torch.zeros(shape))
assert MyTT.__name__ == 'TorchTensor[3, 224, 224]'
assert MyTT.__qualname__ == 'TorchTensor[3, 224, 224]'
assert tensor.__class__.__name__ == 'TorchTensor'
assert tensor.__class__.__qualname__ == 'TorchTensor'
assert f'{tensor[0][0][0]}' == 'TorchTensor(0.)'
def test_torch_embedding():
# correct shape
tensor = parse_obj_as(TorchEmbedding[128], torch.zeros(128))
assert isinstance(tensor, TorchEmbedding)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (128,)
# wrong shape at data setting time
with pytest.raises(ValueError):
parse_obj_as(TorchEmbedding[128], torch.zeros(256))
# illegal shape at class creation time
with pytest.raises(ValueError):
parse_obj_as(TorchEmbedding[128, 128], torch.zeros(128, 128))
def test_parametrized_subclass():
c1 = TorchTensor[128]
c2 = TorchTensor[128]
assert issubclass(c1, c2)
assert issubclass(c1, TorchTensor)
assert issubclass(c1, torch.Tensor)
assert not issubclass(c1, TorchTensor[256])
def test_parametrized_instance():
t = parse_obj_as(TorchTensor[128], torch.zeros(128))
assert isinstance(t, TorchTensor[128])
assert isinstance(t, TorchTensor)
assert isinstance(t, torch.Tensor)
assert not isinstance(t, TorchTensor[256])
assert not isinstance(t, TorchTensor[2, 128])
assert not isinstance(t, TorchTensor[2, 2, 64])
def test_parametrized_equality():
t1 = parse_obj_as(TorchTensor[128], torch.zeros(128))
t2 = parse_obj_as(TorchTensor[128], torch.zeros(128))
assert (t1 == t2).all()
def test_parametrized_operations():
t1 = parse_obj_as(TorchTensor[128], torch.zeros(128))
t2 = parse_obj_as(TorchTensor[128], torch.zeros(128))
t_result = t1 + t2
assert isinstance(t_result, torch.Tensor)
assert isinstance(t_result, TorchTensor)
assert isinstance(t_result, TorchTensor[128])
|
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TorchEmbedding, TorchTensor
def test_proto_tensor():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
tensor._to_node_protobuf()
def test_json_schema():
schema_json_of(TorchTensor)
def test_dump_json():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
orjson_dumps(tensor)
def test_unwrap():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
ndarray = tensor.unwrap()
assert not isinstance(ndarray, TorchTensor)
assert isinstance(tensor, TorchTensor)
assert isinstance(ndarray, torch.Tensor)
assert tensor.data_ptr() == ndarray.data_ptr()
assert (ndarray == torch.zeros(3, 224, 224)).all()
def test_parametrized():
# correct shape, single axis
tensor = parse_obj_as(TorchTensor[128], torch.zeros(128))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (128,)
# correct shape, multiple axis
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(3, 224, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
# wrong but reshapable shape
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 3, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
# wrong and not reshapable shape
with pytest.raises(ValueError):
parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 224))
# test independent variable dimensions
tensor = parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(3, 224, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
tensor = parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(3, 60, 128))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 60, 128)
with pytest.raises(ValueError):
parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(4, 224, 224))
with pytest.raises(ValueError):
parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(100, 1))
# test dependent variable dimensions
tensor = parse_obj_as(TorchTensor[3, 'x', 'x'], torch.zeros(3, 224, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
with pytest.raises(ValueError):
tensor = parse_obj_as(TorchTensor[3, 'x', 'x'], torch.zeros(3, 60, 128))
with pytest.raises(ValueError):
tensor = parse_obj_as(TorchTensor[3, 'x', 'x'], torch.zeros(3, 60))
@pytest.mark.parametrize('shape', [(3, 224, 224), (224, 224, 3)])
def test_parameterized_tensor_class_name(shape):
MyTT = TorchTensor[3, 224, 224]
tensor = parse_obj_as(MyTT, torch.zeros(shape))
assert MyTT.__name__ == 'TorchTensor[3, 224, 224]'
assert MyTT.__qualname__ == 'TorchTensor[3, 224, 224]'
assert tensor.__class__.__name__ == 'TorchTensor'
assert tensor.__class__.__qualname__ == 'TorchTensor'
assert f'{tensor[0][0][0]}' == 'TorchTensor(0.)'
def test_torch_embedding():
# correct shape
tensor = parse_obj_as(TorchEmbedding[128], torch.zeros(128))
assert isinstance(tensor, TorchEmbedding)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (128,)
# wrong shape at data setting time
with pytest.raises(ValueError):
parse_obj_as(TorchEmbedding[128], torch.zeros(256))
# illegal shape at class creation time
with pytest.raises(ValueError):
parse_obj_as(TorchEmbedding[128, 128], torch.zeros(128, 128))
def test_parametrized_subclass():
c1 = TorchTensor[128]
c2 = TorchTensor[128]
assert issubclass(c1, c2)
assert issubclass(c1, TorchTensor)
assert issubclass(c1, torch.Tensor)
assert not issubclass(c1, TorchTensor[256])
def test_parametrized_instance():
t = parse_obj_as(TorchTensor[128], torch.zeros(128))
assert isinstance(t, TorchTensor[128])
assert isinstance(t, TorchTensor)
assert isinstance(t, torch.Tensor)
assert not isinstance(t, TorchTensor[256])
assert not isinstance(t, TorchTensor[2, 128])
assert not isinstance(t, TorchTensor[2, 2, 64])
def test_parametrized_equality():
t1 = parse_obj_as(TorchTensor[128], torch.zeros(128))
t2 = parse_obj_as(TorchTensor[128], torch.zeros(128))
assert (t1 == t2).all()
def test_parametrized_operations():
t1 = parse_obj_as(TorchTensor[128], torch.zeros(128))
t2 = parse_obj_as(TorchTensor[128], torch.zeros(128))
t_result = t1 + t2
assert isinstance(t_result, torch.Tensor)
assert isinstance(t_result, TorchTensor)
assert isinstance(t_result, TorchTensor[128])
|
from __future__ import annotations
from enum import Enum
from typing import Any, Optional, Tuple, Union
import torch
from ._datapoint import Datapoint
class BoundingBoxFormat(Enum):
"""[BETA] Coordinate format of a bounding box.
Available formats are
* ``XYXY``
* ``XYWH``
* ``CXCYWH``
"""
XYXY = "XYXY"
XYWH = "XYWH"
CXCYWH = "CXCYWH"
class BoundingBoxes(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for bounding boxes.
Args:
data: Any data that can be turned into a tensor with :func:`torch.as_tensor`.
format (BoundingBoxFormat, str): Format of the bounding box.
canvas_size (two-tuple of ints): Height and width of the corresponding image or video.
dtype (torch.dtype, optional): Desired data type of the bounding box. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device of the bounding box. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the bounding box is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations on the bounding box. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
format: BoundingBoxFormat
canvas_size: Tuple[int, int]
@classmethod
def _wrap(cls, tensor: torch.Tensor, *, format: BoundingBoxFormat, canvas_size: Tuple[int, int]) -> BoundingBoxes: # type: ignore[override]
bounding_boxes = tensor.as_subclass(cls)
bounding_boxes.format = format
bounding_boxes.canvas_size = canvas_size
return bounding_boxes
def __new__(
cls,
data: Any,
*,
format: Union[BoundingBoxFormat, str],
canvas_size: Tuple[int, int],
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> BoundingBoxes:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
if isinstance(format, str):
format = BoundingBoxFormat[format.upper()]
return cls._wrap(tensor, format=format, canvas_size=canvas_size)
@classmethod
def wrap_like(
cls,
other: BoundingBoxes,
tensor: torch.Tensor,
*,
format: Optional[BoundingBoxFormat] = None,
canvas_size: Optional[Tuple[int, int]] = None,
) -> BoundingBoxes:
"""Wrap a :class:`torch.Tensor` as :class:`BoundingBoxes` from a reference.
Args:
other (BoundingBoxes): Reference bounding box.
tensor (Tensor): Tensor to be wrapped as :class:`BoundingBoxes`
format (BoundingBoxFormat, str, optional): Format of the bounding box. If omitted, it is taken from the
reference.
canvas_size (two-tuple of ints, optional): Height and width of the corresponding image or video. If
omitted, it is taken from the reference.
"""
if isinstance(format, str):
format = BoundingBoxFormat[format.upper()]
return cls._wrap(
tensor,
format=format if format is not None else other.format,
canvas_size=canvas_size if canvas_size is not None else other.canvas_size,
)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr(format=self.format, canvas_size=self.canvas_size)
|
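A short, hedged example of constructing the class defined above. The `torchvision.datapoints` import path is an assumption based on the [BETA] datapoints API of this era; later releases expose the same class under `tv_tensors`.

from torchvision import datapoints  # assumed import path for this class

boxes = datapoints.BoundingBoxes(
    [[10, 10, 50, 80], [20, 30, 60, 90]],
    format="XYXY",            # strings are upper-cased into BoundingBoxFormat
    canvas_size=(224, 224),   # (height, width) of the underlying image
)
print(boxes.format, boxes.canvas_size)

# wrap_like re-attaches format/canvas_size after a plain-tensor operation.
shifted = datapoints.BoundingBoxes.wrap_like(boxes, boxes + 5)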
from __future__ import annotations
from enum import Enum
from typing import Any, Optional, Tuple, Union
import torch
from ._datapoint import Datapoint
class BoundingBoxFormat(Enum):
"""[BETA] Coordinate format of a bounding box.
Available formats are
* ``XYXY``
* ``XYWH``
* ``CXCYWH``
"""
XYXY = "XYXY"
XYWH = "XYWH"
CXCYWH = "CXCYWH"
class BoundingBoxes(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for bounding boxes.
Args:
data: Any data that can be turned into a tensor with :func:`torch.as_tensor`.
format (BoundingBoxFormat, str): Format of the bounding box.
canvas_size (two-tuple of ints): Height and width of the corresponding image or video.
dtype (torch.dtype, optional): Desired data type of the bounding box. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device of the bounding box. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the bounding box is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations on the bounding box. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
format: BoundingBoxFormat
canvas_size: Tuple[int, int]
@classmethod
def _wrap(cls, tensor: torch.Tensor, *, format: BoundingBoxFormat, canvas_size: Tuple[int, int]) -> BoundingBoxes:
bounding_boxes = tensor.as_subclass(cls)
bounding_boxes.format = format
bounding_boxes.canvas_size = canvas_size
return bounding_boxes
def __new__(
cls,
data: Any,
*,
format: Union[BoundingBoxFormat, str],
canvas_size: Tuple[int, int],
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> BoundingBoxes:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
if isinstance(format, str):
format = BoundingBoxFormat[format.upper()]
return cls._wrap(tensor, format=format, canvas_size=canvas_size)
@classmethod
def wrap_like(
cls,
other: BoundingBoxes,
tensor: torch.Tensor,
*,
format: Optional[BoundingBoxFormat] = None,
canvas_size: Optional[Tuple[int, int]] = None,
) -> BoundingBoxes:
"""Wrap a :class:`torch.Tensor` as :class:`BoundingBoxes` from a reference.
Args:
other (BoundingBoxes): Reference bounding box.
tensor (Tensor): Tensor to be wrapped as :class:`BoundingBoxes`
format (BoundingBoxFormat, str, optional): Format of the bounding box. If omitted, it is taken from the
reference.
canvas_size (two-tuple of ints, optional): Height and width of the corresponding image or video. If
omitted, it is taken from the reference.
"""
if isinstance(format, str):
format = BoundingBoxFormat[format.upper()]
return cls._wrap(
tensor,
format=format if format is not None else other.format,
canvas_size=canvas_size if canvas_size is not None else other.canvas_size,
)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr(format=self.format, canvas_size=self.canvas_size)
|
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
class CrossEntropyLoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fct: nn.Module = nn.Identity(), **kwargs) -> None:
"""
Computes the Cross Entropy Loss for a CrossEncoder model. This loss is used to train a model to predict the
correct class label for a given pair of sentences. The number of classes should be equal to the number of model
output labels.
Args:
model (:class:`~sentence_transformers.cross_encoder.CrossEncoder`): A CrossEncoder model to be trained.
activation_fct (:class:`~torch.nn.Module`): Activation function applied to the logits before computing the loss. Defaults to :class:`~torch.nn.Identity`.
**kwargs: Additional keyword arguments passed to the underlying :class:`torch.nn.CrossEntropyLoss`.
References:
- :class:`torch.nn.CrossEntropyLoss`
- `Cross Encoder > Training Examples > Natural Language Inference <../../../examples/cross_encoder/training/nli/README.html>`_
Requirements:
1. Your model can be initialized with `num_labels > 1` to predict multiple classes.
2. The number of dataset classes should be equal to the number of model output labels (`model.num_labels`).
Inputs:
+-------------------------------------------------+--------+-------------------------------+
| Texts | Labels | Number of Model Output Labels |
+=================================================+========+===============================+
| (sentence_A, sentence_B) pairs | class | `num_classes` |
+-------------------------------------------------+--------+-------------------------------+
Example:
::
from sentence_transformers.cross_encoder import CrossEncoder, CrossEncoderTrainer, losses
from datasets import Dataset
model = CrossEncoder("microsoft/mpnet-base", num_labels=2)
train_dataset = Dataset.from_dict({
"sentence1": ["How can I be a good geologist?", "What is the capital of France?"],
"sentence2": ["What should I do to be a great geologist?", "What is the capital of Germany?"],
"label": [1, 0], # 1: duplicate, 0: not duplicate
})
loss = losses.CrossEntropyLoss(model)
trainer = CrossEncoderTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.activation_fct = activation_fct
self.ce_loss = nn.CrossEntropyLoss(**kwargs)
if not isinstance(self.model, CrossEncoder):
raise ValueError(
f"{self.__class__.__name__} expects a model of type CrossEncoder, "
f"but got a model of type {type(self.model)}."
)
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"CrossEntropyLoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0]
logits = self.activation_fct(logits)
loss = self.ce_loss(logits, labels)
return loss
|
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
class CrossEntropyLoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fct: nn.Module = nn.Identity(), **kwargs) -> None:
"""
Computes the Cross Entropy Loss for a CrossEncoder model. This loss is used to train a model to predict the
correct class label for a given pair of sentences. The number of classes should be equal to the number of model
output labels.
Args:
model (:class:`~sentence_transformers.cross_encoder.CrossEncoder`): A CrossEncoder model to be trained.
activation_fct (:class:`~torch.nn.Module`): Activation function applied to the logits before computing the loss. Defaults to :class:`~torch.nn.Identity`.
**kwargs: Additional keyword arguments passed to the underlying :class:`torch.nn.CrossEntropyLoss`.
References:
- :class:`torch.nn.CrossEntropyLoss`
Requirements:
1. Your model can be initialized with `num_labels > 1` to predict multiple classes.
2. The number of dataset classes should be equal to the number of model output labels (`model.num_labels`).
Inputs:
+-------------------------------------------------+--------+-------------------------------+
| Texts | Labels | Number of Model Output Labels |
+=================================================+========+===============================+
| (sentence_A, sentence_B) pairs | class | `num_classes` |
+-------------------------------------------------+--------+-------------------------------+
Example:
::
from sentence_transformers.cross_encoder import CrossEncoder, CrossEncoderTrainer, losses
from datasets import Dataset
model = CrossEncoder("microsoft/mpnet-base", num_labels=2)
train_dataset = Dataset.from_dict({
"sentence1": ["How can I be a good geologist?", "What is the capital of France?"],
"sentence2": ["What should I do to be a great geologist?", "What is the capital of Germany?"],
"label": [1, 0], # 1: duplicate, 0: not duplicate
})
loss = losses.CrossEntropyLoss(model)
trainer = CrossEncoderTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.activation_fct = activation_fct
self.ce_loss = nn.CrossEntropyLoss(**kwargs)
if not isinstance(self.model, CrossEncoder):
raise ValueError(
f"{self.__class__.__name__} expects a model of type CrossEncoder, "
f"but got a model of type {type(self.model)}."
)
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"CrossEntropyLoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0]
logits = self.activation_fct(logits)
loss = self.ce_loss(logits, labels)
return loss
|
import torchaudio
from torchaudio.prototype.pipelines import VGGISH
def test_vggish():
input_sr = VGGISH.sample_rate
input_proc = VGGISH.get_input_processor()
model = VGGISH.get_model()
path = torchaudio.utils.download_asset("test-assets/Chopin_Ballade_-1_In_G_Minor,_Op._23_excerpt.mp3")
waveform, sr = torchaudio.load(path, backend="ffmpeg")
waveform = waveform.mean(axis=0)
waveform = torchaudio.functional.resample(waveform, sr, input_sr)
batch = input_proc(waveform)
assert batch.shape == (62, 1, 96, 64)
output = model(batch)
assert output.shape == (62, 128)
|
import unittest
import torchaudio
from torchaudio.prototype.pipelines import VGGISH
class VGGishPipelineTest(unittest.TestCase):
def test_vggish(self):
input_sr = VGGISH.sample_rate
input_proc = VGGISH.get_input_processor()
model = VGGISH.get_model()
path = torchaudio.utils.download_asset("test-assets/Chopin_Ballade_-1_In_G_Minor,_Op._23_excerpt.mp3")
waveform, sr = torchaudio.load(path, backend="ffmpeg")
waveform = waveform.mean(axis=0)
waveform = torchaudio.functional.resample(waveform, sr, input_sr)
batch = input_proc(waveform)
assert batch.shape == (62, 1, 96, 64)
output = model(batch)
assert output.shape == (62, 128)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .amp import autocast
from .base_loop import BaseLoop
from .checkpoint import (CheckpointLoader, find_latest_checkpoint,
get_deprecated_model_names, get_external_models,
get_mmcls_models, get_state_dict,
get_torchvision_models, load_checkpoint,
load_state_dict, save_checkpoint, weights_to_cpu)
from .loops import EpochBasedTrainLoop, IterBasedTrainLoop, TestLoop, ValLoop
from .priority import Priority, get_priority
from .runner import Runner
__all__ = [
'BaseLoop', 'load_state_dict', 'get_torchvision_models',
'get_external_models', 'get_mmcls_models', 'get_deprecated_model_names',
'CheckpointLoader', 'load_checkpoint', 'weights_to_cpu', 'get_state_dict',
'save_checkpoint', 'EpochBasedTrainLoop', 'IterBasedTrainLoop', 'ValLoop',
'TestLoop', 'Runner', 'get_priority', 'Priority', 'find_latest_checkpoint',
'autocast'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .amp import autocast
from .base_loop import BaseLoop
from .checkpoint import (CheckpointLoader, find_latest_checkpoint,
get_deprecated_model_names, get_external_models,
get_mmcls_models, get_state_dict,
get_torchvision_models, load_checkpoint,
load_state_dict, save_checkpoint, weights_to_cpu)
from .loops import EpochBasedTrainLoop, IterBasedTrainLoop, TestLoop, ValLoop
from .runner import Runner
__all__ = [
'BaseLoop', 'load_state_dict', 'get_torchvision_models',
'get_external_models', 'get_mmcls_models', 'get_deprecated_model_names',
'CheckpointLoader', 'load_checkpoint', 'weights_to_cpu', 'get_state_dict',
'save_checkpoint', 'EpochBasedTrainLoop', 'IterBasedTrainLoop', 'ValLoop',
'TestLoop', 'Runner', 'find_latest_checkpoint', 'autocast'
]
|
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import Mesh3DUrl, NdArray
from docarray.typing.url.url_3d.mesh_url import Mesh3DLoadResult
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
url = parse_obj_as(Mesh3DUrl, file_path)
vertices, faces = url.load()
assert isinstance(vertices, np.ndarray)
assert isinstance(vertices, NdArray)
assert isinstance(faces, np.ndarray)
assert isinstance(faces, NdArray)
assert vertices.shape[1] == 3
assert faces.shape[1] == 3
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_path',
[*MESH_FILES.values(), REMOTE_OBJ_FILE],
)
@pytest.mark.parametrize('field', [f for f in Mesh3DLoadResult._fields])
def test_load_one_of_fields(file_path, field):
url = parse_obj_as(Mesh3DUrl, file_path)
field = getattr(url.load(), field)
assert isinstance(field, np.ndarray)
assert isinstance(field, NdArray)
def test_json_schema():
schema_json_of(Mesh3DUrl)
def test_dump_json():
url = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'file_format,path_to_file',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('obj', REMOTE_OBJ_FILE),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
('illegal', 'my/local/text/file.png'),
],
)
def test_validation(file_format, path_to_file):
if file_format == 'illegal':
with pytest.raises(ValueError, match='Mesh3DUrl'):
parse_obj_as(Mesh3DUrl, path_to_file)
else:
url = parse_obj_as(Mesh3DUrl, path_to_file)
assert isinstance(url, Mesh3DUrl)
assert isinstance(url, str)
@pytest.mark.proto
def test_proto_mesh_url():
uri = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
|
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import Mesh3DUrl, NdArray
from docarray.typing.url.url_3d.mesh_url import Mesh3DLoadResult
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
url = parse_obj_as(Mesh3DUrl, file_path)
vertices, faces = url.load()
assert isinstance(vertices, np.ndarray)
assert isinstance(vertices, NdArray)
assert isinstance(faces, np.ndarray)
assert isinstance(faces, NdArray)
assert vertices.shape[1] == 3
assert faces.shape[1] == 3
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_path',
[*MESH_FILES.values(), REMOTE_OBJ_FILE],
)
@pytest.mark.parametrize('field', [f for f in Mesh3DLoadResult._fields])
def test_load_one_of_fields(file_path, field):
url = parse_obj_as(Mesh3DUrl, file_path)
field = getattr(url.load(), field)
assert isinstance(field, np.ndarray)
assert isinstance(field, NdArray)
def test_json_schema():
schema_json_of(Mesh3DUrl)
def test_dump_json():
url = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'file_format,path_to_file',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('obj', REMOTE_OBJ_FILE),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
('illegal', 'my/local/text/file.png'),
],
)
def test_validation(file_format, path_to_file):
if file_format == 'illegal':
with pytest.raises(ValueError, match='Mesh3DUrl'):
parse_obj_as(Mesh3DUrl, path_to_file)
else:
url = parse_obj_as(Mesh3DUrl, path_to_file)
assert isinstance(url, Mesh3DUrl)
assert isinstance(url, str)
def test_proto_mesh_url():
uri = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
|
"""Configuration for unit tests."""
from collections.abc import Iterator, Sequence
from importlib import util
import pytest
from blockbuster import blockbuster_ctx
from pytest import Config, Function, Parser
@pytest.fixture(autouse=True)
def blockbuster() -> Iterator[None]:
with blockbuster_ctx("langchain") as bb:
bb.functions["io.TextIOWrapper.read"].can_block_in(
"langchain/__init__.py", "<module>"
)
for func in ["os.stat", "os.path.abspath"]:
(
bb.functions[func]
.can_block_in("langchain_core/runnables/base.py", "__repr__")
.can_block_in(
"langchain_core/beta/runnables/context.py", "aconfig_with_context"
)
)
for func in ["os.stat", "io.TextIOWrapper.read"]:
bb.functions[func].can_block_in(
"langsmith/client.py", "_default_retry_config"
)
for bb_function in bb.functions.values():
bb_function.can_block_in(
"freezegun/api.py", "_get_cached_module_attributes"
)
yield
def pytest_addoption(parser: Parser) -> None:
"""Add custom command line options to pytest."""
parser.addoption(
"--only-extended",
action="store_true",
help="Only run extended tests. Does not allow skipping any extended tests.",
)
parser.addoption(
"--only-core",
action="store_true",
help="Only run core tests. Never runs any extended tests.",
)
parser.addoption(
"--community",
action="store_true",
dest="community",
default=False,
help="enable running unite tests that require community",
)
def pytest_collection_modifyitems(config: Config, items: Sequence[Function]) -> None:
"""Add implementations for handling custom markers.
At the moment, this adds support for a custom `requires` marker.
The `requires` marker is used to denote tests that require one or more packages
to be installed to run. If the package is not installed, the test is skipped.
The `requires` marker syntax is:
.. code-block:: python
@pytest.mark.requires("package1", "package2")
def test_something():
...
"""
# Mapping from the name of a package to whether it is installed or not.
# Used to avoid repeated calls to `util.find_spec`
required_pkgs_info: dict[str, bool] = {}
only_extended = config.getoption("--only-extended") or False
only_core = config.getoption("--only-core") or False
if not config.getoption("--community"):
skip_community = pytest.mark.skip(reason="need --community option to run")
for item in items:
if "community" in item.keywords:
item.add_marker(skip_community)
if only_extended and only_core:
raise ValueError("Cannot specify both `--only-extended` and `--only-core`.")
for item in items:
requires_marker = item.get_closest_marker("requires")
if requires_marker is not None:
if only_core:
item.add_marker(pytest.mark.skip(reason="Skipping not a core test."))
continue
# Iterate through the list of required packages
required_pkgs = requires_marker.args
for pkg in required_pkgs:
# If we haven't yet checked whether the pkg is installed
# let's check it and store the result.
if pkg not in required_pkgs_info:
try:
installed = util.find_spec(pkg) is not None
except Exception:
installed = False
required_pkgs_info[pkg] = installed
if not required_pkgs_info[pkg]:
if only_extended:
pytest.fail(
f"Package `{pkg}` is not installed but is required for "
f"extended tests. Please install the given package and "
f"try again.",
)
else:
# If the package is not installed, we immediately break
# and mark the test as skipped.
item.add_marker(
pytest.mark.skip(reason=f"Requires pkg: `{pkg}`")
)
break
else:
if only_extended:
item.add_marker(
pytest.mark.skip(reason="Skipping not an extended test.")
)
|
"""Configuration for unit tests."""
from collections.abc import Iterator
from importlib import util
from typing import Dict, Sequence
import pytest
from blockbuster import blockbuster_ctx
from pytest import Config, Function, Parser
@pytest.fixture(autouse=True)
def blockbuster() -> Iterator[None]:
with blockbuster_ctx("langchain") as bb:
bb.functions["io.TextIOWrapper.read"].can_block_in(
"langchain/__init__.py", "<module>"
)
for func in ["os.stat", "os.path.abspath"]:
(
bb.functions[func]
.can_block_in("langchain_core/runnables/base.py", "__repr__")
.can_block_in(
"langchain_core/beta/runnables/context.py", "aconfig_with_context"
)
)
for func in ["os.stat", "io.TextIOWrapper.read"]:
bb.functions[func].can_block_in(
"langsmith/client.py", "_default_retry_config"
)
for bb_function in bb.functions.values():
bb_function.can_block_in(
"freezegun/api.py", "_get_cached_module_attributes"
)
yield
def pytest_addoption(parser: Parser) -> None:
"""Add custom command line options to pytest."""
parser.addoption(
"--only-extended",
action="store_true",
help="Only run extended tests. Does not allow skipping any extended tests.",
)
parser.addoption(
"--only-core",
action="store_true",
help="Only run core tests. Never runs any extended tests.",
)
parser.addoption(
"--community",
action="store_true",
dest="community",
default=False,
help="enable running unite tests that require community",
)
def pytest_collection_modifyitems(config: Config, items: Sequence[Function]) -> None:
"""Add implementations for handling custom markers.
At the moment, this adds support for a custom `requires` marker.
The `requires` marker is used to denote tests that require one or more packages
to be installed to run. If the package is not installed, the test is skipped.
The `requires` marker syntax is:
.. code-block:: python
@pytest.mark.requires("package1", "package2")
def test_something():
...
"""
# Mapping from the name of a package to whether it is installed or not.
# Used to avoid repeated calls to `util.find_spec`
required_pkgs_info: Dict[str, bool] = {}
only_extended = config.getoption("--only-extended") or False
only_core = config.getoption("--only-core") or False
if not config.getoption("--community"):
skip_community = pytest.mark.skip(reason="need --community option to run")
for item in items:
if "community" in item.keywords:
item.add_marker(skip_community)
if only_extended and only_core:
raise ValueError("Cannot specify both `--only-extended` and `--only-core`.")
for item in items:
requires_marker = item.get_closest_marker("requires")
if requires_marker is not None:
if only_core:
item.add_marker(pytest.mark.skip(reason="Skipping not a core test."))
continue
# Iterate through the list of required packages
required_pkgs = requires_marker.args
for pkg in required_pkgs:
# If we haven't yet checked whether the pkg is installed
# let's check it and store the result.
if pkg not in required_pkgs_info:
try:
installed = util.find_spec(pkg) is not None
except Exception:
installed = False
required_pkgs_info[pkg] = installed
if not required_pkgs_info[pkg]:
if only_extended:
pytest.fail(
f"Package `{pkg}` is not installed but is required for "
f"extended tests. Please install the given package and "
f"try again.",
)
else:
# If the package is not installed, we immediately break
# and mark the test as skipped.
item.add_marker(
pytest.mark.skip(reason=f"Requires pkg: `{pkg}`")
)
break
else:
if only_extended:
item.add_marker(
pytest.mark.skip(reason="Skipping not an extended test.")
)
|
import pytest
from backend.util.request import pin_url, validate_url
@pytest.mark.parametrize(
"raw_url, trusted_origins, expected_value, should_raise",
[
# Rejected IP ranges
("localhost", [], None, True),
("192.168.1.1", [], None, True),
("127.0.0.1", [], None, True),
("0.0.0.0", [], None, True),
# Normal URLs (should default to http:// if no scheme provided)
("google.com/a?b=c", [], "http://google.com/a?b=c", False),
("github.com?key=!@!@", [], "http://github.com?key=!@!@", False),
# Scheme Enforcement
("ftp://example.com", [], None, True),
("file://example.com", [], None, True),
# International domain converting to punycode (allowed if public)
("http://xn--exmple-cua.com", [], "http://xn--exmple-cua.com", False),
# Invalid domain (IDNA failure)
("http://exa◌mple.com", [], None, True),
# IPv6 addresses (loopback/blocked)
("::1", [], None, True),
("http://[::1]", [], None, True),
# Suspicious Characters in Hostname
("http://example_underscore.com", [], None, True),
("http://exa mple.com", [], None, True),
# Malformed URLs
("http://", [], None, True), # No hostname
("://missing-scheme", [], None, True), # Missing proper scheme
# Trusted Origins
(
"internal-api.company.com",
["internal-api.company.com", "10.0.0.5"],
"http://internal-api.company.com",
False,
),
("10.0.0.5", ["10.0.0.5"], "http://10.0.0.5", False),
# Special Characters in Path
(
"example.com/path%20with%20spaces",
[],
"http://example.com/path%20with%20spaces",
False,
),
# Backslashes should be replaced with forward slashes
("http://example.com\\backslash", [], "http://example.com/backslash", False),
# Check default-scheme behavior for valid domains
("example.com", [], "http://example.com", False),
("https://secure.com", [], "https://secure.com", False),
# Non-ASCII Characters in Query/Fragment
("example.com?param=äöü", [], "http://example.com?param=äöü", False),
],
)
async def test_validate_url_no_dns_rebinding(
raw_url: str, trusted_origins: list[str], expected_value: str, should_raise: bool
):
if should_raise:
with pytest.raises(ValueError):
await validate_url(raw_url, trusted_origins)
else:
validated_url, _, _ = await validate_url(raw_url, trusted_origins)
assert validated_url.geturl() == expected_value
@pytest.mark.parametrize(
"hostname, resolved_ips, expect_error, expected_ip",
[
# Multiple public IPs, none blocked
("public-example.com", ["8.8.8.8", "9.9.9.9"], False, "8.8.8.8"),
# Includes a blocked IP (e.g. link-local 169.254.x.x) => should raise
("rebinding.com", ["1.2.3.4", "169.254.169.254"], True, None),
# Single public IP
("single-public.com", ["8.8.8.8"], False, "8.8.8.8"),
# Single blocked IP
("blocked.com", ["127.0.0.1"], True, None),
],
)
async def test_dns_rebinding_fix(
monkeypatch,
hostname: str,
resolved_ips: list[str],
expect_error: bool,
expected_ip: str,
):
"""
Tests that validate_url pins the first valid public IP address, and rejects
the domain if any of the resolved IPs are blocked (i.e., DNS Rebinding scenario).
"""
def mock_getaddrinfo(host, port, *args, **kwargs):
# Simulate multiple IPs returned for the given hostname
return [(None, None, None, None, (ip, port)) for ip in resolved_ips]
# Patch socket.getaddrinfo so we control the DNS resolution in the test
monkeypatch.setattr("socket.getaddrinfo", mock_getaddrinfo)
if expect_error:
# If any IP is blocked, we expect a ValueError
with pytest.raises(ValueError):
url, _, ip_addresses = await validate_url(hostname, [])
pin_url(url, ip_addresses)
else:
url, _, ip_addresses = await validate_url(hostname, [])
pinned_url = pin_url(url, ip_addresses).geturl()
# The pinned_url should contain the first valid IP
assert pinned_url.startswith("http://") or pinned_url.startswith("https://")
assert expected_ip in pinned_url
# The unpinned URL's hostname should match our original IDNA encoded hostname
assert url.hostname == hostname
|
import pytest
from backend.util.request import pin_url, validate_url
@pytest.mark.parametrize(
"raw_url, trusted_origins, expected_value, should_raise",
[
# Rejected IP ranges
("localhost", [], None, True),
("192.168.1.1", [], None, True),
("127.0.0.1", [], None, True),
("0.0.0.0", [], None, True),
# Normal URLs (should default to http:// if no scheme provided)
("google.com/a?b=c", [], "http://google.com/a?b=c", False),
("github.com?key=!@!@", [], "http://github.com?key=!@!@", False),
# Scheme Enforcement
("ftp://example.com", [], None, True),
("file://example.com", [], None, True),
# International domain converting to punycode (allowed if public)
("http://xn--exmple-cua.com", [], "http://xn--exmple-cua.com", False),
# Invalid domain (IDNA failure)
("http://exa◌mple.com", [], None, True),
# IPv6 addresses (loopback/blocked)
("::1", [], None, True),
("http://[::1]", [], None, True),
# Suspicious Characters in Hostname
("http://example_underscore.com", [], None, True),
("http://exa mple.com", [], None, True),
# Malformed URLs
("http://", [], None, True), # No hostname
("://missing-scheme", [], None, True), # Missing proper scheme
# Trusted Origins
(
"internal-api.company.com",
["internal-api.company.com", "10.0.0.5"],
"http://internal-api.company.com",
False,
),
("10.0.0.5", ["10.0.0.5"], "http://10.0.0.5", False),
# Special Characters in Path
(
"example.com/path%20with%20spaces",
[],
"http://example.com/path%20with%20spaces",
False,
),
# Backslashes should be replaced with forward slashes
("http://example.com\\backslash", [], "http://example.com/backslash", False),
# Check default-scheme behavior for valid domains
("example.com", [], "http://example.com", False),
("https://secure.com", [], "https://secure.com", False),
# Non-ASCII Characters in Query/Fragment
("example.com?param=äöü", [], "http://example.com?param=äöü", False),
],
)
def test_validate_url_no_dns_rebinding(
raw_url: str, trusted_origins: list[str], expected_value: str, should_raise: bool
):
if should_raise:
with pytest.raises(ValueError):
validate_url(raw_url, trusted_origins)
else:
validated_url, _, _ = validate_url(raw_url, trusted_origins)
assert validated_url.geturl() == expected_value
@pytest.mark.parametrize(
"hostname, resolved_ips, expect_error, expected_ip",
[
# Multiple public IPs, none blocked
("public-example.com", ["8.8.8.8", "9.9.9.9"], False, "8.8.8.8"),
# Includes a blocked IP (e.g. link-local 169.254.x.x) => should raise
("rebinding.com", ["1.2.3.4", "169.254.169.254"], True, None),
# Single public IP
("single-public.com", ["8.8.8.8"], False, "8.8.8.8"),
# Single blocked IP
("blocked.com", ["127.0.0.1"], True, None),
],
)
def test_dns_rebinding_fix(
monkeypatch,
hostname: str,
resolved_ips: list[str],
expect_error: bool,
expected_ip: str,
):
"""
    Tests that validate_url resolves and vets the hostname's IPs and that pin_url
    pins the first valid public IP; the domain is rejected if any resolved IP is
    blocked (the DNS rebinding scenario).
"""
def mock_getaddrinfo(host, port, *args, **kwargs):
# Simulate multiple IPs returned for the given hostname
return [(None, None, None, None, (ip, port)) for ip in resolved_ips]
# Patch socket.getaddrinfo so we control the DNS resolution in the test
monkeypatch.setattr("socket.getaddrinfo", mock_getaddrinfo)
if expect_error:
# If any IP is blocked, we expect a ValueError
with pytest.raises(ValueError):
url, _, ip_addresses = validate_url(hostname, [])
pin_url(url, ip_addresses)
else:
url, _, ip_addresses = validate_url(hostname, [])
pinned_url = pin_url(url, ip_addresses).geturl()
# The pinned_url should contain the first valid IP
assert pinned_url.startswith("http://") or pinned_url.startswith("https://")
assert expected_ip in pinned_url
# The unpinned URL's hostname should match our original IDNA encoded hostname
assert url.hostname == hostname
|
_base_ = './mask-rcnn_hrnetv2p-w40_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
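# 500-iteration linear warm-up, then the learning rate is multiplied by 0.1 at
# epochs 16 and 22 of the 24-epoch (2x) schedule.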
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './mask_rcnn_hrnetv2p_w40_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
"""Utilities for environment variables."""
from __future__ import annotations
import os
from typing import Any, Optional, Union
def env_var_is_set(env_var: str) -> bool:
"""Check if an environment variable is set.
Args:
env_var (str): The name of the environment variable.
Returns:
bool: True if the environment variable is set, False otherwise.
"""
return env_var in os.environ and os.environ[env_var] not in (
"",
"0",
"false",
"False",
)
def get_from_dict_or_env(
data: dict[str, Any],
key: Union[str, list[str]],
env_key: str,
default: Optional[str] = None,
) -> str:
"""Get a value from a dictionary or an environment variable.
Args:
data: The dictionary to look up the key in.
key: The key to look up in the dictionary. This can be a list of keys to try
in order.
env_key: The environment variable to look up if the key is not
in the dictionary.
default: The default value to return if the key is not in the dictionary
or the environment. Defaults to None.
"""
if isinstance(key, (list, tuple)):
for k in key:
if value := data.get(k):
return value
if isinstance(key, str) and key in data and data[key]:
return data[key]
key_for_err = key[0] if isinstance(key, (list, tuple)) else key
return get_from_env(key_for_err, env_key, default=default)
def get_from_env(key: str, env_key: str, default: Optional[str] = None) -> str:
"""Get a value from a dictionary or an environment variable.
Args:
key: The key to look up in the dictionary.
env_key: The environment variable to look up if the key is not
in the dictionary.
default: The default value to return if the key is not in the dictionary
or the environment. Defaults to None.
Returns:
str: The value of the key.
Raises:
ValueError: If the key is not in the dictionary and no default value is
provided or if the environment variable is not set.
"""
if env_value := os.getenv(env_key):
return env_value
if default is not None:
return default
msg = (
f"Did not find {key}, please add an environment variable"
f" `{env_key}` which contains it, or pass"
f" `{key}` as a named parameter."
)
raise ValueError(msg)
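# ---------------------------------------------------------------------------
# Minimal usage sketch (the dict keys and env-var names below are made up for
# illustration; they are not part of this module's API surface).
if __name__ == "__main__":
    os.environ["EXAMPLE_API_KEY"] = "sk-from-env"
    assert env_var_is_set("EXAMPLE_API_KEY")
    # An explicit value in the dict wins over the environment variable.
    assert (
        get_from_dict_or_env({"api_key": "sk-explicit"}, "api_key", "EXAMPLE_API_KEY")
        == "sk-explicit"
    )
    # Missing from the dict -> falls back to the environment variable.
    assert get_from_dict_or_env({}, "api_key", "EXAMPLE_API_KEY") == "sk-from-env"
    # Neither present -> the default keeps get_from_env from raising ValueError.
    assert get_from_env("api_key", "UNSET_EXAMPLE_VAR", default="fallback") == "fallback"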
|
"""Utilities for environment variables."""
from __future__ import annotations
import os
from typing import Any, Optional, Union
def env_var_is_set(env_var: str) -> bool:
"""Check if an environment variable is set.
Args:
env_var (str): The name of the environment variable.
Returns:
bool: True if the environment variable is set, False otherwise.
"""
return env_var in os.environ and os.environ[env_var] not in (
"",
"0",
"false",
"False",
)
def get_from_dict_or_env(
data: dict[str, Any],
key: Union[str, list[str]],
env_key: str,
default: Optional[str] = None,
) -> str:
"""Get a value from a dictionary or an environment variable.
Args:
data: The dictionary to look up the key in.
key: The key to look up in the dictionary. This can be a list of keys to try
in order.
env_key: The environment variable to look up if the key is not
in the dictionary.
default: The default value to return if the key is not in the dictionary
or the environment. Defaults to None.
"""
if isinstance(key, (list, tuple)):
for k in key:
if k in data and data[k]:
return data[k]
if isinstance(key, str) and key in data and data[key]:
return data[key]
key_for_err = key[0] if isinstance(key, (list, tuple)) else key
return get_from_env(key_for_err, env_key, default=default)
def get_from_env(key: str, env_key: str, default: Optional[str] = None) -> str:
"""Get a value from a dictionary or an environment variable.
Args:
key: The key to look up in the dictionary.
env_key: The environment variable to look up if the key is not
in the dictionary.
default: The default value to return if the key is not in the dictionary
or the environment. Defaults to None.
Returns:
str: The value of the key.
Raises:
ValueError: If the key is not in the dictionary and no default value is
provided or if the environment variable is not set.
"""
if env_key in os.environ and os.environ[env_key]:
return os.environ[env_key]
if default is not None:
return default
msg = (
f"Did not find {key}, please add an environment variable"
f" `{env_key}` which contains it, or pass"
f" `{key}` as a named parameter."
)
raise ValueError(msg)
|
"""Base types for ReAct agent."""
from abc import abstractmethod
from typing import Dict
from llama_index.core.bridge.pydantic import BaseModel
class BaseReasoningStep(BaseModel):
"""Reasoning step."""
@abstractmethod
def get_content(self) -> str:
"""Get content."""
@property
@abstractmethod
def is_done(self) -> bool:
"""Is the reasoning step the last one."""
class ActionReasoningStep(BaseReasoningStep):
"""Action Reasoning step."""
thought: str
action: str
action_input: Dict
def get_content(self) -> str:
"""Get content."""
return (
f"Thought: {self.thought}\nAction: {self.action}\n"
f"Action Input: {self.action_input}"
)
@property
def is_done(self) -> bool:
"""Is the reasoning step the last one."""
return False
class ObservationReasoningStep(BaseReasoningStep):
"""Observation reasoning step."""
observation: str
return_direct: bool = False
def get_content(self) -> str:
"""Get content."""
return f"Observation: {self.observation}"
@property
def is_done(self) -> bool:
"""Is the reasoning step the last one."""
return self.return_direct
class ResponseReasoningStep(BaseReasoningStep):
"""Response reasoning step."""
thought: str
response: str
is_streaming: bool = False
def get_content(self) -> str:
"""Get content."""
if self.is_streaming:
return f"Thought: {self.thought}\nAnswer (Starts With): {self.response} ..."
else:
return f"Thought: {self.thought}\nAnswer: {self.response}"
@property
def is_done(self) -> bool:
"""Is the reasoning step the last one."""
return True
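# ---------------------------------------------------------------------------
# Minimal usage sketch (the tool name and inputs are invented for illustration):
if __name__ == "__main__":
    action = ActionReasoningStep(
        thought="I need to look up the capital of France.",
        action="search",
        action_input={"query": "capital of France"},
    )
    observation = ObservationReasoningStep(observation="Paris")
    response = ResponseReasoningStep(
        thought="The observation answers the question.", response="Paris"
    )
    # get_content renders the text blocks a ReAct-style prompt is assembled from;
    # is_done marks the step that terminates the reasoning loop.
    print(action.get_content())
    print(observation.get_content())
    print(response.get_content())
    assert not action.is_done and not observation.is_done and response.is_done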
|
"""Base types for ReAct agent."""
from abc import abstractmethod
from typing import Dict
from llama_index.core.bridge.pydantic import BaseModel
class BaseReasoningStep(BaseModel):
"""Reasoning step."""
@abstractmethod
def get_content(self) -> str:
"""Get content."""
@property
@abstractmethod
def is_done(self) -> bool:
"""Is the reasoning step the last one."""
class ActionReasoningStep(BaseReasoningStep):
"""Action Reasoning step."""
thought: str
action: str
action_input: Dict
def get_content(self) -> str:
"""Get content."""
return (
f"Thought: {self.thought}\nAction: {self.action}\n"
f"Action Input: {self.action_input}"
)
@property
def is_done(self) -> bool:
"""Is the reasoning step the last one."""
return False
class ObservationReasoningStep(BaseReasoningStep):
"""Observation reasoning step."""
observation: str
return_direct: bool = False
def get_content(self) -> str:
"""Get content."""
return f"Observation: {self.observation}"
@property
def is_done(self) -> bool:
"""Is the reasoning step the last one."""
return self.return_direct
class ResponseReasoningStep(BaseReasoningStep):
"""Response reasoning step."""
thought: str
response: str
is_streaming: bool = False
def get_content(self) -> str:
"""Get content."""
if self.is_streaming:
return (
f"Thought: {self.thought}\n"
f"Answer (Starts With): {self.response} ..."
)
else:
return f"Thought: {self.thought}\n" f"Answer: {self.response}"
@property
def is_done(self) -> bool:
"""Is the reasoning step the last one."""
return True
|
"""Custom **exceptions** for LangChain."""
from enum import Enum
from typing import Any, Optional
class LangChainException(Exception): # noqa: N818
"""General LangChain exception."""
class TracerException(LangChainException):
"""Base class for exceptions in tracers module."""
class OutputParserException(ValueError, LangChainException): # noqa: N818
"""Exception that output parsers should raise to signify a parsing error.
    This exists to differentiate parsing errors from other code or execution errors
    that may also arise inside the output parser. An OutputParserException can be
    caught and handled specifically to fix the parsing error, while other errors
    are raised as-is.
"""
def __init__(
self,
error: Any,
observation: Optional[str] = None,
llm_output: Optional[str] = None,
send_to_llm: bool = False, # noqa: FBT001,FBT002
):
"""Create an OutputParserException.
Args:
error: The error that's being re-raised or an error message.
observation: String explanation of error which can be passed to a
model to try and remediate the issue. Defaults to None.
            llm_output: The model output that failed to parse.
                Defaults to None.
send_to_llm: Whether to send the observation and llm_output back to an Agent
after an OutputParserException has been raised.
This gives the underlying model driving the agent the context that the
previous output was improperly structured, in the hopes that it will
update the output to the correct format.
Defaults to False.
"""
if isinstance(error, str):
error = create_message(
message=error, error_code=ErrorCode.OUTPUT_PARSING_FAILURE
)
super().__init__(error)
if send_to_llm and (observation is None or llm_output is None):
msg = (
"Arguments 'observation' & 'llm_output'"
" are required if 'send_to_llm' is True"
)
raise ValueError(msg)
self.observation = observation
self.llm_output = llm_output
self.send_to_llm = send_to_llm
class ErrorCode(Enum):
"""Error codes."""
INVALID_PROMPT_INPUT = "INVALID_PROMPT_INPUT"
INVALID_TOOL_RESULTS = "INVALID_TOOL_RESULTS"
MESSAGE_COERCION_FAILURE = "MESSAGE_COERCION_FAILURE"
MODEL_AUTHENTICATION = "MODEL_AUTHENTICATION"
MODEL_NOT_FOUND = "MODEL_NOT_FOUND"
MODEL_RATE_LIMIT = "MODEL_RATE_LIMIT"
OUTPUT_PARSING_FAILURE = "OUTPUT_PARSING_FAILURE"
def create_message(*, message: str, error_code: ErrorCode) -> str:
"""Create a message with a link to the LangChain troubleshooting guide.
Args:
message: The message to display.
error_code: The error code to display.
"""
return (
f"{message}\n"
"For troubleshooting, visit: https://python.langchain.com/docs/"
f"troubleshooting/errors/{error_code.value} "
)
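# ---------------------------------------------------------------------------
# Usage sketch (the parser output and messages below are invented for
# illustration):
if __name__ == "__main__":
    bad_output = "this is not valid JSON"
    try:
        raise OutputParserException(
            "Could not parse model output as JSON.",
            observation="Reply with a single JSON object.",
            llm_output=bad_output,
            send_to_llm=True,
        )
    except OutputParserException as exc:
        # A string error is routed through create_message, which appends the
        # troubleshooting link for OUTPUT_PARSING_FAILURE.
        assert ErrorCode.OUTPUT_PARSING_FAILURE.value in str(exc)
        assert exc.send_to_llm and exc.llm_output == bad_output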
|
"""Custom **exceptions** for LangChain."""
from enum import Enum
from typing import Any, Optional
class LangChainException(Exception): # noqa: N818
"""General LangChain exception."""
class TracerException(LangChainException):
"""Base class for exceptions in tracers module."""
class OutputParserException(ValueError, LangChainException): # noqa: N818
"""Exception that output parsers should raise to signify a parsing error.
    This exists to differentiate parsing errors from other code or execution errors
    that may also arise inside the output parser. An OutputParserException can be
    caught and handled specifically to fix the parsing error, while other errors
    are raised as-is.
"""
def __init__(
self,
error: Any,
observation: Optional[str] = None,
llm_output: Optional[str] = None,
send_to_llm: bool = False,
):
"""Create an OutputParserException.
Args:
error: The error that's being re-raised or an error message.
observation: String explanation of error which can be passed to a
model to try and remediate the issue. Defaults to None.
            llm_output: The model output that failed to parse.
                Defaults to None.
send_to_llm: Whether to send the observation and llm_output back to an Agent
after an OutputParserException has been raised.
This gives the underlying model driving the agent the context that the
previous output was improperly structured, in the hopes that it will
update the output to the correct format.
Defaults to False.
"""
if isinstance(error, str):
error = create_message(
message=error, error_code=ErrorCode.OUTPUT_PARSING_FAILURE
)
super().__init__(error)
if send_to_llm and (observation is None or llm_output is None):
msg = (
"Arguments 'observation' & 'llm_output'"
" are required if 'send_to_llm' is True"
)
raise ValueError(msg)
self.observation = observation
self.llm_output = llm_output
self.send_to_llm = send_to_llm
class ErrorCode(Enum):
"""Error codes."""
INVALID_PROMPT_INPUT = "INVALID_PROMPT_INPUT"
INVALID_TOOL_RESULTS = "INVALID_TOOL_RESULTS"
MESSAGE_COERCION_FAILURE = "MESSAGE_COERCION_FAILURE"
MODEL_AUTHENTICATION = "MODEL_AUTHENTICATION"
MODEL_NOT_FOUND = "MODEL_NOT_FOUND"
MODEL_RATE_LIMIT = "MODEL_RATE_LIMIT"
OUTPUT_PARSING_FAILURE = "OUTPUT_PARSING_FAILURE"
def create_message(*, message: str, error_code: ErrorCode) -> str:
"""Create a message with a link to the LangChain troubleshooting guide.
Args:
message: The message to display.
error_code: The error code to display.
"""
return (
f"{message}\n"
"For troubleshooting, visit: https://python.langchain.com/docs/"
f"troubleshooting/errors/{error_code.value} "
)
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for Swin2SR."""
from typing import Optional, Union
from ...image_processing_utils import BatchFeature, ChannelDimension, get_image_size
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
DefaultFastImageProcessorKwargs,
group_images_by_shape,
reorder_images,
)
from ...image_utils import ImageInput
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
is_torch_available,
is_torchvision_available,
is_torchvision_v2_available,
)
if is_torch_available():
import torch
if is_torchvision_available():
if is_torchvision_v2_available():
from torchvision.transforms.v2 import functional as F
else:
from torchvision.transforms import functional as F
class Swin2SRFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):
"""
    do_pad (`bool`, *optional*, defaults to `True`):
        Whether to pad the image to make the height and width divisible by `pad_size`.
    pad_size (`int`, *optional*, defaults to `8`):
        The size of the sliding window used for the local attention; the height and width are padded up to a multiple of this value.
"""
do_pad: Optional[bool]
pad_size: Optional[int]
@auto_docstring
class Swin2SRImageProcessorFast(BaseImageProcessorFast):
do_rescale = True
rescale_factor = 1 / 255
do_pad = True
pad_size = 8
valid_kwargs = Swin2SRFastImageProcessorKwargs
def __init__(self, **kwargs: Unpack[Swin2SRFastImageProcessorKwargs]):
super().__init__(**kwargs)
def preprocess(self, images: ImageInput, **kwargs: Unpack[Swin2SRFastImageProcessorKwargs]) -> BatchFeature:
return super().preprocess(images, **kwargs)
def pad(self, images: "torch.Tensor", size: int) -> "torch.Tensor":
"""
Pad an image to make the height and width divisible by `size`.
Args:
images (`torch.Tensor`):
Images to pad.
size (`int`):
The size to make the height and width divisible by.
Returns:
`torch.Tensor`: The padded images.
"""
height, width = get_image_size(images, ChannelDimension.FIRST)
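        # Both dimensions are rounded up to the *next* multiple of `size`; a
        # dimension that is already divisible still gains a full `size`
        # (e.g. height 16 with size 8 is padded to 24).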
pad_height = (height // size + 1) * size - height
pad_width = (width // size + 1) * size - width
return F.pad(
images,
(0, 0, pad_width, pad_height),
padding_mode="symmetric",
)
def _preprocess(
self,
images: list["torch.Tensor"],
do_rescale: bool,
rescale_factor: float,
do_pad: bool,
pad_size: int,
disable_grouping: Optional[bool],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
processed_image_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_rescale:
stacked_images = self.rescale(stacked_images, scale=rescale_factor)
if do_pad:
stacked_images = self.pad(stacked_images, size=pad_size)
processed_image_grouped[shape] = stacked_images
processed_images = reorder_images(processed_image_grouped, grouped_images_index)
processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images
return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
__all__ = ["Swin2SRImageProcessorFast"]
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for Swin2SR."""
from typing import Optional, Union
from ...image_processing_utils import BatchFeature, ChannelDimension, get_image_size
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
DefaultFastImageProcessorKwargs,
group_images_by_shape,
reorder_images,
)
from ...image_utils import ImageInput
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
is_torch_available,
is_torchvision_available,
is_torchvision_v2_available,
)
if is_torch_available():
import torch
if is_torchvision_available():
if is_torchvision_v2_available():
from torchvision.transforms.v2 import functional as F
else:
from torchvision.transforms import functional as F
class Swin2SRFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):
"""
    do_pad (`bool`, *optional*, defaults to `True`):
        Whether to pad the image to make the height and width divisible by `pad_size`.
    pad_size (`int`, *optional*, defaults to `8`):
        The size of the sliding window used for the local attention; the height and width are padded up to a multiple of this value.
"""
do_pad: Optional[bool]
pad_size: Optional[int]
@auto_docstring
class Swin2SRImageProcessorFast(BaseImageProcessorFast):
do_rescale = True
rescale_factor = 1 / 255
do_pad = True
pad_size = 8
valid_kwargs = Swin2SRFastImageProcessorKwargs
def __init__(self, **kwargs: Unpack[Swin2SRFastImageProcessorKwargs]):
super().__init__(**kwargs)
def preprocess(self, images: ImageInput, **kwargs: Unpack[Swin2SRFastImageProcessorKwargs]) -> BatchFeature:
return super().preprocess(images, **kwargs)
def pad(self, images: "torch.Tensor", size: int) -> "torch.Tensor":
"""
Pad an image to make the height and width divisible by `size`.
Args:
images (`torch.Tensor`):
Images to pad.
size (`int`):
The size to make the height and width divisible by.
Returns:
`torch.Tensor`: The padded images.
"""
height, width = get_image_size(images, ChannelDimension.FIRST)
pad_height = (height // size + 1) * size - height
pad_width = (width // size + 1) * size - width
return F.pad(
images,
(0, 0, pad_width, pad_height),
padding_mode="symmetric",
)
def _preprocess(
self,
images: list["torch.Tensor"],
do_rescale: bool,
rescale_factor: float,
do_pad: bool,
pad_size: int,
return_tensors: Optional[Union[str, TensorType]],
interpolation: Optional["F.InterpolationMode"],
**kwargs,
) -> BatchFeature:
grouped_images, grouped_images_index = group_images_by_shape(images)
processed_image_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_rescale:
stacked_images = self.rescale(stacked_images, scale=rescale_factor)
if do_pad:
stacked_images = self.pad(stacked_images, size=pad_size)
processed_image_grouped[shape] = stacked_images
processed_images = reorder_images(processed_image_grouped, grouped_images_index)
processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images
return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
__all__ = ["Swin2SRImageProcessorFast"]
|
import os
import time
import pytest
from docarray import Document
from jina import Client, Flow
from jina.serve.networking import GrpcConnectionPool
@pytest.fixture
def error_log_level():
old_env = os.environ.get('JINA_LOG_LEVEL')
os.environ['JINA_LOG_LEVEL'] = 'ERROR'
yield
os.environ['JINA_LOG_LEVEL'] = old_env
@pytest.fixture
def cert_pem():
"""This is the cert entry of a self-signed local cert"""
cur_dir = os.path.dirname(os.path.abspath(__file__))
return f'{cur_dir}/cert/server.crt'
@pytest.fixture
def key_pem():
"""This is the key entry of a self-signed local cert"""
cur_dir = os.path.dirname(os.path.abspath(__file__))
return f'{cur_dir}/cert/server.key'
def test_grpc_ssl_with_flow(cert_pem, key_pem, error_log_level):
with Flow(
protocol='grpc',
ssl_certfile=cert_pem,
ssl_keyfile=key_pem,
) as f:
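        # The client is not given the self-signed certificate as a root CA, so the
        # TLS handshake fails and the request surfaces as a ConnectionError.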
with pytest.raises(ConnectionError):
Client(protocol='grpc', port=f.port, tls=True).index([Document()])
# the openssl error from above seems to take a bit to actually terminate and may cause the next test to seg fault
time.sleep(15.0)
def test_grpc_ssl_with_flow_and_client(cert_pem, key_pem, error_log_level):
with Flow(
protocol='grpc',
ssl_certfile=cert_pem,
ssl_keyfile=key_pem,
) as flow:
with open(cert_pem, 'rb') as f:
creds = f.read()
GrpcConnectionPool.send_health_check_sync(
target=f'localhost:{flow.port}',
root_certificates=creds,
tls=True,
timeout=1.0,
)
time.sleep(15.0)
|
import os
import time
import pytest
from docarray import Document
from jina import Client, Flow
from jina.serve.networking import GrpcConnectionPool
@pytest.fixture
def error_log_level():
old_env = os.environ.get('JINA_LOG_LEVEL')
os.environ['JINA_LOG_LEVEL'] = 'ERROR'
yield
os.environ['JINA_LOG_LEVEL'] = old_env
@pytest.fixture
def cert_pem():
"""This is the cert entry of a self-signed local cert"""
cur_dir = os.path.dirname(os.path.abspath(__file__))
return f'{cur_dir}/cert/server.crt'
@pytest.fixture
def key_pem():
"""This is the key entry of a self-signed local cert"""
cur_dir = os.path.dirname(os.path.abspath(__file__))
return f'{cur_dir}/cert/server.key'
def test_grpc_ssl_with_flow(cert_pem, key_pem, error_log_level):
with Flow(
protocol='grpc',
ssl_certfile=cert_pem,
ssl_keyfile=key_pem,
) as f:
with pytest.raises(ConnectionError):
Client(protocol='grpc', port=f.port, tls=True).index([Document()])
# the openssl error from above seems to take a bit to actually terminate and may cause the next test to seg fault
time.sleep(1.0)
def test_grpc_ssl_with_flow_and_client(cert_pem, key_pem, error_log_level):
with Flow(
protocol='grpc',
ssl_certfile=cert_pem,
ssl_keyfile=key_pem,
) as flow:
with open(cert_pem, 'rb') as f:
creds = f.read()
GrpcConnectionPool.send_health_check_sync(
target=f'localhost:{flow.port}',
root_certificates=creds,
tls=True,
timeout=1.0,
)
|
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from ...dpr_text import DPRTextEncoder
_EMBEDDING_DIM = 768
@pytest.fixture(scope='session')
def basic_encoder() -> DPRTextEncoder:
return DPRTextEncoder()
@pytest.fixture(scope='session')
def basic_encoder_ctx() -> DPRTextEncoder:
return DPRTextEncoder(
'facebook/dpr-ctx_encoder-single-nq-base',
encoder_type='context',
title_tag_key='title',
)
def test_config():
encoder = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert encoder.encoder_type == 'question'
def test_no_document(basic_encoder: DPRTextEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: DPRTextEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
def test_no_text_documents(basic_encoder: DPRTextEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_context_encoder_doc_no_title(basic_encoder_ctx: DPRTextEncoder):
docs = DocumentArray([Document(text='hello there')])
with pytest.raises(ValueError, match='If you set `title_tag_key` property'):
basic_encoder_ctx.encode(docs, {})
def test_wrong_encoder_type():
with pytest.raises(ValueError, match='The ``encoder_type`` parameter'):
        DPRTextEncoder(encoder_type='wrong_type')
def test_encoding_cpu():
docs = DocumentArray([Document(text='hello there')])
encoder = DPRTextEncoder(device='cpu')
encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
def test_encoding_question_type(basic_encoder: DPRTextEncoder):
docs = DocumentArray([Document(text='hello there')])
basic_encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
def test_encoding_context_type(basic_encoder_ctx: DPRTextEncoder):
docs = DocumentArray([Document(text='hello there', tags={'title': 'greeting'})])
basic_encoder_ctx.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
def test_encoding_gpu():
docs = DocumentArray([Document(text='hello there')])
encoder = DPRTextEncoder(device='cuda')
encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: DPRTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: DPRTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: DPRTextEncoder):
docs = DocumentArray(
[
            Document(id='A', text='a furry animal with a long tail'),
Document(id='B', text='a domesticated mammal with four legs'),
Document(id='C', text='a type of aircraft that uses rotating wings'),
Document(id='D', text='flying vehicle that has fixed wings and engines'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
from pathlib import Path
from typing import List
import pytest
import torch
from jina import Document, DocumentArray, Executor
from ...dpr_text import DPRTextEncoder
@pytest.fixture(scope='session')
def basic_encoder() -> DPRTextEncoder:
return DPRTextEncoder()
@pytest.fixture(scope='session')
def basic_encoder_ctx() -> DPRTextEncoder:
return DPRTextEncoder(
'facebook/dpr-ctx_encoder-single-nq-base',
encoder_type='context',
title_tag_key='title',
)
def test_config():
encoder = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert encoder.default_batch_size == 32
assert encoder.default_traversal_paths == ('r',)
assert encoder.encoder_type == 'question'
assert encoder.title_tag_key is None
def test_no_document(basic_encoder: DPRTextEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: DPRTextEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
def test_no_text_documents(basic_encoder: DPRTextEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_context_encoder_doc_no_title(basic_encoder_ctx: DPRTextEncoder):
docs = DocumentArray([Document(text='hello there')])
with pytest.raises(ValueError, match='If you set `title_tag_key` property'):
basic_encoder_ctx.encode(docs, {})
def test_wrong_encoder_type():
with pytest.raises(ValueError, match='The ``encoder_type`` parameter'):
        DPRTextEncoder(encoder_type='wrong_type')
def test_encoding_cpu():
docs = DocumentArray([Document(text='hello there')])
encoder = DPRTextEncoder(device='cpu')
encoder.encode(docs, {})
assert docs[0].embedding.shape == (768,)
def test_encoding_question_type(basic_encoder: DPRTextEncoder):
docs = DocumentArray([Document(text='hello there')])
basic_encoder.encode(docs, {})
assert docs[0].embedding.shape == (768,)
def test_encoding_context_type(basic_encoder_ctx: DPRTextEncoder):
docs = DocumentArray([Document(text='hello there', tags={'title': 'greeting'})])
basic_encoder_ctx.encode(docs, {})
assert docs[0].embedding.shape == (768,)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='GPU is needed for this test')
def test_encoding_gpu():
docs = DocumentArray([Document(text='hello there')])
encoder = DPRTextEncoder(device='cuda')
encoder.encode(docs, {})
assert docs[0].embedding.shape == (768,)
@pytest.mark.parametrize(
'traversal_path, counts',
[
('r', [['r', 1], ['c', 0], ['cc', 0]]),
('c', [['r', 0], ['c', 3], ['cc', 0]]),
('cc', [['r', 0], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_path: str, counts: List, basic_encoder: DPRTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(
docs=docs, parameters={'traversal_paths': [traversal_path]}, return_results=True
)
for path, count in counts:
assert len(docs.traverse_flat([path]).get_attributes('embedding')) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: DPRTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (768,)
def test_quality_embeddings(basic_encoder: DPRTextEncoder):
docs = DocumentArray(
[
            Document(id='A', text='a furry animal with a long tail'),
Document(id='B', text='a domesticated mammal with four legs'),
Document(id='C', text='a type of aircraft that uses rotating wings'),
Document(id='D', text='flying vehicle that has fixed wings and engines'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
from abc import abstractmethod
from typing import Iterable, Iterator
from qdrant_client import QdrantClient
from qdrant_client.http.exceptions import UnexpectedResponse
from qdrant_client.http.models.models import (
PointIdsList,
PointsList,
ScrollRequest,
PointStruct,
)
from docarray import Document
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
class GetSetDelMixin(BaseGetSetDelMixin):
@property
@abstractmethod
def client(self) -> QdrantClient:
raise NotImplementedError()
@property
@abstractmethod
def serialization_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def n_dim(self) -> int:
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def scroll_batch_size(self) -> int:
raise NotImplementedError()
def _upload_batch(self, docs: Iterable['Document']):
batch = []
for doc in docs:
batch.append(self._document_to_qdrant(doc))
if len(batch) > self.scroll_batch_size:
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(points=batch),
)
batch = []
if len(batch) > 0:
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(points=batch),
)
def _qdrant_to_document(self, qdrant_record: dict) -> 'Document':
return Document.from_base64(
qdrant_record['_serialized'], **self.serialization_config
)
def _document_to_qdrant(self, doc: 'Document') -> 'PointStruct':
extra_columns = {col: doc.tags.get(col) for col, _ in self._config.columns}
return PointStruct(
id=self._map_id(doc.id),
payload=dict(
_serialized=doc.to_base64(**self.serialization_config), **extra_columns
),
vector=self._map_embedding(doc.embedding),
)
def _get_doc_by_id(self, _id: str) -> 'Document':
try:
resp = self.client.http.points_api.get_point(
collection_name=self.collection_name, id=self._map_id(_id)
)
return self._qdrant_to_document(resp.result.payload)
except UnexpectedResponse as response_error:
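            # Only 404/400 responses are translated into KeyError; any other
            # unexpected status falls through and the method implicitly returns None.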
if response_error.status_code in [404, 400]:
raise KeyError(_id)
def _del_doc_by_id(self, _id: str):
self.client.http.points_api.delete_points(
collection_name=self.collection_name,
wait=True,
points_selector=PointIdsList(points=[self._map_id(_id)]),
)
def _set_doc_by_id(self, _id: str, value: 'Document'):
if _id != value.id:
self._del_doc_by_id(_id)
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(
points=[self._document_to_qdrant(value)]
),
)
def scan(self) -> Iterator['Document']:
offset = None
while True:
response = self.client.http.points_api.scroll_points(
collection_name=self.collection_name,
scroll_request=ScrollRequest(
offset=offset,
limit=self.scroll_batch_size,
with_payload=['_serialized'],
with_vector=False,
),
)
for point in response.result.points:
yield self._qdrant_to_document(point.payload)
if response.result.next_page_offset:
offset = response.result.next_page_offset
else:
break
def _load_offset2ids(self):
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
def _clear_storage(self):
self._client.recreate_collection(
self.collection_name,
vector_size=self.n_dim,
distance=self.distance,
)
|
from abc import abstractmethod
from typing import Iterable, Iterator
from qdrant_client import QdrantClient
from qdrant_client.http.exceptions import UnexpectedResponse
from qdrant_client.http.models.models import (
PointIdsList,
PointsList,
ScrollRequest,
PointStruct,
)
from docarray import Document
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
class GetSetDelMixin(BaseGetSetDelMixin):
@property
@abstractmethod
def client(self) -> QdrantClient:
raise NotImplementedError()
@property
@abstractmethod
def serialization_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def n_dim(self) -> int:
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def scroll_batch_size(self) -> int:
raise NotImplementedError()
def _upload_batch(self, docs: Iterable['Document']):
batch = []
for doc in docs:
batch.append(self._document_to_qdrant(doc))
if len(batch) > self.scroll_batch_size:
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(points=batch),
)
batch = []
if len(batch) > 0:
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(points=batch),
)
def _qdrant_to_document(self, qdrant_record: dict) -> 'Document':
return Document.from_base64(
qdrant_record['_serialized'], **self.serialization_config
)
def _document_to_qdrant(self, doc: 'Document') -> 'PointStruct':
return PointStruct(
id=self._map_id(doc.id),
payload=dict(_serialized=doc.to_base64(**self.serialization_config)),
vector=self._map_embedding(doc.embedding),
)
def _get_doc_by_id(self, _id: str) -> 'Document':
try:
resp = self.client.http.points_api.get_point(
collection_name=self.collection_name, id=self._map_id(_id)
)
return self._qdrant_to_document(resp.result.payload)
except UnexpectedResponse as response_error:
if response_error.status_code in [404, 400]:
raise KeyError(_id)
def _del_doc_by_id(self, _id: str):
self.client.http.points_api.delete_points(
collection_name=self.collection_name,
wait=True,
points_selector=PointIdsList(points=[self._map_id(_id)]),
)
def _set_doc_by_id(self, _id: str, value: 'Document'):
if _id != value.id:
self._del_doc_by_id(_id)
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(
points=[self._document_to_qdrant(value)]
),
)
def scan(self) -> Iterator['Document']:
offset = None
while True:
response = self.client.http.points_api.scroll_points(
collection_name=self.collection_name,
scroll_request=ScrollRequest(
offset=offset,
limit=self.scroll_batch_size,
with_payload=['_serialized'],
with_vector=False,
),
)
for point in response.result.points:
yield self._qdrant_to_document(point.payload)
if response.result.next_page_offset:
offset = response.result.next_page_offset
else:
break
def _load_offset2ids(self):
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
def _clear_storage(self):
self._client.recreate_collection(
self.collection_name,
vector_size=self.n_dim,
distance=self.distance,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import bisect
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from torch.utils.data import ConcatDataset, Dataset
from mmdet.datasets.samplers import GroupMultiSourceSampler, MultiSourceSampler
class DummyDataset(Dataset):
def __init__(self, length, flag):
self.length = length
self.flag = flag
self.shapes = np.random.random((length, 2))
def __len__(self):
return self.length
def __getitem__(self, idx):
return self.shapes[idx]
def get_data_info(self, idx):
return dict(
width=self.shapes[idx][0],
height=self.shapes[idx][1],
flag=self.flag)
class DummyConcatDataset(ConcatDataset):
def _get_ori_dataset_idx(self, idx):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
sample_idx = idx if dataset_idx == 0 else idx - self.cumulative_sizes[
dataset_idx - 1]
return dataset_idx, sample_idx
def get_data_info(self, idx: int):
dataset_idx, sample_idx = self._get_ori_dataset_idx(idx)
return self.datasets[dataset_idx].get_data_info(sample_idx)
class TestMultiSourceSampler(TestCase):
@patch('mmengine.dist.get_dist_info', return_value=(7, 8))
def setUp(self, mock):
self.length_a = 100
self.dataset_a = DummyDataset(self.length_a, flag='a')
self.length_b = 1000
self.dataset_b = DummyDataset(self.length_b, flag='b')
self.dataset = DummyConcatDataset([self.dataset_a, self.dataset_b])
def test_multi_source_sampler(self):
# test dataset is not ConcatDataset
with self.assertRaises(AssertionError):
MultiSourceSampler(
self.dataset_a, batch_size=5, source_ratio=[1, 4])
# test invalid batch_size
with self.assertRaises(AssertionError):
MultiSourceSampler(
self.dataset_a, batch_size=-5, source_ratio=[1, 4])
        # test source_ratio longer than the number of datasets
with self.assertRaises(AssertionError):
MultiSourceSampler(
self.dataset, batch_size=5, source_ratio=[1, 2, 4])
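        # With batch_size=5 and source_ratio=[1, 4], every batch draws one sample
        # from dataset_a and four from dataset_b, which is the repeating
        # 'a b b b b' pattern asserted below.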
sampler = MultiSourceSampler(
self.dataset, batch_size=5, source_ratio=[1, 4])
sampler = iter(sampler)
flags = []
for i in range(100):
idx = next(sampler)
flags.append(self.dataset.get_data_info(idx)['flag'])
flags_gt = ['a', 'b', 'b', 'b', 'b'] * 20
self.assertEqual(flags, flags_gt)
class TestGroupMultiSourceSampler(TestCase):
@patch('mmengine.dist.get_dist_info', return_value=(7, 8))
def setUp(self, mock):
self.length_a = 100
self.dataset_a = DummyDataset(self.length_a, flag='a')
self.length_b = 1000
self.dataset_b = DummyDataset(self.length_b, flag='b')
self.dataset = DummyConcatDataset([self.dataset_a, self.dataset_b])
def test_group_multi_source_sampler(self):
sampler = GroupMultiSourceSampler(
self.dataset, batch_size=5, source_ratio=[1, 4])
sampler = iter(sampler)
flags = []
groups = []
for i in range(100):
idx = next(sampler)
data_info = self.dataset.get_data_info(idx)
flags.append(data_info['flag'])
group = 0 if data_info['width'] < data_info['height'] else 1
groups.append(group)
flags_gt = ['a', 'b', 'b', 'b', 'b'] * 20
self.assertEqual(flags, flags_gt)
groups = set(
[sum(x) for x in (groups[k:k + 5] for k in range(0, 100, 5))])
groups_gt = set([0, 5])
self.assertEqual(groups, groups_gt)
|
# Copyright (c) OpenMMLab. All rights reserved.
import bisect
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from torch.utils.data import ConcatDataset, Dataset
from mmdet.datasets.samplers import GroupMultiSourceSampler, MultiSourceSampler
class DummyDataset(Dataset):
def __init__(self, length, flag):
self.length = length
self.flag = flag
self.shapes = np.random.random((length, 2))
def __len__(self):
return self.length
def __getitem__(self, idx):
return self.shapes[idx]
def get_data_info(self, idx):
return dict(
width=self.shapes[idx][0],
height=self.shapes[idx][1],
flag=self.flag)
class DummyConcatDataset(ConcatDataset):
def _get_ori_dataset_idx(self, idx):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
sample_idx = idx if dataset_idx == 0 else idx - self.cumulative_sizes[
dataset_idx - 1]
return dataset_idx, sample_idx
def get_data_info(self, idx: int):
dataset_idx, sample_idx = self._get_ori_dataset_idx(idx)
return self.datasets[dataset_idx].get_data_info(sample_idx)
class TestMultiSourceSampler(TestCase):
@patch('mmengine.data.sampler.get_dist_info', return_value=(7, 8))
def setUp(self, mock):
self.length_a = 100
self.dataset_a = DummyDataset(self.length_a, flag='a')
self.length_b = 1000
self.dataset_b = DummyDataset(self.length_b, flag='b')
self.dataset = DummyConcatDataset([self.dataset_a, self.dataset_b])
def test_multi_source_sampler(self):
# test dataset is not ConcatDataset
with self.assertRaises(AssertionError):
MultiSourceSampler(
self.dataset_a, batch_size=5, source_ratio=[1, 4])
# test invalid batch_size
with self.assertRaises(AssertionError):
MultiSourceSampler(
self.dataset_a, batch_size=-5, source_ratio=[1, 4])
        # test source_ratio longer than the number of datasets
with self.assertRaises(AssertionError):
MultiSourceSampler(
self.dataset, batch_size=5, source_ratio=[1, 2, 4])
sampler = MultiSourceSampler(
self.dataset, batch_size=5, source_ratio=[1, 4])
sampler = iter(sampler)
flags = []
for i in range(100):
idx = next(sampler)
flags.append(self.dataset.get_data_info(idx)['flag'])
flags_gt = ['a', 'b', 'b', 'b', 'b'] * 20
self.assertEqual(flags, flags_gt)
class TestGroupMultiSourceSampler(TestCase):
@patch('mmengine.data.sampler.get_dist_info', return_value=(7, 8))
def setUp(self, mock):
self.length_a = 100
self.dataset_a = DummyDataset(self.length_a, flag='a')
self.length_b = 1000
self.dataset_b = DummyDataset(self.length_b, flag='b')
self.dataset = DummyConcatDataset([self.dataset_a, self.dataset_b])
def test_group_multi_source_sampler(self):
sampler = GroupMultiSourceSampler(
self.dataset, batch_size=5, source_ratio=[1, 4])
sampler = iter(sampler)
flags = []
groups = []
for i in range(100):
idx = next(sampler)
data_info = self.dataset.get_data_info(idx)
flags.append(data_info['flag'])
group = 0 if data_info['width'] < data_info['height'] else 1
groups.append(group)
flags_gt = ['a', 'b', 'b', 'b', 'b'] * 20
self.assertEqual(flags, flags_gt)
groups = set(
[sum(x) for x in (groups[k:k + 5] for k in range(0, 100, 5))])
groups_gt = set([0, 5])
self.assertEqual(groups, groups_gt)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_caption_metric import COCOCaptionMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .coco_video_metric import CocoVideoMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dump_det_results import DumpDetResults
from .dump_proposals_metric import DumpProposals
from .lvis_metric import LVISMetric
from .mot_challenge_metric import MOTChallengeMetric
from .openimages_metric import OpenImagesMetric
from .reid_metric import ReIDMetrics
from .voc_metric import VOCMetric
from .youtube_vis_metric import YouTubeVISMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals',
'CocoOccludedSeparatedMetric', 'DumpDetResults', 'BaseVideoMetric',
'MOTChallengeMetric', 'CocoVideoMetric', 'ReIDMetrics', 'YouTubeVISMetric',
'COCOCaptionMetric'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .coco_video_metric import CocoVideoMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dump_det_results import DumpDetResults
from .dump_proposals_metric import DumpProposals
from .lvis_metric import LVISMetric
from .mot_challenge_metric import MOTChallengeMetric
from .openimages_metric import OpenImagesMetric
from .reid_metric import ReIDMetrics
from .voc_metric import VOCMetric
from .youtube_vis_metric import YouTubeVISMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals',
'CocoOccludedSeparatedMetric', 'DumpDetResults', 'BaseVideoMetric',
'MOTChallengeMetric', 'CocoVideoMetric', 'ReIDMetrics', 'YouTubeVISMetric'
]
|
from typing import Optional
import numpy as np
import pytest
import torch
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
from docarray.typing import NdArray, TorchTensor
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
def test_from_to_json_doclist():
da = DocList[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
json_da = da.to_json()
da2 = DocList[MyDoc].from_json(json_da)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.parametrize('tensor_type', [TorchTensor, NdArray])
def test_from_to_json_docvec(tensor_type):
def generate_docs(tensor_type):
class InnerDoc(BaseDoc):
tens: tensor_type
class MyDoc(BaseDoc):
text: str
num: Optional[int]
tens: tensor_type
tens_none: Optional[tensor_type]
inner: InnerDoc
inner_none: Optional[InnerDoc]
inner_vec: DocVec[InnerDoc]
inner_vec_none: Optional[DocVec[InnerDoc]]
def _rand_vec_gen(tensor_type):
arr = np.random.rand(5)
if tensor_type == TorchTensor:
arr = torch.from_numpy(arr).to(torch.float32)
return arr
inner = InnerDoc(tens=_rand_vec_gen(tensor_type))
inner_vec = DocVec[InnerDoc]([inner, inner], tensor_type=tensor_type)
vec = DocVec[MyDoc](
[
MyDoc(
text=str(i),
num=None,
tens=_rand_vec_gen(tensor_type),
inner=inner,
inner_none=None,
inner_vec=inner_vec,
inner_vec_none=None,
)
for i in range(5)
],
tensor_type=tensor_type,
)
return vec
v = generate_docs(tensor_type)
json_str = v.to_json()
v_after = DocVec[v.doc_type].from_json(json_str, tensor_type=tensor_type)
assert v_after.tensor_type == v.tensor_type
assert set(v_after._storage.columns.keys()) == set(v._storage.columns.keys())
assert v_after._storage == v._storage
@pytest.mark.tensorflow
def test_from_to_json_docvec_tf():
from docarray.typing import TensorFlowTensor
def generate_docs():
class InnerDoc(BaseDoc):
tens: TensorFlowTensor
class MyDoc(BaseDoc):
text: str
num: Optional[int]
tens: TensorFlowTensor
tens_none: Optional[TensorFlowTensor]
inner: InnerDoc
inner_none: Optional[InnerDoc]
inner_vec: DocVec[InnerDoc]
inner_vec_none: Optional[DocVec[InnerDoc]]
inner = InnerDoc(tens=np.random.rand(5))
inner_vec = DocVec[InnerDoc]([inner, inner], tensor_type=TensorFlowTensor)
vec = DocVec[MyDoc](
[
MyDoc(
text=str(i),
num=None,
tens=np.random.rand(5),
inner=inner,
inner_none=None,
inner_vec=inner_vec,
inner_vec_none=None,
)
for i in range(5)
],
tensor_type=TensorFlowTensor,
)
return vec
v = generate_docs()
json_str = v.to_json()
v_after = DocVec[v.doc_type].from_json(json_str, tensor_type=TensorFlowTensor)
assert v_after.tensor_type == v.tensor_type
assert set(v_after._storage.columns.keys()) == set(v._storage.columns.keys())
assert v_after._storage == v._storage
def test_union_type():
from typing import Union
from docarray.documents import TextDoc
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
docs_copy = docs.from_json(docs.to_json())
assert docs == docs_copy
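# Note: DocVec keeps its data in typed columns (see the `_storage.columns` checks
# above), so from_json is given the matching tensor_type below to rebuild the
# columns with the right tensor backend (asserted via da2.tensor_type and the
# isinstance check on the embedding column).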
@pytest.mark.parametrize('tensor_type', [NdArray, TorchTensor])
def test_from_to_json_tensor_type(tensor_type):
da = DocVec[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
],
tensor_type=tensor_type,
)
json_da = da.to_json()
da2 = DocVec[MyDoc].from_json(json_da, tensor_type=tensor_type)
assert da2.tensor_type == tensor_type
assert isinstance(da2.embedding, tensor_type)
|
from typing import Optional
import numpy as np
import pytest
import torch
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
from docarray.typing import NdArray, TorchTensor
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
def test_from_to_json_doclist():
da = DocList[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
json_da = da.to_json()
da2 = DocList[MyDoc].from_json(json_da)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.parametrize('tensor_type', [TorchTensor, NdArray])
def test_from_to_json_docvec(tensor_type):
def generate_docs(tensor_type):
class InnerDoc(BaseDoc):
tens: tensor_type
class MyDoc(BaseDoc):
text: str
num: Optional[int]
tens: tensor_type
tens_none: Optional[tensor_type]
inner: InnerDoc
inner_none: Optional[InnerDoc]
inner_vec: DocVec[InnerDoc]
inner_vec_none: Optional[DocVec[InnerDoc]]
def _rand_vec_gen(tensor_type):
arr = np.random.rand(5)
if tensor_type == TorchTensor:
arr = torch.from_numpy(arr).to(torch.float32)
return arr
inner = InnerDoc(tens=_rand_vec_gen(tensor_type))
inner_vec = DocVec[InnerDoc]([inner, inner], tensor_type=tensor_type)
vec = DocVec[MyDoc](
[
MyDoc(
text=str(i),
num=None,
tens=_rand_vec_gen(tensor_type),
inner=inner,
inner_none=None,
inner_vec=inner_vec,
inner_vec_none=None,
)
for i in range(5)
],
tensor_type=tensor_type,
)
return vec
v = generate_docs(tensor_type)
bytes_ = v.to_json()
v_after = DocVec[v.doc_type].from_json(bytes_, tensor_type=tensor_type)
assert v_after.tensor_type == v.tensor_type
assert set(v_after._storage.columns.keys()) == set(v._storage.columns.keys())
assert v_after._storage == v._storage
@pytest.mark.tensorflow
def test_from_to_json_docvec_tf():
from docarray.typing import TensorFlowTensor
def generate_docs():
class InnerDoc(BaseDoc):
tens: TensorFlowTensor
class MyDoc(BaseDoc):
text: str
num: Optional[int]
tens: TensorFlowTensor
tens_none: Optional[TensorFlowTensor]
inner: InnerDoc
inner_none: Optional[InnerDoc]
inner_vec: DocVec[InnerDoc]
inner_vec_none: Optional[DocVec[InnerDoc]]
inner = InnerDoc(tens=np.random.rand(5))
inner_vec = DocVec[InnerDoc]([inner, inner], tensor_type=TensorFlowTensor)
vec = DocVec[MyDoc](
[
MyDoc(
text=str(i),
num=None,
tens=np.random.rand(5),
inner=inner,
inner_none=None,
inner_vec=inner_vec,
inner_vec_none=None,
)
for i in range(5)
],
tensor_type=TensorFlowTensor,
)
return vec
v = generate_docs()
bytes_ = v.to_json()
v_after = DocVec[v.doc_type].from_json(bytes_, tensor_type=TensorFlowTensor)
assert v_after.tensor_type == v.tensor_type
assert set(v_after._storage.columns.keys()) == set(v._storage.columns.keys())
assert v_after._storage == v._storage
def test_union_type():
from typing import Union
from docarray.documents import TextDoc
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
docs_copy = docs.from_json(docs.to_json())
assert docs == docs_copy
@pytest.mark.parametrize('tensor_type', [NdArray, TorchTensor])
def test_from_to_json_tensor_type(tensor_type):
da = DocVec[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
],
tensor_type=tensor_type,
)
json_da = da.to_json()
da2 = DocVec[MyDoc].from_json(json_da, tensor_type=tensor_type)
assert da2.tensor_type == tensor_type
assert isinstance(da2.embedding, tensor_type)
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Sequence, Tuple
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from mmdet.core.utils import OptMultiConfig
from mmdet.registry import MODELS
@MODELS.register_module()
class CTResNetNeck(BaseModule):
"""The neck used in `CenterNet <https://arxiv.org/abs/1904.07850>`_ for
object classification and box regression.
Args:
in_channels (int): Number of input channels.
num_deconv_filters (tuple[int]): Number of filters per stage.
num_deconv_kernels (tuple[int]): Number of kernels per stage.
use_dcn (bool): If True, use DCNv2. Defaults to True.
init_cfg (:obj:`ConfigDict` or dict or list[dict] or
list[:obj:`ConfigDict`], optional): Initialization
config dict.
"""
def __init__(self,
in_channels: int,
num_deconv_filters: Tuple[int, ...],
num_deconv_kernels: Tuple[int, ...],
use_dcn: bool = True,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
assert len(num_deconv_filters) == len(num_deconv_kernels)
self.fp16_enabled = False
self.use_dcn = use_dcn
self.in_channels = in_channels
self.deconv_layers = self._make_deconv_layer(num_deconv_filters,
num_deconv_kernels)
def _make_deconv_layer(
self, num_deconv_filters: Tuple[int, ...],
num_deconv_kernels: Tuple[int, ...]) -> nn.Sequential:
"""use deconv layers to upsample backbone's output."""
layers = []
for i in range(len(num_deconv_filters)):
feat_channels = num_deconv_filters[i]
conv_module = ConvModule(
self.in_channels,
feat_channels,
3,
padding=1,
conv_cfg=dict(type='DCNv2') if self.use_dcn else None,
norm_cfg=dict(type='BN'))
layers.append(conv_module)
upsample_module = ConvModule(
feat_channels,
feat_channels,
num_deconv_kernels[i],
stride=2,
padding=1,
conv_cfg=dict(type='deconv'),
norm_cfg=dict(type='BN'))
layers.append(upsample_module)
self.in_channels = feat_channels
return nn.Sequential(*layers)
def init_weights(self) -> None:
"""Initialize the parameters."""
for m in self.modules():
if isinstance(m, nn.ConvTranspose2d):
# In order to be consistent with the source code,
# reset the ConvTranspose2d initialization parameters
m.reset_parameters()
# Simulated bilinear upsampling kernel
w = m.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (
1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# self.use_dcn is False
elif not self.use_dcn and isinstance(m, nn.Conv2d):
# In order to be consistent with the source code,
# reset the Conv2d initialization parameters
m.reset_parameters()
def forward(self, x: Sequence[torch.Tensor]) -> Tuple[torch.Tensor]:
"""model forward."""
assert isinstance(x, (list, tuple))
outs = self.deconv_layers(x[-1])
return outs,
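# A minimal, self-contained sketch of how this neck could be exercised. The channel
# and kernel numbers below are placeholder assumptions for the demo, not values taken
# from any released CenterNet config.
if __name__ == '__main__':
    neck = CTResNetNeck(
        in_channels=512,
        num_deconv_filters=(256, 128, 64),
        num_deconv_kernels=(4, 4, 4),
        use_dcn=False)  # plain convs so the sketch runs without the DCNv2 op
    neck.init_weights()
    feats = [torch.rand(1, 512, 16, 16)]  # dummy backbone output, last level is used
    out, = neck(feats)  # each deconv stage upsamples spatially by 2x
    print(out.shape)  # torch.Size([1, 64, 128, 128])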
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Sequence, Tuple
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from mmdet.core.utils import OptMultiConfig
from mmdet.registry import MODELS
@MODELS.register_module()
class CTResNetNeck(BaseModule):
"""The neck used in `CenterNet <https://arxiv.org/abs/1904.07850>`_ for
object classification and box regression.
Args:
in_channels (int): Number of input channels.
num_deconv_filters (tuple[int]): Number of filters per stage.
num_deconv_kernels (tuple[int]): Number of kernels per stage.
use_dcn (bool): If True, use DCNv2. Defaults to True.
init_cfg (:obj:`ConfigDict` or dict or list[dict] or
list[:obj:`ConfigDict`], optional): Initialization
config dict.
"""
def __init__(self,
in_channels: int,
num_deconv_filters: Tuple[int, ...],
num_deconv_kernels: Tuple[int, ...],
use_dcn: bool = True,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
assert len(num_deconv_filters) == len(num_deconv_kernels)
self.fp16_enabled = False
self.use_dcn = use_dcn
self.in_channels = in_channels
self.deconv_layers = self._make_deconv_layer(num_deconv_filters,
num_deconv_kernels)
def _make_deconv_layer(
self, num_deconv_filters: Tuple[int, ...],
num_deconv_kernels: Tuple[int, ...]) -> nn.Sequential:
"""use deconv layers to upsample backbone's output."""
layers = []
for i in range(len(num_deconv_filters)):
feat_channels = num_deconv_filters[i]
conv_module = ConvModule(
self.in_channels,
feat_channels,
3,
padding=1,
conv_cfg=dict(type='DCNv2') if self.use_dcn else None,
norm_cfg=dict(type='BN'))
layers.append(conv_module)
upsample_module = ConvModule(
feat_channels,
feat_channels,
num_deconv_kernels[i],
stride=2,
padding=1,
conv_cfg=dict(type='deconv'),
norm_cfg=dict(type='BN'))
layers.append(upsample_module)
self.in_channels = feat_channels
return nn.Sequential(*layers)
def init_weights(self) -> None:
"""Initialize the parameters."""
for m in self.modules():
if isinstance(m, nn.ConvTranspose2d):
# In order to be consistent with the source code,
# reset the ConvTranspose2d initialization parameters
m.reset_parameters()
# Simulated bilinear upsampling kernel
w = m.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (
1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# self.use_dcn is False
elif not self.use_dcn and isinstance(m, nn.Conv2d):
# In order to be consistent with the source code,
# reset the Conv2d initialization parameters
m.reset_parameters()
def forward(self, x: Sequence[torch.Tensor]) -> Tuple[torch.Tensor]:
"""model forward."""
assert isinstance(x, (list, tuple))
outs = self.deconv_layers(x[-1])
return outs,
|
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = TypeVar('ShapeT')
class AbstractTensor(AbstractType, Generic[ShapeT], ABC):
__parametrized_meta__ = type
@classmethod
@abc.abstractmethod
def __docarray_validate_shape__(cls, t: T, shape: Tuple[int]) -> T:
"""Every tensor has to implement this method in order to
enable syntax of the form Tensor[shape].
It is called when a tensor is assigned to a field of this type.
i.e. when a tensor is passed to a Document field of type Tensor[shape].
The intended behaviour is as follows:
- If the shape of `t` is equal to `shape`, return `t`.
- If the shape of `t` is not equal to `shape`,
but can be reshaped to `shape`, return `t` reshaped to `shape`.
- If the shape of `t` is not equal to `shape`
and cannot be reshaped to `shape`, raise a ValueError.
:param t: The tensor to validate.
:param shape: The shape to validate against.
:return: The validated tensor.
"""
...
@classmethod
def __docarray_validate_getitem__(cls, item: Any) -> Tuple[int]:
"""This method validates the input to __class_getitem__.
It is called at "class creation time",
i.e. when a class is created with syntax of the form Tensor[shape].
The default implementation tries to cast any `item` to a tuple of ints.
A subclass can override this method to implement custom validation logic.
The output of this is eventually passed to
        {ref}`AbstractTensor.__docarray_validate_shape__` as its `shape` argument.
        Raises `TypeError` if the input `item` does not pass validation.
:param item: The item to validate, passed to __class_getitem__ (`Tensor[item]`).
:return: The validated item == the target shape of this tensor.
"""
if isinstance(item, int):
item = (item,)
try:
item = tuple(item)
except TypeError:
raise TypeError(f'{item} is not a valid tensor shape.')
return item
@classmethod
def _docarray_create_parametrized_type(cls: Type[T], shape: Tuple[int]):
shape_str = ', '.join([str(s) for s in shape])
class _ParametrizedTensor(
cls, # type: ignore
metaclass=cls.__parametrized_meta__, # type: ignore
):
_docarray_target_shape = shape
@classmethod
def validate(
_cls,
value: Any,
field: 'ModelField',
config: 'BaseConfig',
):
t = super().validate(value, field, config)
return _cls.__docarray_validate_shape__(t, _cls._docarray_target_shape)
_ParametrizedTensor.__name__ = f'{cls.__name__}[{shape_str}]'
_ParametrizedTensor.__qualname__ = f'{cls.__qualname__}[{shape_str}]'
return _ParametrizedTensor
def __class_getitem__(cls, item: Any):
target_shape = cls.__docarray_validate_getitem__(item)
return cls._docarray_create_parametrized_type(target_shape)
@classmethod
@abc.abstractmethod
def __docarray_stack__(cls: Type[T], seq: Union[List[T], Tuple[T]]) -> T:
"""Stack a sequence of tensors into a single tensor."""
...
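# A minimal sketch of the parametrization contract above, assuming the concrete
# NdArray subclass shipped in docarray.typing: `NdArray[3, 224, 224]` goes through
# __class_getitem__ -> __docarray_validate_getitem__, and the resulting
# parametrized type runs __docarray_validate_shape__ on every value it validates.
if __name__ == '__main__':
    import numpy as np
    from pydantic import parse_obj_as

    from docarray.typing import NdArray

    ImageTensor = NdArray[3, 224, 224]
    t = parse_obj_as(ImageTensor, np.zeros((3, 224, 224)))
    print(t.shape)  # (3, 224, 224); an incompatible shape would fail validation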
|
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
from docarray.proto import NdArrayProto
T = TypeVar('T', bound='AbstractTensor')
ShapeT = TypeVar('ShapeT')
class AbstractTensor(AbstractType, Generic[ShapeT], ABC):
__parametrized_meta__ = type
@classmethod
@abc.abstractmethod
def __docarray_validate_shape__(cls, t: T, shape: Tuple[int]) -> T:
"""Every tensor has to implement this method in order to
enable syntax of the form Tensor[shape].
It is called when a tensor is assigned to a field of this type.
i.e. when a tensor is passed to a Document field of type Tensor[shape].
The intended behaviour is as follows:
- If the shape of `t` is equal to `shape`, return `t`.
- If the shape of `t` is not equal to `shape`,
but can be reshaped to `shape`, return `t` reshaped to `shape`.
- If the shape of `t` is not equal to `shape`
and cannot be reshaped to `shape`, raise a ValueError.
:param t: The tensor to validate.
:param shape: The shape to validate against.
:return: The validated tensor.
"""
...
@classmethod
def __docarray_validate_getitem__(cls, item: Any) -> Tuple[int]:
"""This method validates the input to __class_getitem__.
It is called at "class creation time",
i.e. when a class is created with syntax of the form Tensor[shape].
The default implementation tries to cast any `item` to a tuple of ints.
A subclass can override this method to implement custom validation logic.
The output of this is eventually passed to
        {ref}`AbstractTensor.__docarray_validate_shape__` as its `shape` argument.
        Raises `TypeError` if the input `item` does not pass validation.
:param item: The item to validate, passed to __class_getitem__ (`Tensor[item]`).
:return: The validated item == the target shape of this tensor.
"""
if isinstance(item, int):
item = (item,)
try:
item = tuple(item)
except TypeError:
raise TypeError(f'{item} is not a valid tensor shape.')
return item
@classmethod
def _docarray_create_parametrized_type(cls: Type[T], shape: Tuple[int]):
shape_str = ', '.join([str(s) for s in shape])
class _ParametrizedTensor(
cls, # type: ignore
metaclass=cls.__parametrized_meta__, # type: ignore
):
_docarray_target_shape = shape
@classmethod
def validate(
_cls,
value: Any,
field: 'ModelField',
config: 'BaseConfig',
):
t = super().validate(value, field, config)
return _cls.__docarray_validate_shape__(t, _cls._docarray_target_shape)
_ParametrizedTensor.__name__ = f'{cls.__name__}[{shape_str}]'
_ParametrizedTensor.__qualname__ = f'{cls.__qualname__}[{shape_str}]'
return _ParametrizedTensor
def __class_getitem__(cls, item: Any):
target_shape = cls.__docarray_validate_getitem__(item)
return cls._docarray_create_parametrized_type(target_shape)
@classmethod
@abc.abstractmethod
def __docarray_stack__(cls: Type[T], seq: Union[List[T], Tuple[T]]) -> T:
"""Stack a sequence of tensors into a single tensor."""
...
def to_protobuf(self) -> 'NdArrayProto':
"""
transform self into a NdArrayProto protobuf message
"""
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, PointCloud3DUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
try:
import torch
torch_available = True
except ImportError:
torch_available = False
T = TypeVar('T', bound='PointCloud3D')
class PointCloud3D(BaseDocument):
"""
Document for handling point clouds for 3D data representation.
Point cloud is a representation of a 3D mesh. It is made by repeatedly and uniformly
sampling points within the surface of the 3D body. Compared to the mesh
representation, the point cloud is a fixed size ndarray (shape=(n_samples, 3)) and
hence easier for deep learning algorithms to handle.
    A PointCloud3D Document can contain a PointCloud3DUrl (`PointCloud3D.url`), an
AnyTensor (`PointCloud3D.tensor`), and an AnyEmbedding (`PointCloud3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import PointCloud3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[AnyEmbedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
pc.second_embedding = model(pc.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import PointCloud3D, Text
# compose it
class MultiModalDoc(BaseDocument):
point_cloud: PointCloud3D
text: Text
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensor = mmdoc.point_cloud.url.load(samples=100)
"""
url: Optional[PointCloud3DUrl]
tensor: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available and isinstance(value, torch.Tensor)
):
value = cls(tensor=value)
return super().validate(value)
|
from typing import Optional
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, PointCloud3DUrl
class PointCloud3D(BaseDocument):
"""
Document for handling point clouds for 3D data representation.
Point cloud is a representation of a 3D mesh. It is made by repeatedly and uniformly
sampling points within the surface of the 3D body. Compared to the mesh
representation, the point cloud is a fixed size ndarray (shape=(n_samples, 3)) and
hence easier for deep learning algorithms to handle.
    A PointCloud3D Document can contain a PointCloud3DUrl (`PointCloud3D.url`), an
AnyTensor (`PointCloud3D.tensor`), and an AnyEmbedding (`PointCloud3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import PointCloud3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[AnyEmbedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
pc.second_embedding = model(pc.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import PointCloud3D, Text
# compose it
class MultiModalDoc(BaseDocument):
point_cloud: PointCloud3D
text: Text
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensor = mmdoc.point_cloud.url.load(samples=100)
"""
url: Optional[PointCloud3DUrl]
tensor: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .aflink import AppearanceFreeLink
from .camera_motion_compensation import CameraMotionCompensation
from .interpolation import InterpolateTracklets
from .kalman_filter import KalmanFilter
from .similarity import embed_similarity
__all__ = [
'KalmanFilter', 'InterpolateTracklets', 'embed_similarity',
'AppearanceFreeLink', 'CameraMotionCompensation'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .interpolation import InterpolateTracklets
from .kalman_filter import KalmanFilter
from .similarity import embed_similarity
__all__ = ['KalmanFilter', 'InterpolateTracklets', 'embed_similarity']
|
import importlib
import pytest
from fastapi.testclient import TestClient
from ...utils import needs_py39, needs_py310
@pytest.fixture(
name="client",
params=[
"tutorial001",
pytest.param("tutorial001_py310", marks=needs_py310),
"tutorial001_an",
pytest.param("tutorial001_an_py39", marks=needs_py39),
pytest.param("tutorial001_an_py310", marks=needs_py310),
],
)
def get_client(request: pytest.FixtureRequest):
mod = importlib.import_module(f"docs_src.additional_status_codes.{request.param}")
client = TestClient(mod.app)
return client
def test_update(client: TestClient):
response = client.put("/items/foo", json={"name": "Wrestlers"})
assert response.status_code == 200, response.text
assert response.json() == {"name": "Wrestlers", "size": None}
def test_create(client: TestClient):
response = client.put("/items/red", json={"name": "Chillies"})
assert response.status_code == 201, response.text
assert response.json() == {"name": "Chillies", "size": None}
|
from fastapi.testclient import TestClient
from docs_src.additional_status_codes.tutorial001 import app
client = TestClient(app)
def test_update():
response = client.put("/items/foo", json={"name": "Wrestlers"})
assert response.status_code == 200, response.text
assert response.json() == {"name": "Wrestlers", "size": None}
def test_create():
response = client.put("/items/red", json={"name": "Chillies"})
assert response.status_code == 201, response.text
assert response.json() == {"name": "Chillies", "size": None}
|
"""Notion tool spec."""
from typing import Any, Dict, List, Optional, Type
import requests
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.tool_spec.base import SPEC_FUNCTION_TYPE, BaseToolSpec
from llama_index.readers.notion import NotionPageReader
SEARCH_URL = "https://api.notion.com/v1/search"
class NotionLoadDataSchema(BaseModel):
"""Notion load data schema."""
page_ids: Optional[List[str]] = None
database_id: Optional[str] = None
class NotionSearchDataSchema(BaseModel):
"""Notion search data schema."""
query: str
direction: Optional[str] = None
timestamp: Optional[str] = None
value: Optional[str] = None
property: Optional[str] = None
page_size: int = 100
class NotionToolSpec(BaseToolSpec):
"""Notion tool spec.
Currently a simple wrapper around the data loader.
TODO: add more methods to the Notion spec.
"""
spec_functions = ["load_data", "search_data"]
def __init__(self, integration_token: Optional[str] = None) -> None:
"""Initialize with parameters."""
self.reader = NotionPageReader(integration_token=integration_token)
def get_fn_schema_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[Type[BaseModel]]:
"""Return map from function name."""
if fn_name == "load_data":
return NotionLoadDataSchema
elif fn_name == "search_data":
return NotionSearchDataSchema
else:
raise ValueError(f"Invalid function name: {fn_name}")
def load_data(
self,
page_ids: Optional[List[str]] = None,
database_ids: Optional[List[str]] = None,
) -> str:
"""Loads content from a set of page ids or database ids.
Don't use this endpoint if you don't know the page ids or database ids.
"""
page_ids = page_ids or []
docs = self.reader.load_data(page_ids=page_ids, database_ids=database_ids)
return "\n".join([doc.get_content() for doc in docs])
def search_data(
self,
query: str,
direction: Optional[str] = None,
timestamp: Optional[str] = None,
value: Optional[str] = None,
property: Optional[str] = None,
page_size: int = 100,
) -> List[Dict[str, Any]]:
"""Search a list of relevant pages.
Contains metadata for each page (but not the page content).
params:
query: the title of the page or database to search for, which is fuzzy matched.
"""
payload: Dict[str, Any] = {
"query": query,
"page_size": page_size,
}
if direction is not None or timestamp is not None:
payload["sort"] = {}
if direction is not None:
payload["sort"]["direction"] = direction
if timestamp is not None:
payload["sort"]["timestamp"] = timestamp
if value is not None or property is not None:
payload["filter"] = {}
if value is not None:
payload["filter"]["value"] = value
if property is not None:
payload["filter"]["property"] = property
response = requests.post(SEARCH_URL, json=payload, headers=self.reader.headers)
response_json = response.json()
return response_json["results"]
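# A minimal usage sketch. The integration token and query below are placeholders
# (assumptions); the calls go to the live Notion API, so this is illustration only.
if __name__ == "__main__":
    tool_spec = NotionToolSpec(integration_token="<your-notion-integration-token>")
    # Fuzzy-match pages/databases by title, then load the content of the first hit.
    results = tool_spec.search_data(query="Meeting notes", page_size=5)
    if results:
        print(tool_spec.load_data(page_ids=[results[0]["id"]]))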
|
"""Notion tool spec."""
from typing import Any, Dict, List, Optional, Type
import requests
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.tool_spec.base import SPEC_FUNCTION_TYPE, BaseToolSpec
from llama_index.readers.notion import NotionPageReader
SEARCH_URL = "https://api.notion.com/v1/search"
class NotionLoadDataSchema(BaseModel):
"""Notion load data schema."""
page_ids: Optional[List[str]] = None
database_id: Optional[str] = None
class NotionSearchDataSchema(BaseModel):
"""Notion search data schema."""
query: str
direction: Optional[str] = None
timestamp: Optional[str] = None
value: Optional[str] = None
property: Optional[str] = None
page_size: int = 100
class NotionToolSpec(BaseToolSpec):
"""Notion tool spec.
Currently a simple wrapper around the data loader.
TODO: add more methods to the Notion spec.
"""
spec_functions = ["load_data", "search_data"]
def __init__(self, integration_token: Optional[str] = None) -> None:
"""Initialize with parameters."""
self.reader = NotionPageReader(integration_token=integration_token)
def get_fn_schema_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[Type[BaseModel]]:
"""Return map from function name."""
if fn_name == "load_data":
return NotionLoadDataSchema
elif fn_name == "search_data":
return NotionSearchDataSchema
else:
raise ValueError(f"Invalid function name: {fn_name}")
def load_data(
self, page_ids: Optional[List[str]] = None, database_id: Optional[str] = None
) -> str:
"""Loads content from a set of page ids or a database id.
Don't use this endpoint if you don't know the page ids or database id.
"""
page_ids = page_ids or []
docs = self.reader.load_data(page_ids=page_ids, database_id=database_id)
return "\n".join([doc.get_content() for doc in docs])
def search_data(
self,
query: str,
direction: Optional[str] = None,
timestamp: Optional[str] = None,
value: Optional[str] = None,
property: Optional[str] = None,
page_size: int = 100,
) -> str:
"""Search a list of relevant pages.
Contains metadata for each page (but not the page content).
"""
payload: Dict[str, Any] = {
"query": query,
"page_size": page_size,
}
if direction is not None or timestamp is not None:
payload["sort"] = {}
if direction is not None:
payload["sort"]["direction"] = direction
if timestamp is not None:
payload["sort"]["timestamp"] = timestamp
if value is not None or property is not None:
payload["filter"] = {}
if value is not None:
payload["filter"]["value"] = value
if property is not None:
payload["filter"]["property"] = property
response = requests.post(SEARCH_URL, json=payload, headers=self.reader.headers)
response_json = response.json()
return response_json["results"]
|
from typing import Optional
import pandas as pd
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
return MyDocNested
def test_to_from_pandas_df(nested_doc_cls):
da = DocList[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
),
nested_doc_cls(text='hello world', image=ImageDoc()),
]
)
df = da.to_pandas()
assert isinstance(df, pd.DataFrame)
assert len(df) == 2
assert (
df.columns
== [
'id',
'count',
'text',
'image__id',
'image__url',
'image__tensor',
'image__embedding',
'image__bytes_',
]
).all()
da_from_df = DocList[nested_doc_cls].from_pandas(df)
for doc1, doc2 in zip(da, da_from_df):
assert doc1 == doc2
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_pandas_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocList.from_pandas(df=df)
def test_from_pandas_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocList[nested_doc.__class__].from_pandas(df=df)
|
from typing import Optional
import pandas as pd
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
return MyDocNested
def test_to_from_pandas_df(nested_doc_cls):
da = DocArray[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
),
nested_doc_cls(text='hello world', image=ImageDoc()),
]
)
df = da.to_pandas()
assert isinstance(df, pd.DataFrame)
assert len(df) == 2
assert (
df.columns
== [
'id',
'count',
'text',
'image__id',
'image__url',
'image__tensor',
'image__embedding',
'image__bytes_',
]
).all()
da_from_df = DocArray[nested_doc_cls].from_pandas(df)
for doc1, doc2 in zip(da, da_from_df):
assert doc1 == doc2
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_pandas_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocArray.from_pandas(df=df)
def test_from_pandas_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocArray[nested_doc.__class__].from_pandas(df=df)
|
import struct
import zlib
from pathlib import Path
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class HWPReader(BaseReader):
"""
    Hwp Reader. Reads contents from an HWP file.
Args: None.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.FILE_HEADER_SECTION = "FileHeader"
self.HWP_SUMMARY_SECTION = "\x05HwpSummaryInformation"
self.SECTION_NAME_LENGTH = len("Section")
self.BODYTEXT_SECTION = "BodyText"
self.HWP_TEXT_TAGS = [67]
def load_data(
self, file: Path, extra_info: Optional[Dict] = None
) -> List[Document]:
"""
Load data and extract table from Hwp file.
Args:
file (Path): Path for the Hwp file.
Returns:
List[Document].
"""
import olefile
load_file = olefile.OleFileIO(file)
file_dir = load_file.listdir()
if self.is_valid(file_dir) is False:
raise Exception("Not Valid HwpFile")
result_text = self._get_text(load_file, file_dir)
result = self._text_to_document(text=result_text, extra_info=extra_info)
return [result]
def is_valid(self, dirs):
if [self.FILE_HEADER_SECTION] not in dirs:
return False
return [self.HWP_SUMMARY_SECTION] in dirs
def get_body_sections(self, dirs):
m = []
for d in dirs:
if d[0] == self.BODYTEXT_SECTION:
m.append(int(d[1][self.SECTION_NAME_LENGTH :]))
return ["BodyText/Section" + str(x) for x in sorted(m)]
def _text_to_document(
self, text: str, extra_info: Optional[Dict] = None
) -> Document:
return Document(text=text, extra_info=extra_info or {})
def get_text(self):
return self.text
    # Extract the full text of the document
def _get_text(self, load_file, file_dir):
sections = self.get_body_sections(file_dir)
text = ""
for section in sections:
text += self.get_text_from_section(load_file, section)
text += "\n"
self.text = text
return self.text
def is_compressed(self, load_file):
header = load_file.openstream("FileHeader")
header_data = header.read()
return (header_data[36] & 1) == 1
def get_text_from_section(self, load_file, section):
bodytext = load_file.openstream(section)
data = bodytext.read()
unpacked_data = (
zlib.decompress(data, -15) if self.is_compressed(load_file) else data
)
size = len(unpacked_data)
i = 0
text = ""
while i < size:
header = struct.unpack_from("<I", unpacked_data, i)[0]
rec_type = header & 0x3FF
            (header >> 10) & 0x3FF  # record level (nesting depth); not needed here
rec_len = (header >> 20) & 0xFFF
if rec_type in self.HWP_TEXT_TAGS:
rec_data = unpacked_data[i + 4 : i + 4 + rec_len]
text += rec_data.decode("utf-16")
text += "\n"
i += 4 + rec_len
return text
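# A worked example of the record-header layout assumed above (HWP 5.0 record
# headers pack the tag id in bits 0-9, the level in bits 10-19, the size in bits 20-31).
if __name__ == "__main__":
    header = 67 | (1 << 10) | (12 << 20)  # tag 67 (paragraph text), level 1, 12 bytes
    print(header & 0x3FF)  # -> 67, the record tag checked against HWP_TEXT_TAGS
    print((header >> 10) & 0x3FF)  # -> 1, the nesting level (discarded above)
    print((header >> 20) & 0xFFF)  # -> 12, the payload length used to advance `i`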
|
import struct
import zlib
from pathlib import Path
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class HWPReader(BaseReader):
"""Hwp Reader. Reads contents from Hwp file.
Args: None.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.FILE_HEADER_SECTION = "FileHeader"
self.HWP_SUMMARY_SECTION = "\x05HwpSummaryInformation"
self.SECTION_NAME_LENGTH = len("Section")
self.BODYTEXT_SECTION = "BodyText"
self.HWP_TEXT_TAGS = [67]
def load_data(
self, file: Path, extra_info: Optional[Dict] = None
) -> List[Document]:
"""Load data and extract table from Hwp file.
Args:
file (Path): Path for the Hwp file.
Returns:
List[Document].
"""
import olefile
load_file = olefile.OleFileIO(file)
file_dir = load_file.listdir()
if self.is_valid(file_dir) is False:
raise Exception("Not Valid HwpFile")
result_text = self._get_text(load_file, file_dir)
result = self._text_to_document(text=result_text, extra_info=extra_info)
return [result]
def is_valid(self, dirs):
if [self.FILE_HEADER_SECTION] not in dirs:
return False
return [self.HWP_SUMMARY_SECTION] in dirs
def get_body_sections(self, dirs):
m = []
for d in dirs:
if d[0] == self.BODYTEXT_SECTION:
m.append(int(d[1][self.SECTION_NAME_LENGTH :]))
return ["BodyText/Section" + str(x) for x in sorted(m)]
def _text_to_document(
self, text: str, extra_info: Optional[Dict] = None
) -> Document:
return Document(text=text, extra_info=extra_info or {})
def get_text(self):
return self.text
    # Extract the full text of the document
def _get_text(self, load_file, file_dir):
sections = self.get_body_sections(file_dir)
text = ""
for section in sections:
text += self.get_text_from_section(load_file, section)
text += "\n"
self.text = text
return self.text
def is_compressed(self, load_file):
header = load_file.openstream("FileHeader")
header_data = header.read()
return (header_data[36] & 1) == 1
def get_text_from_section(self, load_file, section):
bodytext = load_file.openstream(section)
data = bodytext.read()
unpacked_data = (
zlib.decompress(data, -15) if self.is_compressed(load_file) else data
)
size = len(unpacked_data)
i = 0
text = ""
while i < size:
header = struct.unpack_from("<I", unpacked_data, i)[0]
rec_type = header & 0x3FF
            (header >> 10) & 0x3FF  # record level (nesting depth); not needed here
rec_len = (header >> 20) & 0xFFF
if rec_type in self.HWP_TEXT_TAGS:
rec_data = unpacked_data[i + 4 : i + 4 + rec_len]
text += rec_data.decode("utf-16")
text += "\n"
i += 4 + rec_len
return text
|
from typing import Any, Collection, List, Optional, Tuple, Union
from llama_index.core.tools.types import AsyncBaseTool
from pydantic import BaseModel
class LLMCompilerParseResult(BaseModel):
"""LLMCompiler parser result."""
thought: str
idx: int
tool_name: str
args: str
class JoinerOutput(BaseModel):
"""Joiner output."""
thought: str
answer: str
is_replan: bool = False
def _default_stringify_rule_for_arguments(args: Union[List, Tuple]) -> str:
if len(args) == 1:
return str(args[0])
else:
return str(tuple(args))
class LLMCompilerTask(BaseModel):
"""
LLM Compiler Task.
Object taken from
https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/task_fetching_unit.py.
"""
idx: int
name: str
# tool: Callable
tool: AsyncBaseTool
args: Union[List, Tuple]
dependencies: Collection[int]
# TODO: look into this
# stringify_rule: Optional[Callable] = None
thought: Optional[str] = None
observation: Optional[str] = None
is_join: bool = False
class Config:
arbitrary_types_allowed = True
async def __call__(self) -> Any:
return await self.tool.acall(*self.args)
def get_thought_action_observation(
self,
include_action: bool = True,
include_thought: bool = True,
include_action_idx: bool = False,
) -> str:
thought_action_observation = ""
if self.thought and include_thought:
thought_action_observation = f"Thought: {self.thought}\n"
if include_action:
idx = f"{self.idx}. " if include_action_idx else ""
# if self.stringify_rule:
# # If the user has specified a custom stringify rule for the
# # function argument, use it
# thought_action_observation += f"{idx}{self.stringify_rule(self.args)}\n"
# else:
# Otherwise, we have a default stringify rule
thought_action_observation += (
f"{idx}{self.name}"
f"{_default_stringify_rule_for_arguments(self.args)}\n"
)
if self.observation is not None:
thought_action_observation += f"Observation: {self.observation}\n"
return thought_action_observation
|
from typing import Any, Collection, List, Optional, Tuple, Union
from llama_index.core.tools.types import AsyncBaseTool
from pydantic import BaseModel
class LLMCompilerParseResult(BaseModel):
"""LLMCompiler parser result."""
thought: str
idx: int
tool_name: str
args: str
class JoinerOutput(BaseModel):
"""Joiner output."""
thought: str
answer: str
is_replan: bool = False
def _default_stringify_rule_for_arguments(args: Union[List, Tuple]) -> str:
if len(args) == 1:
return str(args[0])
else:
return str(tuple(args))
class LLMCompilerTask(BaseModel):
"""LLM Compiler Task.
Object taken from
https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/task_fetching_unit.py.
"""
idx: int
name: str
# tool: Callable
tool: AsyncBaseTool
args: Union[List, Tuple]
dependencies: Collection[int]
# TODO: look into this
# stringify_rule: Optional[Callable] = None
thought: Optional[str] = None
observation: Optional[str] = None
is_join: bool = False
class Config:
arbitrary_types_allowed = True
async def __call__(self) -> Any:
return await self.tool.acall(*self.args)
def get_thought_action_observation(
self,
include_action: bool = True,
include_thought: bool = True,
include_action_idx: bool = False,
) -> str:
thought_action_observation = ""
if self.thought and include_thought:
thought_action_observation = f"Thought: {self.thought}\n"
if include_action:
idx = f"{self.idx}. " if include_action_idx else ""
# if self.stringify_rule:
# # If the user has specified a custom stringify rule for the
# # function argument, use it
# thought_action_observation += f"{idx}{self.stringify_rule(self.args)}\n"
# else:
# Otherwise, we have a default stringify rule
thought_action_observation += (
f"{idx}{self.name}"
f"{_default_stringify_rule_for_arguments(self.args)}\n"
)
if self.observation is not None:
thought_action_observation += f"Observation: {self.observation}\n"
return thought_action_observation
|
_base_ = './scnet_x101-64x4d_fpn_20e_coco.py'
train_dataloader = dict(batch_size=1, num_workers=1)
optim_wrapper = dict(optimizer=dict(lr=0.01))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
auto_scale_lr = dict(base_batch_size=8)
|
_base_ = './scnet_x101_64x4d_fpn_20e_coco.py'
train_dataloader = dict(batch_size=1, num_workers=1)
optim_wrapper = dict(optimizer=dict(lr=0.01))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
auto_scale_lr = dict(base_batch_size=8)
|
"""Global Gemini Utilities (shared between Gemini LLM and Vertex)."""
from __future__ import annotations
from collections.abc import Sequence
from llama_index.core.base.llms.types import ChatMessage, MessageRole
ROLES_TO_GEMINI: dict[MessageRole, MessageRole] = {
MessageRole.USER: MessageRole.USER,
MessageRole.ASSISTANT: MessageRole.MODEL,
## Gemini chat mode only has user and model roles. Put the rest in user role.
MessageRole.SYSTEM: MessageRole.USER,
MessageRole.MODEL: MessageRole.MODEL,
## Gemini has function role, but chat mode only accepts user and model roles.
## https://medium.com/@smallufo/openai-vs-gemini-function-calling-a664f7f2b29f
## Agent response's 'tool/function' role is converted to 'user' role.
MessageRole.TOOL: MessageRole.USER,
MessageRole.FUNCTION: MessageRole.USER,
}
ROLES_FROM_GEMINI: dict[str, MessageRole] = {
## Gemini has user, model and function roles.
"user": MessageRole.USER,
"model": MessageRole.ASSISTANT,
"function": MessageRole.TOOL,
}
def merge_neighboring_same_role_messages(
messages: Sequence[ChatMessage],
) -> Sequence[ChatMessage]:
if len(messages) < 2:
# Nothing to merge
return messages
# Gemini does not support multiple messages of the same role in a row, so we merge them
merged_messages = []
i = 0
while i < len(messages):
current_message = messages[i]
# Initialize merged content with current message content
merged_content = [current_message.content]
# Check if the next message exists and has the same role
while (
i + 1 < len(messages)
and ROLES_TO_GEMINI[messages[i + 1].role]
== ROLES_TO_GEMINI[current_message.role]
):
i += 1
next_message = messages[i]
merged_content.extend([next_message.content])
# Create a new ChatMessage or similar object with merged content
merged_message = ChatMessage(
role=ROLES_TO_GEMINI[current_message.role],
content="\n".join([str(msg_content) for msg_content in merged_content]),
additional_kwargs=current_message.additional_kwargs,
)
merged_messages.append(merged_message)
i += 1
return merged_messages
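# A quick illustration of the merging behaviour: the SYSTEM message and the two
# USER messages all map to Gemini's "user" role, so they collapse into one message.
if __name__ == "__main__":
    msgs = [
        ChatMessage(role=MessageRole.SYSTEM, content="You are terse."),
        ChatMessage(role=MessageRole.USER, content="Hi"),
        ChatMessage(role=MessageRole.USER, content="What's 2 + 2?"),
        ChatMessage(role=MessageRole.ASSISTANT, content="4"),
    ]
    merged = merge_neighboring_same_role_messages(msgs)
    print(len(merged))  # -> 2: one merged user message, one model message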
|
"""Global Gemini Utilities (shared between Gemini LLM and Vertex)."""
from collections.abc import Sequence
from typing import Dict
from llama_index.core.base.llms.types import ChatMessage, MessageRole
ROLES_TO_GEMINI: Dict[MessageRole, MessageRole] = {
MessageRole.USER: MessageRole.USER,
MessageRole.ASSISTANT: MessageRole.MODEL,
## Gemini chat mode only has user and model roles. Put the rest in user role.
MessageRole.SYSTEM: MessageRole.USER,
MessageRole.MODEL: MessageRole.MODEL,
## Gemini has function role, but chat mode only accepts user and model roles.
## https://medium.com/@smallufo/openai-vs-gemini-function-calling-a664f7f2b29f
## Agent response's 'tool/function' role is converted to 'user' role.
MessageRole.TOOL: MessageRole.USER,
MessageRole.FUNCTION: MessageRole.USER,
}
ROLES_FROM_GEMINI: Dict[MessageRole, MessageRole] = {
## Gemini has user, model and function roles.
MessageRole.USER: MessageRole.USER,
MessageRole.MODEL: MessageRole.ASSISTANT,
MessageRole.FUNCTION: MessageRole.TOOL,
}
def merge_neighboring_same_role_messages(
messages: Sequence[ChatMessage],
) -> Sequence[ChatMessage]:
# Gemini does not support multiple messages of the same role in a row, so we merge them
merged_messages = []
i = 0
while i < len(messages):
current_message = messages[i]
# Initialize merged content with current message content
merged_content = [current_message.content]
# Check if the next message exists and has the same role
while (
i + 1 < len(messages)
and ROLES_TO_GEMINI[messages[i + 1].role]
== ROLES_TO_GEMINI[current_message.role]
):
i += 1
next_message = messages[i]
merged_content.extend([next_message.content])
# Create a new ChatMessage or similar object with merged content
merged_message = ChatMessage(
role=ROLES_TO_GEMINI[current_message.role],
content="\n".join([str(msg_content) for msg_content in merged_content]),
additional_kwargs=current_message.additional_kwargs,
)
merged_messages.append(merged_message)
i += 1
return merged_messages
|
from typing import Optional
import numpy as np
import torch
from docarray import DocumentArray
from docarray.document import BaseDocument
from docarray.typing import Tensor, TorchTensor
def test_proto_simple():
class CustomDoc(BaseDocument):
text: str
doc = CustomDoc(text='hello')
CustomDoc.from_protobuf(doc.to_protobuf())
def test_proto_ndarray():
class CustomDoc(BaseDocument):
tensor: Tensor
tensor = np.zeros((3, 224, 224))
doc = CustomDoc(tensor=tensor)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
assert (new_doc.tensor == tensor).all()
def test_proto_with_nested_doc():
class CustomInnerDoc(BaseDocument):
tensor: Tensor
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(text='hello', inner=CustomInnerDoc(tensor=np.zeros((3, 224, 224))))
CustomDoc.from_protobuf(doc.to_protobuf())
def test_proto_with_chunks_doc():
class CustomInnerDoc(BaseDocument):
tensor: Tensor
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=np.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
def test_proto_with_nested_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(
text='hello', inner=CustomInnerDoc(tensor=torch.zeros((3, 224, 224)))
)
CustomDoc.from_protobuf(doc.to_protobuf())
def test_proto_with_chunks_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=torch.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
def test_optional_field_in_doc():
class CustomDoc(BaseDocument):
text: Optional[str]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
def test_optional_field_nested_in_doc():
class InnerDoc(BaseDocument):
title: str
class CustomDoc(BaseDocument):
text: Optional[InnerDoc]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
|
from typing import Optional
import numpy as np
from docarray import DocumentArray
from docarray.document import BaseDocument
from docarray.typing import Tensor
def test_proto_simple():
class CustomDoc(BaseDocument):
text: str
doc = CustomDoc(text='hello')
CustomDoc.from_protobuf(doc.to_protobuf())
def test_proto_ndarray():
class CustomDoc(BaseDocument):
tensor: Tensor
tensor = np.zeros((3, 224, 224))
doc = CustomDoc(tensor=tensor)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
assert (new_doc.tensor == tensor).all()
def test_proto_with_nested_doc():
class CustomInnerDoc(BaseDocument):
tensor: Tensor
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(text='hello', inner=CustomInnerDoc(tensor=np.zeros((3, 224, 224))))
CustomDoc.from_protobuf(doc.to_protobuf())
def test_proto_with_chunks_doc():
class CustomInnerDoc(BaseDocument):
tensor: Tensor
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=np.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
def test_optional_field_in_doc():
class CustomDoc(BaseDocument):
text: Optional[str]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
def test_optional_field_nested_in_doc():
class InnerDoc(BaseDocument):
title: str
class CustomDoc(BaseDocument):
text: Optional[InnerDoc]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
|
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(2048, 800), (2048, 1024)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
    # If you don't have gt annotations, delete this LoadAnnotations step
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_train.json',
data_prefix=dict(img='leftImg8bit/train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json',
metric='bbox')
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(2048, 800), (2048, 1024)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_train.json',
data_prefix=dict(img='leftImg8bit/train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json',
metric='bbox')
test_evaluator = val_evaluator
|
from __future__ import annotations
import gzip
from . import InputExample
class PairedFilesReader(object):
"""Reads in the a Pair Dataset, split in two files"""
def __init__(self, filepaths):
self.filepaths = filepaths
def get_examples(self, max_examples=0):
fIns = []
for filepath in self.filepaths:
fIn = (
gzip.open(filepath, "rt", encoding="utf-8")
if filepath.endswith(".gz")
else open(filepath, encoding="utf-8")
)
fIns.append(fIn)
examples = []
eof = False
while not eof:
texts = []
for fIn in fIns:
text = fIn.readline()
if text == "":
eof = True
break
texts.append(text)
if eof:
break
examples.append(InputExample(guid=str(len(examples)), texts=texts, label=1))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
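# A minimal usage sketch: two parallel files where line i of each file forms one
# positive pair. The file names below are placeholders, not files shipped with the repo.
if __name__ == "__main__":
    reader = PairedFilesReader(["sentences_a.txt", "sentences_b.txt"])
    examples = reader.get_examples(max_examples=1000)
    print(len(examples), "paired examples loaded")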
|
import gzip
from . import InputExample
class PairedFilesReader(object):
"""Reads in the a Pair Dataset, split in two files"""
def __init__(self, filepaths):
self.filepaths = filepaths
def get_examples(self, max_examples=0):
fIns = []
for filepath in self.filepaths:
fIn = (
gzip.open(filepath, "rt", encoding="utf-8")
if filepath.endswith(".gz")
else open(filepath, encoding="utf-8")
)
fIns.append(fIn)
examples = []
eof = False
while not eof:
texts = []
for fIn in fIns:
text = fIn.readline()
if text == "":
eof = True
break
texts.append(text)
if eof:
break
examples.append(InputExample(guid=str(len(examples)), texts=texts, label=1))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
|
import hashlib
from abc import ABC, abstractmethod
from functools import lru_cache
from typing import Any, Callable, Optional, Union
from typing_extensions import TypeAlias
import torch.fx.graph
class CustomGraphPass(ABC):
"""
Implement this interface for custom Graph passes:
1) The __call__() method contains the implementation of the custom pass.
2) The uuid() method enables inductor to cache compiled graphs when your custom
passes are applied. This method can return any identifier as long as it uniquely
identifies your implementation (and can be pickled). The caching logic includes this
identifier in its key calculation, i.e., any new value will effectively invalidate
existing entries. We expect custom passes would typically depend purely on the
    textual representation of the implementation. In that case, we recommend using the
'get_hash_for_files' helper below to compute a unique hash from the contents of a
static list of source files, i.e., the source(s) containing the custom pass
implementation. That approach ensures that any change to the implementation will
mean a new uuid.
** IMPORTANT ** If your custom pass's behavior depends on some external state, then
you'll need to implement something more complicated (or disable caching).
EXAMPLE:
class MyCustomGraphPass(CustomGraphPass):
def __call__(self, graph: torch.fx.graph.Graph) -> None:
# my custom graph optimization pass
# ...
def uuid(self) -> Optional[Any]:
return get_hash_for_files((__file__,))
"""
@abstractmethod
def __call__(self, graph: torch.fx.graph.Graph) -> None:
"""
Implementation of the custom pass.
"""
@abstractmethod
def uuid(self) -> Optional[Any]:
"""
Return an ID to uniquely identify your custom pass implementation. Return None
to skip inductor code caching entirely.
"""
class CustomGraphModulePass(ABC):
"""
Implement this interface for custom Graph passes:
1) The __call__() method contains the implementation of the custom pass.
2) The uuid() method enables inductor to cache compiled graphs when your custom
passes are applied. This method can return any identifier as long as it uniquely
identifies your implementation (and can be pickled). The caching logic includes this
identifier in its key calculation, i.e., any new value will effectively invalidate
existing entries. We expect custom passes would typically depend purely on the
    textual representation of the implementation. In that case, we recommend using the
'get_hash_for_files' helper below to compute a unique hash from the contents of a
static list of source files, i.e., the source(s) containing the custom pass
implementation. That approach ensures that any change to the implementation will
mean a new uuid.
"""
@abstractmethod
def __call__(self, gm: torch.fx.GraphModule) -> None:
"""
Implementation of the custom pass.
"""
@abstractmethod
def uuid(self) -> Optional[Any]:
"""
Return an ID to uniquely identify your custom pass implementation. Return None
to skip inductor code caching entirely.
"""
CustomGraphPassType: TypeAlias = Optional[
Union[CustomGraphPass, Callable[[torch.fx.graph.Graph], None]]
]
@lru_cache(1)
def get_hash_for_files(paths: tuple[str], extra: str = "") -> bytes:
"""
    Helper to compute a unique hash from the contents of a list of files.
"""
hasher = hashlib.sha256()
hasher.update(extra.encode("utf-8"))
for path in paths:
with open(path, "rb") as f:
hasher.update(path.encode("utf-8"))
hasher.update(f.read())
return hasher.digest()
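# A concrete sketch of the interface documented above. The pass itself follows the
# contract exactly; the registration lines are left commented out because the inductor
# config attribute name (post_grad_custom_post_pass) is an assumption that should be
# checked against your torch version.
class MyNoopGraphPass(CustomGraphPass):
    def __call__(self, graph: torch.fx.graph.Graph) -> None:
        # Walk the graph without modifying it; a real pass would rewrite nodes here.
        for node in graph.nodes:
            pass

    def uuid(self) -> Optional[Any]:
        # Hash the file containing this implementation so any edit invalidates caches.
        return get_hash_for_files((__file__,))


# import torch._inductor.config as inductor_config
# inductor_config.post_grad_custom_post_pass = MyNoopGraphPass()  # assumed hook name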
|
import hashlib
from abc import ABC, abstractmethod
from functools import lru_cache
from typing import Any, Callable, Optional, Union
from typing_extensions import TypeAlias
import torch.fx.graph
class CustomGraphPass(ABC):
"""
Implement this interface for custom Graph passes:
1) The __call__() method contains the implementation of the custom pass.
2) The uuid() method enables inductor to cache compiled graphs when your custom
passes are applied. This method can return any identifier as long as it uniquely
identifies your implementation (and can be pickled). The caching logic includes this
identifier in its key calculation, i.e., any new value will effectively invalidate
existing entries. We expect custom passes would typically depend purely on the
textual representation of the implementation. In that case, we recommend using the
'get_hash_for_files' helper below to compute a unique hash from the contents of a
static list of source files, i.e., the source(s) containing the custom pass
implementation. That approach ensures that any change to the implementation will
mean a new uuid.
** IMPORTANT ** If your custom pass's behavior depends on some external state, then
you'll need to implement something more complicated (or disable caching).
EXAMPLE:
class MyCustomGraphPass(CustomGraphPass):
def __call__(self, graph: torch.fx.graph.Graph) -> None:
# my custom graph optimization pass
# ...
def uuid(self) -> Optional[Any]:
return get_hash_for_files((__file__,))
"""
@abstractmethod
def __call__(self, graph: torch.fx.graph.Graph) -> None:
"""
Implementation of the custom pass.
"""
@abstractmethod
def uuid(self) -> Optional[Any]:
"""
Return an ID to uniquely identify your custom pass implementation. Return None
to skip inductor code caching entirely.
"""
class CustomGraphModulePass(ABC):
"""
Implement this interface for custom Graph passes:
1) The __call__() method contains the implementation of the custom pass.
2) The uuid() method enables inductor to cache compiled graphs when your custom
passes are applied. This method can return any identifier as long as it uniquely
identifies your implementation (and can be pickled). The caching logic includes this
identifier in its key calculation, i.e., any new value will effectively invalidate
existing entries. We expect custom passes would typically depend purely on the
textual representation of the implementation. In that case, we recommend using the
'get_hash_for_files' helper below to compute a unique hash from the contents of a
static list of source files, i.e., the source(s) containing the custom pass
implementation. That approach ensures that any change to the implementation will
mean a new uuid.
"""
@abstractmethod
def __call__(self, gm: torch.fx.GraphModule) -> None:
"""
Implementation of the custom pass.
"""
@abstractmethod
def uuid(self) -> Optional[Any]:
"""
Return an ID to uniquely identify your custom pass implementation. Return None
to skip inductor code caching entirely.
"""
CustomGraphPassType: TypeAlias = Optional[
Union[CustomGraphPass, Callable[[torch.fx.graph.Graph], None]]
]
@lru_cache(1)
def get_hash_for_files(paths: tuple[str], extra: str = "") -> bytes:
"""
    Helper to compute a unique hash from the contents of a list of files.
"""
hasher = hashlib.sha256()
hasher.update(extra.encode("utf-8"))
for path in paths:
with open(path, "rb") as f:
hasher.update(path.encode("utf-8"))
hasher.update(f.read())
return hasher.digest()
|
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
T = TypeVar('T', bound='Mesh3DUrl')
@_register_proto(proto_type_name='mesh_url')
class Mesh3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing 3D mesh information.
Can be remote (web) URL, or a local file path.
"""
def load(
self: T,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> 'VerticesAndFaces':
"""
Load the data from the url into a VerticesAndFaces object containing
vertices and faces information.
---
```python
from docarray import BaseDoc
import numpy as np
from docarray.typing import Mesh3DUrl, NdArray
class MyDoc(BaseDoc):
mesh_url: Mesh3DUrl
doc = MyDoc(mesh_url="toydata/tetrahedron.obj")
tensors = doc.mesh_url.load()
assert isinstance(tensors.vertices, NdArray)
assert isinstance(tensors.faces, NdArray)
```
---
        :param skip_materials: Skip materials if True, else load them.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
:return: VerticesAndFaces object containing vertices and faces information.
"""
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
if not trimesh_args:
trimesh_args = {}
mesh = self._load_trimesh_instance(
force='mesh', skip_materials=skip_materials, **trimesh_args
)
vertices = parse_obj_as(NdArray, mesh.vertices.view(np.ndarray))
faces = parse_obj_as(NdArray, mesh.faces.view(np.ndarray))
return VerticesAndFaces(vertices=vertices, faces=faces)
def display(self) -> None:
"""
Plot mesh from url.
This loads the Trimesh instance of the 3D mesh, and then displays it.
To use this you need to install trimesh[easy]: `pip install 'trimesh[easy]'`.
"""
from IPython.display import display
mesh = self._load_trimesh_instance(skip_materials=False)
display(mesh.show())
|
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
T = TypeVar('T', bound='Mesh3DUrl')
@_register_proto(proto_type_name='mesh_url')
class Mesh3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing 3D mesh information.
Can be remote (web) URL, or a local file path.
"""
def load(
self: T,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> 'VerticesAndFaces':
"""
Load the data from the url into a VerticesAndFaces object containing
vertices and faces information.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDoc
import numpy as np
from docarray.typing import Mesh3DUrl, NdArray
class MyDoc(BaseDoc):
mesh_url: Mesh3DUrl
doc = MyDoc(mesh_url="toydata/tetrahedron.obj")
tensors = doc.mesh_url.load()
assert isinstance(tensors.vertices, NdArray)
assert isinstance(tensors.faces, NdArray)
:param skip_materials: Skip materials if True, else load them.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
:return: VerticesAndFaces object containing vertices and faces information.
"""
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
if not trimesh_args:
trimesh_args = {}
mesh = self._load_trimesh_instance(
force='mesh', skip_materials=skip_materials, **trimesh_args
)
vertices = parse_obj_as(NdArray, mesh.vertices.view(np.ndarray))
faces = parse_obj_as(NdArray, mesh.faces.view(np.ndarray))
return VerticesAndFaces(vertices=vertices, faces=faces)
def display(self) -> None:
"""
Plot mesh from url.
This loads the Trimesh instance of the 3D mesh, and then displays it.
To use this you need to install trimesh[easy]: `pip install 'trimesh[easy]'`.
"""
from IPython.display import display
mesh = self._load_trimesh_instance(skip_materials=False)
display(mesh.show())
|
from typing import TYPE_CHECKING
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from google.protobuf import __version__ as __pb__version__
else:
protobuf = import_library('google.protobuf', raise_error=True)
__pb__version__ = protobuf.__version__
if __pb__version__.startswith('4'):
from docarray.proto.pb.docarray_pb2 import (
DictOfAnyProto,
DocListProto,
DocProto,
DocVecProto,
ListOfAnyProto,
ListOfDocArrayProto,
ListOfDocVecProto,
NdArrayProto,
NodeProto,
)
else:
from docarray.proto.pb2.docarray_pb2 import (
DictOfAnyProto,
DocListProto,
DocProto,
DocVecProto,
ListOfAnyProto,
ListOfDocArrayProto,
ListOfDocVecProto,
NdArrayProto,
NodeProto,
)
__all__ = [
'DocListProto',
'DocProto',
'NdArrayProto',
'NodeProto',
'DocVecProto',
'DocListProto',
'ListOfDocArrayProto',
'ListOfDocVecProto',
'ListOfAnyProto',
'DictOfAnyProto',
]
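# Hedged sketch (not part of the original file): the same major-version dispatch
# written as a small helper, which would also route future majors (e.g. 5.x) to
# the newer bindings instead of relying on a string-prefix check. The helper
# name is hypothetical.
def _pb_package_for(pb_version: str) -> str:
    """Return the generated-module package matching a protobuf version string."""
    major = int(pb_version.split('.', 1)[0])
    return 'docarray.proto.pb' if major >= 4 else 'docarray.proto.pb2'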
|
from typing import TYPE_CHECKING
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from google.protobuf import __version__ as __pb__version__
else:
protobuf = import_library('google.protobuf', raise_error=True)
__pb__version__ = protobuf.__version__
if __pb__version__.startswith('4'):
from docarray.proto.pb.docarray_pb2 import (
DictOfAnyProto,
DocListProto,
DocProto,
DocVecProto,
ListOfAnyProto,
ListOfDocArrayProto,
NdArrayProto,
NodeProto,
)
else:
from docarray.proto.pb2.docarray_pb2 import (
DictOfAnyProto,
DocListProto,
DocProto,
DocVecProto,
ListOfAnyProto,
ListOfDocArrayProto,
NdArrayProto,
NodeProto,
)
__all__ = [
'DocListProto',
'DocProto',
'NdArrayProto',
'NodeProto',
'DocVecProto',
'DocListProto',
'ListOfDocArrayProto',
'ListOfAnyProto',
'DictOfAnyProto',
]
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import List
import numpy as np
import pytest
from image_tf_encoder import ImageTFEncoder
from jina import Document, DocumentArray, Flow
input_dim = 336
target_output_dim = 1280
@pytest.mark.parametrize(
'arr_in',
[
(np.ones((input_dim, input_dim, 3), dtype=np.float32)),
],
)
def test_tf_no_batch(arr_in: np.ndarray):
flow = Flow().add(uses=ImageTFEncoder)
with flow:
results = flow.post(
on='/test',
inputs=DocumentArray([Document(blob=arr_in)]),
return_results=True,
)
assert len(results[0].docs) == 1
assert results[0].docs[0].embedding.shape == (target_output_dim,)
def test_tf_batch():
flow = Flow().add(uses=ImageTFEncoder)
with flow:
results = flow.post(
on='/test',
inputs=(
Document(blob=np.ones((input_dim, input_dim, 3), dtype=np.float32))
for _ in range(25)
),
return_results=True,
)
assert len(results[0].docs.get_attributes('embedding')) == 25
assert results[0].docs.get_attributes('embedding')[0].shape == (
target_output_dim,
)
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_paths'],
[
(
pytest.lazy_fixture('docs_with_blobs'),
[[['r'], 10], [['c'], 0], [['cc'], 0]],
['r'],
),
(
pytest.lazy_fixture('docs_with_chunk_blobs'),
[[['r'], 0], [['c'], 10], [['cc'], 0]],
['c'],
),
(
pytest.lazy_fixture('docs_with_chunk_chunk_blobs'),
[[['r'], 0], [['c'], 0], [['cc'], 10]],
['cc'],
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_paths: List[str]
):
flow = Flow().add(uses=ImageTFEncoder)
with flow:
results = flow.post(
on='/test',
inputs=docs,
parameters={'traversal_paths': traversal_paths},
return_results=True,
)
for path, count in docs_per_path:
embeddings = (
DocumentArray(results[0].docs)
.traverse_flat(path)
.get_attributes('embedding')
)
assert len([x for x in embeddings if x is not None]) == count
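# Hedged sketch (not part of the original file) of the conftest fixtures that the
# parametrization above references via pytest.lazy_fixture. The real fixtures live
# in a conftest.py; counts and shapes here are inferred from the expected
# docs_per_path values (10 blobs at 'r', 'c' and 'cc' respectively).
def _blob_doc() -> Document:
    return Document(blob=np.ones((input_dim, input_dim, 3), dtype=np.float32))


@pytest.fixture()
def docs_with_blobs() -> DocumentArray:
    return DocumentArray([_blob_doc() for _ in range(10)])


@pytest.fixture()
def docs_with_chunk_blobs() -> DocumentArray:
    return DocumentArray([Document(chunks=[_blob_doc()]) for _ in range(10)])


@pytest.fixture()
def docs_with_chunk_chunk_blobs() -> DocumentArray:
    return DocumentArray(
        [Document(chunks=[Document(chunks=[_blob_doc()])]) for _ in range(10)]
    )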
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:"/GPU:0"',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import List
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from ...image_tf_encoder import ImageTFEncoder
input_dim = 336
target_output_dim = 1280
@pytest.mark.parametrize(
'arr_in',
[
(np.ones((input_dim, input_dim, 3), dtype=np.float32)),
],
)
def test_tf_no_batch(arr_in: np.ndarray):
flow = Flow().add(uses=ImageTFEncoder)
with flow:
results = flow.post(
on='/test',
inputs=DocumentArray([Document(blob=arr_in)]),
return_results=True,
)
assert len(results[0].docs) == 1
assert results[0].docs[0].embedding.shape == (target_output_dim,)
def test_tf_batch():
flow = Flow().add(uses=ImageTFEncoder)
with flow:
results = flow.post(
on='/test',
inputs=(
Document(blob=np.ones((input_dim, input_dim, 3), dtype=np.float32))
for _ in range(25)
),
return_results=True,
)
assert len(results[0].docs.get_attributes('embedding')) == 25
assert results[0].docs.get_attributes('embedding')[0].shape == (
target_output_dim,
)
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_paths'],
[
(
pytest.lazy_fixture('docs_with_blobs'),
[[['r'], 10], [['c'], 0], [['cc'], 0]],
['r'],
),
(
pytest.lazy_fixture('docs_with_chunk_blobs'),
[[['r'], 0], [['c'], 10], [['cc'], 0]],
['c'],
),
(
pytest.lazy_fixture('docs_with_chunk_chunk_blobs'),
[[['r'], 0], [['c'], 0], [['cc'], 10]],
['cc'],
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_paths: List[str]
):
flow = Flow().add(uses=ImageTFEncoder)
with flow:
results = flow.post(
on='/test',
inputs=docs,
parameters={'traversal_paths': traversal_paths},
return_results=True,
)
for path, count in docs_per_path:
embeddings = (
DocumentArray(results[0].docs)
.traverse_flat(path)
.get_attributes('embedding')
)
assert len([x for x in embeddings if x is not None]) == count
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:"/GPU:0"',
],
timeout=30,
check=True,
)
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AudioTorchTensor, AudioUrl
from docarray.utils.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.audio import AudioTensorFlowTensor
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor, _ = uri.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDoc):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, AudioTorchTensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_tensorflow_tensor_field(file_url):
class MyAudioDoc(BaseDoc):
audio_url: AudioUrl
tensor: Optional[AudioTensorFlowTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, AudioTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor, _ = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
*[file for file in AUDIO_FILES],
REMOTE_AUDIO_FILE,
],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'my/local/text/file.txt',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='AudioUrl'):
parse_obj_as(AudioUrl, path_to_file)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert 'audio_url' in str(proto)
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import AudioTorchTensor, AudioUrl
from docarray.utils.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.audio import AudioTensorFlowTensor
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor, _ = uri.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDocument):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, AudioTorchTensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_tensorflow_tensor_field(file_url):
class MyAudioDoc(BaseDocument):
audio_url: AudioUrl
tensor: Optional[AudioTensorFlowTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, AudioTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor, _ = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
*[file for file in AUDIO_FILES],
REMOTE_AUDIO_FILE,
],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'my/local/text/file.txt',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='AudioUrl'):
parse_obj_as(AudioUrl, path_to_file)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert 'audio_url' in str(proto)
|
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import PointCloud3DUrl
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
n_samples = 100
url = parse_obj_as(PointCloud3DUrl, file_path)
point_cloud = url.load(samples=n_samples)
assert isinstance(point_cloud, np.ndarray)
assert point_cloud.shape == (n_samples, 3)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load_with_multiple_geometries_true(file_format, file_path):
n_samples = 100
url = parse_obj_as(PointCloud3DUrl, file_path)
point_cloud = url.load(samples=n_samples, multiple_geometries=True)
assert isinstance(point_cloud, np.ndarray)
assert len(point_cloud.shape) == 3
assert point_cloud.shape[1:] == (100, 3)
def test_json_schema():
schema_json_of(PointCloud3DUrl)
def test_dump_json():
url = parse_obj_as(PointCloud3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'file_format,path_to_file',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('obj', REMOTE_OBJ_FILE),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
('illegal', 'my/local/text/file.png'),
],
)
def test_validation(file_format, path_to_file):
if file_format == 'illegal':
with pytest.raises(ValueError, match='PointCloud3DUrl'):
parse_obj_as(PointCloud3DUrl, path_to_file)
else:
url = parse_obj_as(PointCloud3DUrl, path_to_file)
assert isinstance(url, PointCloud3DUrl)
assert isinstance(url, str)
def test_proto_point_cloud_url():
uri = parse_obj_as(PointCloud3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
|
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import PointCloud3DUrl
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
n_samples = 100
url = parse_obj_as(PointCloud3DUrl, file_path)
point_cloud = url.load(samples=n_samples)
assert isinstance(point_cloud, np.ndarray)
assert point_cloud.shape == (n_samples, 3)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load_with_multiple_geometries_true(file_format, file_path):
n_samples = 100
url = parse_obj_as(PointCloud3DUrl, file_path)
point_cloud = url.load(samples=n_samples, multiple_geometries=True)
assert isinstance(point_cloud, np.ndarray)
assert len(point_cloud.shape) == 3
assert point_cloud.shape[1:] == (100, 3)
def test_json_schema():
schema_json_of(PointCloud3DUrl)
def test_dump_json():
url = parse_obj_as(PointCloud3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'file_format,path_to_file',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('obj', REMOTE_OBJ_FILE),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
('illegal', 'my/local/text/file.png'),
],
)
def test_validation(file_format, path_to_file):
if file_format == 'illegal':
with pytest.raises(ValueError, match='PointCloud3DUrl'):
parse_obj_as(PointCloud3DUrl, path_to_file)
else:
url = parse_obj_as(PointCloud3DUrl, path_to_file)
assert isinstance(url, PointCloud3DUrl)
assert isinstance(url, str)
def test_proto_point_cloud_url():
uri = parse_obj_as(PointCloud3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import TASK_UTILS
from ..transforms import bbox2distance, distance2bbox
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class DistancePointBBoxCoder(BaseBBoxCoder):
"""Distance Point BBox coder.
This coder encodes gt bboxes (x1, y1, x2, y2) into point-to-boundary
distances (left, top, right, bottom) and decodes them back to the original boxes.
Args:
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
"""
def __init__(self, clip_border=True):
super(BaseBBoxCoder, self).__init__()
self.clip_border = clip_border
def encode(self, points, gt_bboxes, max_dis=None, eps=0.1):
"""Encode bounding box to distances.
Args:
points (Tensor): Shape (N, 2), The format is [x, y].
gt_bboxes (Tensor): Shape (N, 4), The format is "xyxy"
max_dis (float): Upper bound of the distance. Default None.
eps (float): a small value to ensure target < max_dis, instead <=.
Default 0.1.
Returns:
Tensor: Box transformation deltas. The shape is (N, 4).
"""
assert points.size(0) == gt_bboxes.size(0)
assert points.size(-1) == 2
assert gt_bboxes.size(-1) == 4
return bbox2distance(points, gt_bboxes, max_dis, eps)
def decode(self, points, pred_bboxes, max_shape=None):
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (B, N, 2) or (N, 2).
pred_bboxes (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom). Shape (B, N, 4)
or (N, 4)
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]],
and the length of max_shape should also be B.
Default None.
Returns:
Tensor: Boxes with shape (N, 4) or (B, N, 4)
"""
assert points.size(0) == pred_bboxes.size(0)
assert points.size(-1) == 2
assert pred_bboxes.size(-1) == 4
if self.clip_border is False:
max_shape = None
return distance2bbox(points, pred_bboxes, max_shape)
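# Hedged usage sketch (not part of the original file): round-trip a couple of
# boxes through encode()/decode(). Points are chosen inside their gt boxes so the
# encoded distances are non-negative; with max_dis=None and max_shape=None the
# round trip is exact.
if __name__ == '__main__':
    import torch
    coder = DistancePointBBoxCoder(clip_border=True)
    points = torch.tensor([[10.0, 10.0], [30.0, 40.0]])
    gt_bboxes = torch.tensor([[0.0, 0.0, 20.0, 20.0], [20.0, 30.0, 50.0, 60.0]])
    distances = coder.encode(points, gt_bboxes)  # (N, 4) left/top/right/bottom
    decoded = coder.decode(points, distances)  # (N, 4) back to xyxy
    assert torch.allclose(decoded, gt_bboxes)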
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import BBOX_CODERS
from ..transforms import bbox2distance, distance2bbox
from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class DistancePointBBoxCoder(BaseBBoxCoder):
"""Distance Point BBox coder.
This coder encodes gt bboxes (x1, y1, x2, y2) into point-to-boundary
distances (left, top, right, bottom) and decodes them back to the original boxes.
Args:
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
"""
def __init__(self, clip_border=True):
super(BaseBBoxCoder, self).__init__()
self.clip_border = clip_border
def encode(self, points, gt_bboxes, max_dis=None, eps=0.1):
"""Encode bounding box to distances.
Args:
points (Tensor): Shape (N, 2), The format is [x, y].
gt_bboxes (Tensor): Shape (N, 4), The format is "xyxy"
max_dis (float): Upper bound of the distance. Default None.
eps (float): a small value to ensure target < max_dis, instead <=.
Default 0.1.
Returns:
Tensor: Box transformation deltas. The shape is (N, 4).
"""
assert points.size(0) == gt_bboxes.size(0)
assert points.size(-1) == 2
assert gt_bboxes.size(-1) == 4
return bbox2distance(points, gt_bboxes, max_dis, eps)
def decode(self, points, pred_bboxes, max_shape=None):
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (B, N, 2) or (N, 2).
pred_bboxes (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom). Shape (B, N, 4)
or (N, 4)
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]],
and the length of max_shape should also be B.
Default None.
Returns:
Tensor: Boxes with shape (N, 4) or (B, N, 4)
"""
assert points.size(0) == pred_bboxes.size(0)
assert points.size(-1) == 2
assert pred_bboxes.size(-1) == 4
if self.clip_border is False:
max_shape = None
return distance2bbox(points, pred_bboxes, max_shape)
|
from typing import Any, Callable, Optional, Tuple
import torch
from .. import transforms
from .vision import VisionDataset
class FakeData(VisionDataset):
"""A fake dataset that returns randomly generated images and returns them as PIL images
Args:
size (int, optional): Size of the dataset. Default: 1000 images
image_size(tuple, optional): Size of the returned images. Default: (3, 224, 224)
num_classes(int, optional): Number of classes in the dataset. Default: 10
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
random_offset (int): Offsets the index-based random seed used to
generate each image. Default: 0
"""
def __init__(
self,
size: int = 1000,
image_size: Tuple[int, int, int] = (3, 224, 224),
num_classes: int = 10,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
random_offset: int = 0,
) -> None:
super().__init__(transform=transform, target_transform=target_transform)
self.size = size
self.num_classes = num_classes
self.image_size = image_size
self.random_offset = random_offset
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
# create random image that is consistent with the index id
if index >= len(self):
raise IndexError(f"{self.__class__.__name__} index out of range")
rng_state = torch.get_rng_state()
torch.manual_seed(index + self.random_offset)
img = torch.randn(*self.image_size)
target = torch.randint(0, self.num_classes, size=(1,), dtype=torch.long)[0]
torch.set_rng_state(rng_state)
# convert to PIL Image
img = transforms.ToPILImage()(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target.item()
def __len__(self) -> int:
return self.size
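# Hedged usage sketch (not part of the original file): build a tiny FakeData
# instance and fetch one deterministic sample. Without a transform, samples come
# back as PIL images with integer class targets.
if __name__ == "__main__":
    dataset = FakeData(size=4, image_size=(3, 32, 32), num_classes=5)
    img, target = dataset[0]
    print(img.size, target)  # (32, 32) and a class index in [0, 5)
    assert len(dataset) == 4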
|
from typing import Any, Callable, Optional, Tuple
import torch
from .. import transforms
from .vision import VisionDataset
class FakeData(VisionDataset):
"""A fake dataset that returns randomly generated images and returns them as PIL images
Args:
size (int, optional): Size of the dataset. Default: 1000 images
image_size(tuple, optional): Size of the returned images. Default: (3, 224, 224)
num_classes(int, optional): Number of classes in the dataset. Default: 10
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
random_offset (int): Offsets the index-based random seed used to
generate each image. Default: 0
"""
def __init__(
self,
size: int = 1000,
image_size: Tuple[int, int, int] = (3, 224, 224),
num_classes: int = 10,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
random_offset: int = 0,
) -> None:
super().__init__(None, transform=transform, target_transform=target_transform) # type: ignore[arg-type]
self.size = size
self.num_classes = num_classes
self.image_size = image_size
self.random_offset = random_offset
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
# create random image that is consistent with the index id
if index >= len(self):
raise IndexError(f"{self.__class__.__name__} index out of range")
rng_state = torch.get_rng_state()
torch.manual_seed(index + self.random_offset)
img = torch.randn(*self.image_size)
target = torch.randint(0, self.num_classes, size=(1,), dtype=torch.long)[0]
torch.set_rng_state(rng_state)
# convert to PIL Image
img = transforms.ToPILImage()(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target.item()
def __len__(self) -> int:
return self.size
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Any, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataElement]]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Record time flag before start a epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
self.t = time.time()
def _before_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
mode: str = 'train') -> None:
"""Logging time for loading data and update the time flag.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_log(f'{mode}/data_time',
time.time() - self.t)
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Logging time for a iteration and update the time flag.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_log(f'{mode}/time', time.time() - self.t)
self.t = time.time()
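# Hedged sketch (not part of the original file): because the hook is registered
# in the HOOKS registry, it can be built from a plain config dict. How it is
# attached to a runner (e.g. via a default-hooks config) may differ between
# mmengine versions.
if __name__ == '__main__':
    timer_hook = HOOKS.build(dict(type='IterTimerHook'))
    assert isinstance(timer_hook, IterTimerHook)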
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Any, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Record time flag before start a epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
self.t = time.time()
def _before_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
mode: str = 'train') -> None:
"""Logging time for loading data and update the time flag.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_log(f'{mode}/data_time',
time.time() - self.t)
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataSample]]] = None,
mode: str = 'train') -> None:
"""Logging time for a iteration and update the time flag.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_log(f'{mode}/time', time.time() - self.t)
self.t = time.time()
|
import csv
import gzip
import os
from . import InputExample
class STSDataReader:
"""Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx)
Default values expect a tab-separated file with the first and second columns holding the sentence pair and the third column the score (0...1). The default config normalizes scores from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=0,
s2_col_idx=1,
score_col_idx=2,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
self.dataset_folder = dataset_folder
self.score_col_idx = score_col_idx
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.delimiter = delimiter
self.quoting = quoting
self.normalize_scores = normalize_scores
self.min_score = min_score
self.max_score = max_score
def get_examples(self, filename, max_examples=0):
"""filename specified which data split to use (train.csv, dev.csv, test.csv)."""
filepath = os.path.join(self.dataset_folder, filename)
with gzip.open(filepath, "rt", encoding="utf8") if filename.endswith(".gz") else open(
filepath, encoding="utf-8"
) as fIn:
data = csv.reader(fIn, delimiter=self.delimiter, quoting=self.quoting)
examples = []
for id, row in enumerate(data):
score = float(row[self.score_col_idx])
if self.normalize_scores: # Normalize to a 0...1 value
score = (score - self.min_score) / (self.max_score - self.min_score)
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
examples.append(InputExample(guid=filename + str(id), texts=[s1, s2], label=score))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
class STSBenchmarkDataReader(STSDataReader):
"""Reader especially for the STS benchmark dataset. There, the sentences are in column 5 and 6, the score is in column 4.
Scores are normalized from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=5,
s2_col_idx=6,
score_col_idx=4,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
super().__init__(
dataset_folder=dataset_folder,
s1_col_idx=s1_col_idx,
s2_col_idx=s2_col_idx,
score_col_idx=score_col_idx,
delimiter=delimiter,
quoting=quoting,
normalize_scores=normalize_scores,
min_score=min_score,
max_score=max_score,
)
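# Hedged usage sketch (not part of the original file): the folder layout and file
# name below follow the STS-benchmark packaging commonly used in
# sentence-transformers examples and are assumptions, not guarantees.
if __name__ == "__main__":
    reader = STSBenchmarkDataReader("datasets/stsbenchmark")
    for example in reader.get_examples("sts-train.csv", max_examples=5):
        # each InputExample carries the sentence pair and a score scaled to 0...1
        print(example.texts, example.label)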
|
from . import InputExample
import csv
import gzip
import os
class STSDataReader:
"""
Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx)
Default values expect a tab-separated file with the first and second columns holding the sentence pair and the third column the score (0...1). The default config normalizes scores from 0...5 to 0...1
"""
def __init__(self, dataset_folder, s1_col_idx=0, s2_col_idx=1, score_col_idx=2, delimiter="\t",
quoting=csv.QUOTE_NONE, normalize_scores=True, min_score=0, max_score=5):
self.dataset_folder = dataset_folder
self.score_col_idx = score_col_idx
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.delimiter = delimiter
self.quoting = quoting
self.normalize_scores = normalize_scores
self.min_score = min_score
self.max_score = max_score
def get_examples(self, filename, max_examples=0):
"""
filename specifies which data split to use (train.csv, dev.csv, test.csv).
"""
filepath = os.path.join(self.dataset_folder, filename)
with gzip.open(filepath, 'rt', encoding='utf8') if filename.endswith('.gz') else open(filepath, encoding="utf-8") as fIn:
data = csv.reader(fIn, delimiter=self.delimiter, quoting=self.quoting)
examples = []
for id, row in enumerate(data):
score = float(row[self.score_col_idx])
if self.normalize_scores: # Normalize to a 0...1 value
score = (score - self.min_score) / (self.max_score - self.min_score)
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
examples.append(InputExample(guid=filename+str(id), texts=[s1, s2], label=score))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
class STSBenchmarkDataReader(STSDataReader):
"""
Reader specifically for the STS benchmark dataset. There, the sentences are in columns 5 and 6, and the score is in column 4.
Scores are normalized from 0...5 to 0...1
"""
def __init__(self, dataset_folder, s1_col_idx=5, s2_col_idx=6, score_col_idx=4, delimiter="\t",
quoting=csv.QUOTE_NONE, normalize_scores=True, min_score=0, max_score=5):
super().__init__(dataset_folder=dataset_folder, s1_col_idx=s1_col_idx, s2_col_idx=s2_col_idx, score_col_idx=score_col_idx, delimiter=delimiter,
quoting=quoting, normalize_scores=normalize_scores, min_score=min_score, max_score=max_score)
|
from typing import (
Union,
Optional,
TYPE_CHECKING,
List,
Dict,
)
if TYPE_CHECKING:
import numpy as np
from docarray import DocumentArray
class FindMixin:
def _find(
self,
query: 'np.ndarray',
limit: Optional[Union[int, float]] = 20,
only_id: bool = False,
filter: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given an input query.
:param query: the query documents to search.
:param limit: the number of results to get for each query document in search.
:param only_id: if set, then returning matches will only contain ``id``
:param filter: filter query used for pre-filtering
:param kwargs: other kwargs.
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
from docarray.math import ndarray
n_rows, _ = ndarray.get_array_rows(query)
if n_rows == 1:
query = query.reshape(1, -1)
_, match_docs = self._annlite._search_documents(
query, limit=limit, filter=filter or {}, include_metadata=not only_id
)
return match_docs
def _filter(
self,
filter: Dict,
limit: Optional[Union[int, float]] = 20,
only_id: bool = False,
) -> 'DocumentArray':
"""Returns a subset of documents by filtering by the given filter (`Annlite` filter).
:param filter: the input filter to apply in each stored document
:param limit: the number of results to get for each query document in search.
:param only_id: if set, then returning matches will only contain ``id``
:return: a `DocumentArray` containing the `Document` objects that verify the filter.
"""
docs = self._annlite.filter(
filter=filter, limit=limit, include_metadata=not only_id
)
return DocumentArray(docs)
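# Hedged usage sketch (not part of the original file): this mixin backs
# DocumentArray's public find() for the annlite storage backend. The config key
# 'n_dim' follows docarray v1's annlite documentation and is an assumption here.
if __name__ == '__main__':
    import numpy as np
    from docarray import Document, DocumentArray

    da = DocumentArray(storage='annlite', config={'n_dim': 8})
    da.extend([Document(embedding=np.random.rand(8)) for _ in range(100)])
    matches = da.find(np.random.rand(8), limit=5)  # dispatches to _find() above
    print(len(matches))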
|
from typing import (
Union,
Optional,
TYPE_CHECKING,
List,
Dict,
)
if TYPE_CHECKING:
import numpy as np
from .... import DocumentArray
class FindMixin:
def _find(
self,
query: 'np.ndarray',
limit: Optional[Union[int, float]] = 20,
only_id: bool = False,
filter: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given an input query.
:param query: the query documents to search.
:param limit: the number of results to get for each query document in search.
:param only_id: if set, then returning matches will only contain ``id``
:param filter: filter query used for pre-filtering
:param kwargs: other kwargs.
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
from ....math import ndarray
n_rows, _ = ndarray.get_array_rows(query)
if n_rows == 1:
query = query.reshape(1, -1)
_, match_docs = self._annlite._search_documents(
query, limit=limit, filter=filter or {}, include_metadata=not only_id
)
return match_docs
def _filter(
self,
filter: Dict,
limit: Optional[Union[int, float]] = 20,
only_id: bool = False,
) -> 'DocumentArray':
"""Returns a subset of documents by filtering by the given filter (`Annlite` filter).
:param filter: the input filter to apply in each stored document
:param limit: the number of results to get for each query document in search.
:param only_id: if set, then returning matches will only contain ``id``
:return: a `DocumentArray` containing the `Document` objects that verify the filter.
"""
docs = self._annlite.filter(
filter=filter, limit=limit, include_metadata=not only_id
)
return DocumentArray(docs)
|
import pathlib
from typing import Any, BinaryIO, Dict, Iterator, List, Tuple, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper
from torchvision.prototype.datapoints import BoundingBox, Label
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
read_mat,
)
from .._api import register_dataset, register_info
class StanfordCarsLabelReader(IterDataPipe[Tuple[int, int, int, int, int, str]]):
def __init__(self, datapipe: IterDataPipe[Dict[str, Any]]) -> None:
self.datapipe = datapipe
def __iter__(self) -> Iterator[Tuple[int, int, int, int, int, str]]:
for _, file in self.datapipe:
data = read_mat(file, squeeze_me=True)
for ann in data["annotations"]:
yield tuple(ann) # type: ignore[misc]
NAME = "stanford-cars"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class StanfordCars(Dataset):
"""Stanford Cars dataset.
homepage="https://ai.stanford.edu/~jkrause/cars/car_dataset.html",
dependencies=scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_URL_ROOT = "https://ai.stanford.edu/~jkrause/"
_URLS = {
"train": f"{_URL_ROOT}car196/cars_train.tgz",
"test": f"{_URL_ROOT}car196/cars_test.tgz",
"cars_test_annos_withlabels": f"{_URL_ROOT}car196/cars_test_annos_withlabels.mat",
"car_devkit": f"{_URL_ROOT}cars/car_devkit.tgz",
}
_CHECKSUM = {
"train": "b97deb463af7d58b6bfaa18b2a4de9829f0f79e8ce663dfa9261bf7810e9accd",
"test": "bffea656d6f425cba3c91c6d83336e4c5f86c6cffd8975b0f375d3a10da8e243",
"cars_test_annos_withlabels": "790f75be8ea34eeded134cc559332baf23e30e91367e9ddca97d26ed9b895f05",
"car_devkit": "512b227b30e2f0a8aab9e09485786ab4479582073a144998da74d64b801fd288",
}
def _resources(self) -> List[OnlineResource]:
resources: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUM[self._split])]
if self._split == "train":
resources.append(HttpResource(url=self._URLS["car_devkit"], sha256=self._CHECKSUM["car_devkit"]))
else:
resources.append(
HttpResource(
self._URLS["cars_test_annos_withlabels"], sha256=self._CHECKSUM["cars_test_annos_withlabels"]
)
)
return resources
def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Tuple[int, int, int, int, int, str]]) -> Dict[str, Any]:
image, target = data
path, buffer = image
image = EncodedImage.from_file(buffer)
return dict(
path=path,
image=image,
label=Label(target[4] - 1, categories=self._categories),
bounding_box=BoundingBox(target[:4], format="xyxy", spatial_size=image.spatial_size),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, targets_dp = resource_dps
if self._split == "train":
targets_dp = Filter(targets_dp, path_comparator("name", "cars_train_annos.mat"))
targets_dp = StanfordCarsLabelReader(targets_dp)
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self) -> List[str]:
resources = self._resources()
devkit_dp = resources[1].load(self._root)
meta_dp = Filter(devkit_dp, path_comparator("name", "cars_meta.mat"))
_, meta_file = next(iter(meta_dp))
return list(read_mat(meta_file, squeeze_me=True)["class_names"])
def __len__(self) -> int:
return 8_144 if self._split == "train" else 8_041
|
import pathlib
from typing import Any, BinaryIO, Dict, Iterator, List, Tuple, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
read_mat,
)
from torchvision.prototype.features import BoundingBox, Label
from .._api import register_dataset, register_info
class StanfordCarsLabelReader(IterDataPipe[Tuple[int, int, int, int, int, str]]):
def __init__(self, datapipe: IterDataPipe[Dict[str, Any]]) -> None:
self.datapipe = datapipe
def __iter__(self) -> Iterator[Tuple[int, int, int, int, int, str]]:
for _, file in self.datapipe:
data = read_mat(file, squeeze_me=True)
for ann in data["annotations"]:
yield tuple(ann) # type: ignore[misc]
NAME = "stanford-cars"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class StanfordCars(Dataset):
"""Stanford Cars dataset.
homepage="https://ai.stanford.edu/~jkrause/cars/car_dataset.html",
dependencies=scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_URL_ROOT = "https://ai.stanford.edu/~jkrause/"
_URLS = {
"train": f"{_URL_ROOT}car196/cars_train.tgz",
"test": f"{_URL_ROOT}car196/cars_test.tgz",
"cars_test_annos_withlabels": f"{_URL_ROOT}car196/cars_test_annos_withlabels.mat",
"car_devkit": f"{_URL_ROOT}cars/car_devkit.tgz",
}
_CHECKSUM = {
"train": "b97deb463af7d58b6bfaa18b2a4de9829f0f79e8ce663dfa9261bf7810e9accd",
"test": "bffea656d6f425cba3c91c6d83336e4c5f86c6cffd8975b0f375d3a10da8e243",
"cars_test_annos_withlabels": "790f75be8ea34eeded134cc559332baf23e30e91367e9ddca97d26ed9b895f05",
"car_devkit": "512b227b30e2f0a8aab9e09485786ab4479582073a144998da74d64b801fd288",
}
def _resources(self) -> List[OnlineResource]:
resources: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUM[self._split])]
if self._split == "train":
resources.append(HttpResource(url=self._URLS["car_devkit"], sha256=self._CHECKSUM["car_devkit"]))
else:
resources.append(
HttpResource(
self._URLS["cars_test_annos_withlabels"], sha256=self._CHECKSUM["cars_test_annos_withlabels"]
)
)
return resources
def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Tuple[int, int, int, int, int, str]]) -> Dict[str, Any]:
image, target = data
path, buffer = image
image = EncodedImage.from_file(buffer)
return dict(
path=path,
image=image,
label=Label(target[4] - 1, categories=self._categories),
bounding_box=BoundingBox(target[:4], format="xyxy", spatial_size=image.spatial_size),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, targets_dp = resource_dps
if self._split == "train":
targets_dp = Filter(targets_dp, path_comparator("name", "cars_train_annos.mat"))
targets_dp = StanfordCarsLabelReader(targets_dp)
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self) -> List[str]:
resources = self._resources()
devkit_dp = resources[1].load(self._root)
meta_dp = Filter(devkit_dp, path_comparator("name", "cars_meta.mat"))
_, meta_file = next(iter(meta_dp))
return list(read_mat(meta_file, squeeze_me=True)["class_names"])
def __len__(self) -> int:
return 8_144 if self._split == "train" else 8_041
|
from .autograd_utils import use_deterministic_algorithms
from .backend_utils import set_audio_backend
from .case_utils import (
disabledInCI,
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfCudaSmallMemory,
skipIfNoAudioDevice,
skipIfNoCtcDecoder,
skipIfNoCuCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoHWAccel,
skipIfNoMacOS,
skipIfNoModule,
skipIfNoQengine,
skipIfNoRIR,
skipIfNoSox,
skipIfPy310,
skipIfRocm,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
zip_equal,
)
from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
from .func_utils import torch_script
from .image_utils import get_image, rgb_to_gray, rgb_to_yuv_ccir, save_image
from .parameterized_utils import load_params, nested_params
from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"set_audio_backend",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"is_ffmpeg_available",
"skipIfNoAudioDevice",
"skipIfNoCtcDecoder",
"skipIfNoCuCtcDecoder",
"skipIfNoCuda",
"skipIfCudaSmallMemory",
"skipIfNoExec",
"skipIfNoMacOS",
"skipIfNoModule",
"skipIfNoRIR",
"skipIfNoSox",
"skipIfNoSoxBackend",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfNoHWAccel",
"skipIfPy310",
"disabledInCI",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
"rgb_to_gray",
"rgb_to_yuv_ccir",
"use_deterministic_algorithms",
"zip_equal",
]
|
from .autograd_utils import use_deterministic_algorithms
from .backend_utils import set_audio_backend
from .case_utils import (
disabledInCI,
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfCudaSmallMemory,
skipIfNoAudioDevice,
skipIfNoCtcDecoder,
skipIfNoCuCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoHWAccel,
skipIfNoKaldi,
skipIfNoMacOS,
skipIfNoModule,
skipIfNoQengine,
skipIfNoRIR,
skipIfNoSox,
skipIfPy310,
skipIfRocm,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
zip_equal,
)
from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
from .func_utils import torch_script
from .image_utils import get_image, rgb_to_gray, rgb_to_yuv_ccir, save_image
from .parameterized_utils import load_params, nested_params
from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"set_audio_backend",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"is_ffmpeg_available",
"skipIfNoAudioDevice",
"skipIfNoCtcDecoder",
"skipIfNoCuCtcDecoder",
"skipIfNoCuda",
"skipIfCudaSmallMemory",
"skipIfNoExec",
"skipIfNoMacOS",
"skipIfNoModule",
"skipIfNoKaldi",
"skipIfNoRIR",
"skipIfNoSox",
"skipIfNoSoxBackend",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfNoHWAccel",
"skipIfPy310",
"disabledInCI",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
"rgb_to_gray",
"rgb_to_yuv_ccir",
"use_deterministic_algorithms",
"zip_equal",
]
|
import warnings
from abc import ABC
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.chat_history import (
BaseChatMessageHistory,
InMemoryChatMessageHistory,
)
from langchain_core.memory import BaseMemory
from langchain_core.messages import AIMessage, HumanMessage
from pydantic import Field
from langchain.memory.utils import get_prompt_input_key
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
class BaseChatMemory(BaseMemory, ABC):
"""Abstract base class for chat memory.
**ATTENTION** This abstraction was created prior to when chat models had
native tool calling capabilities.
It does **NOT** support native tool calling capabilities for chat models and
will fail SILENTLY if used with a chat model that has native tool calling.
DO NOT USE THIS ABSTRACTION FOR NEW CODE.
"""
chat_memory: BaseChatMessageHistory = Field(
default_factory=InMemoryChatMessageHistory
)
output_key: Optional[str] = None
input_key: Optional[str] = None
return_messages: bool = False
def _get_input_output(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> tuple[str, str]:
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) == 1:
output_key = list(outputs.keys())[0]
elif "output" in outputs:
output_key = "output"
warnings.warn(
f"'{self.__class__.__name__}' got multiple output keys:"
f" {outputs.keys()}. The default 'output' key is being used."
f" If this is not desired, please manually set 'output_key'."
)
else:
msg = (
f"Got multiple output keys: {outputs.keys()}, cannot "
f"determine which to store in memory. Please set the "
f"'output_key' explicitly."
)
raise ValueError(msg)
else:
output_key = self.output_key
return inputs[prompt_input_key], outputs[output_key]
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
]
)
async def asave_context(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
await self.chat_memory.aadd_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
]
)
def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
async def aclear(self) -> None:
"""Clear memory contents."""
await self.chat_memory.aclear()
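# Hedged sketch (not part of the original file): a minimal concrete subclass, in
# the spirit of a buffer memory, showing how save_context() and the stored
# chat_memory interact. The "history" key name is arbitrary.
class SimpleBufferMemory(BaseChatMemory):
    """Expose the raw message history under a single memory variable."""

    memory_key: str = "history"

    @property
    def memory_variables(self) -> list[str]:
        return [self.memory_key]

    def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
        if self.return_messages:
            return {self.memory_key: self.chat_memory.messages}
        buffer = "\n".join(
            f"{message.type}: {message.content}"
            for message in self.chat_memory.messages
        )
        return {self.memory_key: buffer}


# Example round trip:
#   memory = SimpleBufferMemory()
#   memory.save_context({"input": "hi"}, {"output": "hello"})
#   memory.load_memory_variables({})  # -> {"history": "human: hi\nai: hello"}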
|
import warnings
from abc import ABC
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.chat_history import (
BaseChatMessageHistory,
InMemoryChatMessageHistory,
)
from langchain_core.memory import BaseMemory
from langchain_core.messages import AIMessage, HumanMessage
from pydantic import Field
from langchain.memory.utils import get_prompt_input_key
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
class BaseChatMemory(BaseMemory, ABC):
"""Abstract base class for chat memory.
**ATTENTION** This abstraction was created prior to when chat models had
native tool calling capabilities.
It does **NOT** support native tool calling capabilities for chat models and
will fail SILENTLY if used with a chat model that has native tool calling.
DO NOT USE THIS ABSTRACTION FOR NEW CODE.
"""
chat_memory: BaseChatMessageHistory = Field(
default_factory=InMemoryChatMessageHistory
)
output_key: Optional[str] = None
input_key: Optional[str] = None
return_messages: bool = False
def _get_input_output(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> tuple[str, str]:
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) == 1:
output_key = list(outputs.keys())[0]
elif "output" in outputs:
output_key = "output"
warnings.warn(
f"'{self.__class__.__name__}' got multiple output keys:"
f" {outputs.keys()}. The default 'output' key is being used."
f" If this is not desired, please manually set 'output_key'."
)
else:
raise ValueError(
f"Got multiple output keys: {outputs.keys()}, cannot "
f"determine which to store in memory. Please set the "
f"'output_key' explicitly."
)
else:
output_key = self.output_key
return inputs[prompt_input_key], outputs[output_key]
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
]
)
async def asave_context(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
await self.chat_memory.aadd_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
]
)
def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
async def aclear(self) -> None:
"""Clear memory contents."""
await self.chat_memory.aclear()
|
import torch
from ._bounding_box import BoundingBoxes, BoundingBoxFormat
from ._datapoint import Datapoint
from ._image import Image
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._video import Video
def wrap(wrappee, *, like, **kwargs):
"""[BETA] Convert a :class:`torch.Tensor` (``wrappee``) into the same :class:`~torchvision.datapoints.Datapoint` subclass as ``like``.
If ``like`` is a :class:`~torchvision.datapoints.BoundingBoxes`, the ``format`` and ``canvas_size`` of
``like`` are assigned to ``wrappee``, unless they are passed as ``kwargs``.
Args:
wrappee (Tensor): The tensor to convert.
like (:class:`~torchvision.datapoints.Datapoint`): The reference.
``wrappee`` will be converted into the same subclass as ``like``.
kwargs: Can contain "format" and "canvas_size" if ``like`` is a :class:`~torchvision.datapoints.BoundingBoxes`.
Ignored otherwise.
"""
if isinstance(like, BoundingBoxes):
return BoundingBoxes._wrap(
wrappee,
format=kwargs.get("format", like.format),
canvas_size=kwargs.get("canvas_size", like.canvas_size),
)
else:
return wrappee.as_subclass(type(like))
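# Hedged usage sketch (not part of the original file): wrap plain tensors into
# the datapoint subclass of a reference object; bounding-box metadata is carried
# over from the reference unless overridden via kwargs.
if __name__ == "__main__":
    reference = BoundingBoxes(
        torch.tensor([[0, 0, 10, 10]]),
        format=BoundingBoxFormat.XYXY,
        canvas_size=(32, 32),
    )
    boxes = wrap(torch.tensor([[2, 2, 8, 8]]), like=reference)
    print(type(boxes).__name__, boxes.format, boxes.canvas_size)

    image = wrap(torch.rand(3, 32, 32), like=Image(torch.rand(3, 32, 32)))
    print(type(image).__name__)  # Image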
|
import torch
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
from ._bounding_box import BoundingBoxes, BoundingBoxFormat
from ._datapoint import Datapoint
from ._image import Image
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._video import Video
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
def wrap(wrappee, *, like, **kwargs):
"""[BETA] Convert a :class:`torch.Tensor` (``wrappee``) into the same :class:`~torchvision.datapoints.Datapoint` subclass as ``like``.
If ``like`` is a :class:`~torchvision.datapoints.BoundingBoxes`, the ``format`` and ``canvas_size`` of
``like`` are assigned to ``wrappee``, unless they are passed as ``kwargs``.
Args:
wrappee (Tensor): The tensor to convert.
like (:class:`~torchvision.datapoints.Datapoint`): The reference.
``wrappee`` will be converted into the same subclass as ``like``.
        kwargs: Can contain "format" and "canvas_size" if ``like`` is a :class:`~torchvision.datapoints.BoundingBoxes`.
Ignored otherwise.
"""
if isinstance(like, BoundingBoxes):
return BoundingBoxes._wrap(
wrappee,
format=kwargs.get("format", like.format),
canvas_size=kwargs.get("canvas_size", like.canvas_size),
)
else:
return wrappee.as_subclass(type(like))
|
"""
This script contains an example how to perform semantic search with Elasticsearch.
You need Elasticsearch up and running locally:
https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.readthedocs.io/, e.g.:
```
pip install elasticsearch
```
This script was created for `elasticsearch` v8.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_elasticsearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
print("Start encoding corpus...")
start_time = time.time()
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
# 5. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using Elasticsearch
results, search_time, corpus_index = semantic_search_elasticsearch(
query_embeddings_decoded,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
corpus_index=corpus_index,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example how to perform semantic search with Elasticsearch.
You need Elasticsearch up and running locally:
https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.readthedocs.io/, e.g.:
```
pip install elasticsearch
```
This script was created for `elasticsearch` v8.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_elasticsearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True)
corpus_index = None
while True:
# 5. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using Elasticsearch
results, search_time, corpus_index = semantic_search_elasticsearch(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import AssemblyAIAudioTranscriptLoader
from langchain_community.document_loaders.assemblyai import TranscriptFormat
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"TranscriptFormat": "langchain_community.document_loaders.assemblyai",
"AssemblyAIAudioTranscriptLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AssemblyAIAudioTranscriptLoader",
"TranscriptFormat",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import AssemblyAIAudioTranscriptLoader
from langchain_community.document_loaders.assemblyai import TranscriptFormat
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"TranscriptFormat": "langchain_community.document_loaders.assemblyai",
"AssemblyAIAudioTranscriptLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"TranscriptFormat",
"AssemblyAIAudioTranscriptLoader",
]
|
import pathlib
from typing import Any, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVDictParser, Demultiplexer, Filter, IterDataPipe, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
)
from torchvision.prototype.features import BoundingBox, Label
from .._api import register_dataset, register_info
NAME = "gtsrb"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(
categories=[f"{label:05d}" for label in range(43)],
)
@register_dataset(NAME)
class GTSRB(Dataset):
"""GTSRB Dataset
homepage="https://benchmark.ini.rub.de"
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL_ROOT = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/"
_URLS = {
"train": f"{_URL_ROOT}GTSRB-Training_fixed.zip",
"test": f"{_URL_ROOT}GTSRB_Final_Test_Images.zip",
"test_ground_truth": f"{_URL_ROOT}GTSRB_Final_Test_GT.zip",
}
_CHECKSUMS = {
"train": "df4144942083645bd60b594de348aa6930126c3e0e5de09e39611630abf8455a",
"test": "48ba6fab7e877eb64eaf8de99035b0aaecfbc279bee23e35deca4ac1d0a837fa",
"test_ground_truth": "f94e5a7614d75845c74c04ddb26b8796b9e483f43541dd95dd5b726504e16d6d",
}
def _resources(self) -> List[OnlineResource]:
rsrcs: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUMS[self._split])]
if self._split == "test":
rsrcs.append(
HttpResource(
self._URLS["test_ground_truth"],
sha256=self._CHECKSUMS["test_ground_truth"],
)
)
return rsrcs
def _classify_train_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.suffix == ".ppm":
return 0
elif path.suffix == ".csv":
return 1
else:
return None
def _prepare_sample(self, data: Tuple[Tuple[str, Any], Dict[str, Any]]) -> Dict[str, Any]:
(path, buffer), csv_info = data
label = int(csv_info["ClassId"])
bounding_box = BoundingBox(
[int(csv_info[k]) for k in ("Roi.X1", "Roi.Y1", "Roi.X2", "Roi.Y2")],
format="xyxy",
spatial_size=(int(csv_info["Height"]), int(csv_info["Width"])),
)
return {
"path": path,
"image": EncodedImage.from_file(buffer),
"label": Label(label, categories=self._categories),
"bounding_box": bounding_box,
}
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
if self._split == "train":
images_dp, ann_dp = Demultiplexer(
resource_dps[0], 2, self._classify_train_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
else:
images_dp, ann_dp = resource_dps
images_dp = Filter(images_dp, path_comparator("suffix", ".ppm"))
        # The order of the image files in the .zip archives perfectly matches the order of the entries in the
        # (possibly concatenated) .csv files. So we're able to use Zipper here instead of an IterKeyZipper.
ann_dp = CSVDictParser(ann_dp, delimiter=";")
dp = Zipper(images_dp, ann_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 26_640 if self._split == "train" else 12_630
|
import pathlib
from typing import Any, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVDictParser, Demultiplexer, Filter, IterDataPipe, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
)
from torchvision.prototype.features import BoundingBox, EncodedImage, Label
from .._api import register_dataset, register_info
NAME = "gtsrb"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(
categories=[f"{label:05d}" for label in range(43)],
)
@register_dataset(NAME)
class GTSRB(Dataset):
"""GTSRB Dataset
homepage="https://benchmark.ini.rub.de"
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL_ROOT = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/"
_URLS = {
"train": f"{_URL_ROOT}GTSRB-Training_fixed.zip",
"test": f"{_URL_ROOT}GTSRB_Final_Test_Images.zip",
"test_ground_truth": f"{_URL_ROOT}GTSRB_Final_Test_GT.zip",
}
_CHECKSUMS = {
"train": "df4144942083645bd60b594de348aa6930126c3e0e5de09e39611630abf8455a",
"test": "48ba6fab7e877eb64eaf8de99035b0aaecfbc279bee23e35deca4ac1d0a837fa",
"test_ground_truth": "f94e5a7614d75845c74c04ddb26b8796b9e483f43541dd95dd5b726504e16d6d",
}
def _resources(self) -> List[OnlineResource]:
rsrcs: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUMS[self._split])]
if self._split == "test":
rsrcs.append(
HttpResource(
self._URLS["test_ground_truth"],
sha256=self._CHECKSUMS["test_ground_truth"],
)
)
return rsrcs
def _classify_train_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.suffix == ".ppm":
return 0
elif path.suffix == ".csv":
return 1
else:
return None
def _prepare_sample(self, data: Tuple[Tuple[str, Any], Dict[str, Any]]) -> Dict[str, Any]:
(path, buffer), csv_info = data
label = int(csv_info["ClassId"])
bounding_box = BoundingBox(
[int(csv_info[k]) for k in ("Roi.X1", "Roi.Y1", "Roi.X2", "Roi.Y2")],
format="xyxy",
spatial_size=(int(csv_info["Height"]), int(csv_info["Width"])),
)
return {
"path": path,
"image": EncodedImage.from_file(buffer),
"label": Label(label, categories=self._categories),
"bounding_box": bounding_box,
}
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
if self._split == "train":
images_dp, ann_dp = Demultiplexer(
resource_dps[0], 2, self._classify_train_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
else:
images_dp, ann_dp = resource_dps
images_dp = Filter(images_dp, path_comparator("suffix", ".ppm"))
        # The order of the image files in the .zip archives perfectly matches the order of the entries in the
        # (possibly concatenated) .csv files. So we're able to use Zipper here instead of an IterKeyZipper.
ann_dp = CSVDictParser(ann_dp, delimiter=";")
dp = Zipper(images_dp, ann_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 26_640 if self._split == "train" else 12_630
|
"""
Using rmm with Dask
===================
"""
import dask
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from sklearn.datasets import make_classification
import xgboost as xgb
def main(client):
# Optionally force XGBoost to use RMM for all GPU memory allocation, see ./README.md
# xgb.set_config(use_rmm=True)
X, y = make_classification(n_samples=10000, n_informative=5, n_classes=3)
    # In practice, one should prefer loading the data with dask collections instead of
    # using `from_array`.
X = dask.array.from_array(X)
y = dask.array.from_array(y)
dtrain = xgb.dask.DaskDMatrix(client, X, label=y)
params = {
"max_depth": 8,
"eta": 0.01,
"objective": "multi:softprob",
"num_class": 3,
"tree_method": "hist",
"eval_metric": "merror",
"device": "cuda",
}
output = xgb.dask.train(
client, params, dtrain, num_boost_round=100, evals=[(dtrain, "train")]
)
bst = output["booster"]
history = output["history"]
for i, e in enumerate(history["train"]["merror"]):
print(f"[{i}] train-merror: {e}")
if __name__ == "__main__":
# To use RMM pool allocator with a GPU Dask cluster, just add rmm_pool_size option
# to LocalCUDACluster constructor.
with LocalCUDACluster(rmm_pool_size="2GB") as cluster:
with Client(cluster) as client:
main(client)
|
"""
Using rmm with Dask
===================
"""
import dask
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from sklearn.datasets import make_classification
import xgboost as xgb
def main(client):
# Optionally force XGBoost to use RMM for all GPU memory allocation, see ./README.md
# xgb.set_config(use_rmm=True)
X, y = make_classification(n_samples=10000, n_informative=5, n_classes=3)
    # In practice, one should prefer loading the data with dask collections instead of
    # using `from_array`.
X = dask.array.from_array(X)
y = dask.array.from_array(y)
dtrain = xgb.dask.DaskDMatrix(client, X, label=y)
params = {
"max_depth": 8,
"eta": 0.01,
"objective": "multi:softprob",
"num_class": 3,
"tree_method": "hist",
"eval_metric": "merror",
"device": "cuda",
}
output = xgb.dask.train(
client, params, dtrain, num_boost_round=100, evals=[(dtrain, "train")]
)
bst = output["booster"]
history = output["history"]
for i, e in enumerate(history["train"]["merror"]):
print(f"[{i}] train-merror: {e}")
if __name__ == "__main__":
# To use RMM pool allocator with a GPU Dask cluster, just add rmm_pool_size option
# to LocalCUDACluster constructor.
with LocalCUDACluster(rmm_pool_size="2GB") as cluster:
with Client(cluster) as client:
main(client)
|
"""
This script contains an example how to perform semantic search with Qdrant.
You need Qdrant up and running locally:
https://qdrant.tech/documentation/quickstart/
Further, you need the Python Qdrant Client installed: https://python-client.qdrant.tech/, e.g.:
```
pip install qdrant-client
```
This script was created for `qdrant-client` v1.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_qdrant
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True)
# Initially, we don't have a qdrant index yet
corpus_index = None
while True:
# 5. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using qdrant
results, search_time, corpus_index = semantic_search_qdrant(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example how to perform semantic search with Qdrant.
You need Qdrant up and running locally:
https://qdrant.tech/documentation/quickstart/
Further, you need the Python Qdrant Client installed: https://python-client.qdrant.tech/, e.g.:
```
pip install qdrant-client
```
This script was created for `qdrant-client` v1.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_qdrant
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True)
# Initially, we don't have a qdrant index yet
corpus_index = None
while True:
    # 5. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
    # 6. Perform semantic search using qdrant
results, search_time, corpus_index = semantic_search_qdrant(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
    # 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
    # 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class ReadFileInput(BaseModel):
"""Input for ReadFileTool."""
file_path: str = Field(..., description="name of file")
class ReadFileTool(BaseFileToolMixin, BaseTool):
"""Tool that reads a file."""
name: str = "read_file"
args_schema: Type[BaseModel] = ReadFileInput
description: str = "Read file from disk"
def _run(
self,
file_path: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
read_path = self.get_relative_path(file_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path)
if not read_path.exists():
return f"Error: no such file or directory: {file_path}"
try:
with read_path.open("r", encoding="utf-8") as f:
content = f.read()
return content
except Exception as e:
return "Error: " + str(e)
# TODO: Add aiofiles method
|
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class ReadFileInput(BaseModel):
"""Input for ReadFileTool."""
file_path: str = Field(..., description="name of file")
class ReadFileTool(BaseFileToolMixin, BaseTool): # type: ignore[override, override]
"""Tool that reads a file."""
name: str = "read_file"
args_schema: Type[BaseModel] = ReadFileInput
description: str = "Read file from disk"
def _run(
self,
file_path: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
read_path = self.get_relative_path(file_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path)
if not read_path.exists():
return f"Error: no such file or directory: {file_path}"
try:
with read_path.open("r", encoding="utf-8") as f:
content = f.read()
return content
except Exception as e:
return "Error: " + str(e)
# TODO: Add aiofiles method
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from docarray.computation.abstract_comp_backend import AbstractComputationalBackend
__all__ = ['AbstractComputationalBackend']
|
from docarray.computation.abstract_comp_backend import AbstractComputationalBackend
__all__ = ['AbstractComputationalBackend']
|
import itertools
import os.path
import pytest
from docarray import Document, DocumentArray
from jina import Client, Executor, Flow, requests
from jina.helper import random_port
PROTOCOLS = ['grpc', 'http', 'websocket']
cur_dir = os.path.dirname(__file__)
class MyExecutor(Executor):
@requests
def foo(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = 'processed'
@pytest.mark.parametrize(
'ports,protocols',
[
*[
([random_port(), random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=3)
],
*[
([random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=2)
],
*[
([random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=1)
],
],
)
def test_flow_multiprotocol(ports, protocols):
flow = Flow().config_gateway(port=ports, protocol=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
def test_flow_multiprotocol_aliases():
ports = [random_port(), random_port(), random_port()]
protocols = PROTOCOLS
flow = Flow().config_gateway(ports=ports, protocols=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
def test_flow_multiprotocol_yaml():
flow = Flow.load_config(os.path.join(cur_dir, 'yaml/multi-protocol.yml'))
with flow:
for port, protocol in zip([12345, 12344, 12343], ['grpc', 'http', 'websocket']):
client = Client(port=port, protocol=protocol)
client.post('/', inputs=[Document()])
def test_flow_multiprotocol_ports_protocols_mismatch():
flow = Flow().config_gateway(port=[random_port()], protocol=['grpc', 'http'])
with pytest.raises(ValueError) as err_info:
with flow:
pass
assert (
'You need to specify as much protocols as ports if you want to use a jina built-in gateway'
in err_info.value.args[0]
)
|
import itertools
import os.path
import pytest
from docarray import Document, DocumentArray
from jina import Client, Executor, Flow, requests
from jina.helper import random_port
PROTOCOLS = ['grpc', 'http', 'websocket']
cur_dir = os.path.dirname(__file__)
class MyExecutor(Executor):
@requests
def foo(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = 'processed'
@pytest.mark.parametrize(
'ports,protocols',
[
*[
([random_port(), random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=3)
],
*[
([random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=2)
],
*[
([random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=1)
],
],
)
def test_flow_multiprotocol(ports, protocols):
flow = Flow().config_gateway(port=ports, protocol=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
def test_flow_multiprotocol_yaml():
flow = Flow.load_config(os.path.join(cur_dir, 'yaml/multi-protocol.yml'))
with flow:
for port, protocol in zip([12345, 12344, 12343], ['grpc', 'http', 'websocket']):
client = Client(port=port, protocol=protocol)
client.post('/', inputs=[Document()])
def test_flow_multiprotocol_ports_protocols_mismatch():
flow = Flow().config_gateway(port=[random_port()], protocol=['grpc', 'http'])
with pytest.raises(ValueError) as err_info:
with flow:
pass
assert (
'You need to specify as much protocols as ports if you want to use a jina built-in gateway'
in err_info.value.args[0]
)
|
from docarray.typing.bytes import ImageBytes
from docarray.typing.id import ID
from docarray.typing.tensor import ImageNdArray, ImageTensor
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray
from docarray.typing.url import (
AnyUrl,
AudioUrl,
ImageUrl,
Mesh3DUrl,
PointCloud3DUrl,
TextUrl,
VideoUrl,
)
__all__ = [
'NdArray',
'NdArrayEmbedding',
'AudioNdArray',
'VideoNdArray',
'AnyEmbedding',
'ImageUrl',
'AudioUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
'AnyUrl',
'ID',
'AnyTensor',
'ImageBytes',
'ImageTensor',
'ImageNdArray',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor # noqa
__all__.extend(
['AudioTorchTensor', 'TorchEmbedding', 'TorchTensor', 'VideoTorchTensor']
)
|
from docarray.typing.id import ID
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray
from docarray.typing.url import (
AnyUrl,
AudioUrl,
ImageUrl,
Mesh3DUrl,
PointCloud3DUrl,
TextUrl,
VideoUrl,
)
__all__ = [
'NdArray',
'NdArrayEmbedding',
'AudioNdArray',
'VideoNdArray',
'AnyEmbedding',
'ImageUrl',
'AudioUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
'AnyUrl',
'ID',
'AnyTensor',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor # noqa
__all__.extend(
['AudioTorchTensor', 'TorchEmbedding', 'TorchTensor', 'VideoTorchTensor']
)
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import functools
import warnings
from inspect import signature
__all__ = ["deprecated"]
class deprecated:
"""Decorator to mark a function or class as deprecated.
    Issues a warning when the function is called / the class is instantiated and
    adds a warning to the docstring.
    The optional extra argument will be appended to the deprecation message
    and the docstring. Note: to use this with the default value for extra, put
    in an empty pair of parentheses:
Examples
--------
>>> from sklearn.utils import deprecated
>>> deprecated()
<sklearn.utils.deprecation.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
Parameters
----------
extra : str, default=''
To be added to the deprecation messages.
"""
# Adapted from https://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=""):
self.extra = extra
def __call__(self, obj):
"""Call method
Parameters
----------
obj : object
"""
if isinstance(obj, type):
return self._decorate_class(obj)
elif isinstance(obj, property):
# Note that this is only triggered properly if the `deprecated`
# decorator is placed before the `property` decorator, like so:
#
# @deprecated(msg)
# @property
# def deprecated_attribute_(self):
# ...
return self._decorate_property(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
new = cls.__new__
sig = signature(cls)
def wrapped(cls, *args, **kwargs):
warnings.warn(msg, category=FutureWarning)
if new is object.__new__:
return object.__new__(cls)
return new(cls, *args, **kwargs)
cls.__new__ = wrapped
wrapped.__name__ = "__new__"
wrapped.deprecated_original = new
# Restore the original signature, see PEP 362.
cls.__signature__ = sig
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
@functools.wraps(fun)
def wrapped(*args, **kwargs):
warnings.warn(msg, category=FutureWarning)
return fun(*args, **kwargs)
# Add a reference to the wrapped function so that we can introspect
# on function arguments in Python 2 (already works in Python 3)
wrapped.__wrapped__ = fun
return wrapped
def _decorate_property(self, prop):
msg = self.extra
@property
@functools.wraps(prop.fget)
def wrapped(*args, **kwargs):
warnings.warn(msg, category=FutureWarning)
return prop.fget(*args, **kwargs)
return wrapped
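# Hedged example of class decoration (names are illustrative): instantiating the decorated
# class emits a FutureWarning via the wrapped ``__new__``.
#
#     @deprecated("use NewEstimator instead")
#     class OldEstimator:
#         ...
#
#     OldEstimator()  # warns: "Class OldEstimator is deprecated; use NewEstimator instead"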
def _is_deprecated(func):
"""Helper to check if func is wrapped by our deprecated decorator"""
closures = getattr(func, "__closure__", [])
if closures is None:
closures = []
is_deprecated = "deprecated" in "".join(
[c.cell_contents for c in closures if isinstance(c.cell_contents, str)]
)
return is_deprecated
# TODO: remove in 1.7
def _deprecate_Xt_in_inverse_transform(X, Xt):
"""Helper to deprecate the `Xt` argument in favor of `X` in inverse_transform."""
if X is not None and Xt is not None:
raise TypeError("Cannot use both X and Xt. Use X only.")
if X is None and Xt is None:
raise TypeError("Missing required positional argument: X.")
if Xt is not None:
warnings.warn(
"Xt was renamed X in version 1.5 and will be removed in 1.7.",
FutureWarning,
)
return Xt
return X
# TODO(1.8): remove force_all_finite and change the default value of ensure_all_finite
# to True (remove None without deprecation).
def _deprecate_force_all_finite(force_all_finite, ensure_all_finite):
"""Helper to deprecate force_all_finite in favor of ensure_all_finite."""
if force_all_finite != "deprecated":
warnings.warn(
"'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be "
"removed in 1.8.",
FutureWarning,
)
if ensure_all_finite is not None:
raise ValueError(
"'force_all_finite' and 'ensure_all_finite' cannot be used together. "
"Pass `ensure_all_finite` only."
)
return force_all_finite
if ensure_all_finite is None:
return True
return ensure_all_finite
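# Hedged call-site sketch for the helper above (the surrounding signature is an
# illustrative assumption, not a statement of the actual public API):
#
#     def check_array(array, force_all_finite="deprecated", ensure_all_finite=None):
#         ensure_all_finite = _deprecate_force_all_finite(force_all_finite, ensure_all_finite)
#         ...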
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import functools
import warnings
__all__ = ["deprecated"]
class deprecated:
"""Decorator to mark a function or class as deprecated.
    Issues a warning when the function is called / the class is instantiated and
    adds a warning to the docstring.
    The optional extra argument will be appended to the deprecation message
    and the docstring. Note: to use this with the default value for extra, put
    in an empty pair of parentheses:
Examples
--------
>>> from sklearn.utils import deprecated
>>> deprecated()
<sklearn.utils.deprecation.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
Parameters
----------
extra : str, default=''
To be added to the deprecation messages.
"""
# Adapted from https://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=""):
self.extra = extra
def __call__(self, obj):
"""Call method
Parameters
----------
obj : object
"""
if isinstance(obj, type):
return self._decorate_class(obj)
elif isinstance(obj, property):
# Note that this is only triggered properly if the `deprecated`
# decorator is placed before the `property` decorator, like so:
#
# @deprecated(msg)
# @property
# def deprecated_attribute_(self):
# ...
return self._decorate_property(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
new = cls.__new__
def wrapped(cls, *args, **kwargs):
warnings.warn(msg, category=FutureWarning)
if new is object.__new__:
return object.__new__(cls)
return new(cls, *args, **kwargs)
cls.__new__ = wrapped
wrapped.__name__ = "__new__"
wrapped.deprecated_original = new
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
@functools.wraps(fun)
def wrapped(*args, **kwargs):
warnings.warn(msg, category=FutureWarning)
return fun(*args, **kwargs)
# Add a reference to the wrapped function so that we can introspect
# on function arguments in Python 2 (already works in Python 3)
wrapped.__wrapped__ = fun
return wrapped
def _decorate_property(self, prop):
msg = self.extra
@property
@functools.wraps(prop.fget)
def wrapped(*args, **kwargs):
warnings.warn(msg, category=FutureWarning)
return prop.fget(*args, **kwargs)
return wrapped
def _is_deprecated(func):
"""Helper to check if func is wrapped by our deprecated decorator"""
closures = getattr(func, "__closure__", [])
if closures is None:
closures = []
is_deprecated = "deprecated" in "".join(
[c.cell_contents for c in closures if isinstance(c.cell_contents, str)]
)
return is_deprecated
# TODO: remove in 1.7
def _deprecate_Xt_in_inverse_transform(X, Xt):
"""Helper to deprecate the `Xt` argument in favor of `X` in inverse_transform."""
if X is not None and Xt is not None:
raise TypeError("Cannot use both X and Xt. Use X only.")
if X is None and Xt is None:
raise TypeError("Missing required positional argument: X.")
if Xt is not None:
warnings.warn(
"Xt was renamed X in version 1.5 and will be removed in 1.7.",
FutureWarning,
)
return Xt
return X
# TODO(1.8): remove force_all_finite and change the default value of ensure_all_finite
# to True (remove None without deprecation).
def _deprecate_force_all_finite(force_all_finite, ensure_all_finite):
"""Helper to deprecate force_all_finite in favor of ensure_all_finite."""
if force_all_finite != "deprecated":
warnings.warn(
"'force_all_finite' was renamed to 'ensure_all_finite' in 1.6 and will be "
"removed in 1.8.",
FutureWarning,
)
if ensure_all_finite is not None:
raise ValueError(
"'force_all_finite' and 'ensure_all_finite' cannot be used together. "
"Pass `ensure_all_finite` only."
)
return force_all_finite
if ensure_all_finite is None:
return True
return ensure_all_finite
|
from typing import Union, Iterable
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray.array.memory import DocumentArrayInMemory
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
def _extend(self, values: Iterable['Document']) -> None:
docs = DocumentArrayInMemory(values)
if len(docs) == 0:
return
for doc in docs:
doc.embedding = self._map_embedding(doc.embedding)
self._annlite.index(docs)
self._offset2ids.extend([doc.id for doc in docs])
def _append(self, value: 'Document'):
self._extend([value])
def __eq__(self, other):
"""In annlite backend, data are considered as identical if configs point to the same database source"""
return (
type(self) is type(other)
and type(self._config) is type(other._config)
and self._config == other._config
)
def __repr__(self):
return f'<DocumentArray[AnnLite] (length={len(self)}) at {id(self)}>'
def __contains__(self, x: Union[str, 'Document']):
if isinstance(x, str):
return self._annlite.get_doc_by_id(x) is not None
elif isinstance(x, Document):
return self._annlite.get_doc_by_id(x.id) is not None
else:
return False
|
from typing import Union, Iterable
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray.array.memory import DocumentArrayInMemory
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
def _extend(self, values: Iterable['Document']) -> None:
docs = DocumentArrayInMemory(values)
if len(docs) == 0:
return
for doc in docs:
doc.embedding = self._map_embedding(doc.embedding)
self._annlite.index(docs)
self._offset2ids.extend([doc.id for doc in docs])
def _append(self, value: 'Document'):
self.extend([value])
def __eq__(self, other):
"""In annlite backend, data are considered as identical if configs point to the same database source"""
return (
type(self) is type(other)
and type(self._config) is type(other._config)
and self._config == other._config
)
def __repr__(self):
return f'<DocumentArray[AnnLite] (length={len(self)}) at {id(self)}>'
def __contains__(self, x: Union[str, 'Document']):
if isinstance(x, str):
return self._annlite.get_doc_by_id(x) is not None
elif isinstance(x, Document):
return self._annlite.get_doc_by_id(x.id) is not None
else:
return False
|
_base_ = [
'./bytetrack_yolox_x_8xb4-80e_crowdhuman-mot17halftrain_'
'test-mot17halfval.py'
]
dataset_type = 'MOTChallengeDataset'
img_scale = (1600, 896)  # width, height
model = dict(
data_preprocessor=dict(
type='TrackDataPreprocessor',
use_det_processor=True,
pad_size_divisor=32,
batch_augments=[
dict(type='BatchSyncRandomResize', random_size_range=(640, 1152))
]),
tracker=dict(
weight_iou_with_det_scores=False,
match_iou_thrs=dict(high=0.3),
))
train_pipeline = [
dict(
type='Mosaic',
img_scale=img_scale,
pad_val=114.0,
bbox_clip_border=True),
dict(
type='RandomAffine',
scaling_ratio_range=(0.1, 2),
border=(-img_scale[0] // 2, -img_scale[1] // 2),
bbox_clip_border=True),
dict(
type='MixUp',
img_scale=img_scale,
ratio_range=(0.8, 1.6),
pad_val=114.0,
bbox_clip_border=True),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(
type='Resize',
scale=img_scale,
keep_ratio=True,
clip_object_border=True),
dict(type='Pad', size_divisor=32, pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='TransformBroadcaster',
transforms=[
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=img_scale, keep_ratio=True),
dict(
type='Pad',
size_divisor=32,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='LoadTrackAnnotations'),
]),
dict(type='PackTrackInputs')
]
train_dataloader = dict(
dataset=dict(
type='MultiImageMixDataset',
dataset=dict(
type='ConcatDataset',
datasets=[
dict(
type='CocoDataset',
data_root='data/MOT20',
ann_file='annotations/train_cocoformat.json',
# TODO: mmdet use img as key, but img_path is needed
data_prefix=dict(img='train'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
metainfo=dict(classes=('pedestrian', )),
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
]),
dict(
type='CocoDataset',
data_root='data/crowdhuman',
ann_file='annotations/crowdhuman_train.json',
data_prefix=dict(img='train'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
metainfo=dict(classes=('pedestrian', )),
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
]),
dict(
type='CocoDataset',
data_root='data/crowdhuman',
ann_file='annotations/crowdhuman_val.json',
data_prefix=dict(img='val'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
metainfo=dict(classes=('pedestrian', )),
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
]),
]),
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(ann_file='annotations/train_cocoformat.json'))
test_dataloader = dict(
dataset=dict(
data_root='data/MOT20', ann_file='annotations/test_cocoformat.json'))
test_evaluator = dict(
type='MOTChallengeMetrics',
postprocess_tracklet_cfg=[
dict(type='InterpolateTracklets', min_num_frames=5, max_num_frames=20)
],
format_only=True,
outfile_prefix='./mot_20_test_res')
|
_base_ = [
'./bytetrack_yolox_x_8xb4-80e_crowdhuman-mot17halftrain_'
'test-mot17halfval.py'
]
dataset_type = 'MOTChallengeDataset'
img_scale = (896, 1600) # w, h
model = dict(
data_preprocessor=dict(
type='TrackDataPreprocessor',
use_det_processor=True,
pad_size_divisor=32,
batch_augments=[
dict(type='BatchSyncRandomResize', random_size_range=(640, 1152))
]),
tracker=dict(
weight_iou_with_det_scores=False,
match_iou_thrs=dict(high=0.3),
))
train_pipeline = [
dict(
type='Mosaic',
img_scale=img_scale,
pad_val=114.0,
bbox_clip_border=True),
dict(
type='RandomAffine',
scaling_ratio_range=(0.1, 2),
border=(-img_scale[0] // 2, -img_scale[1] // 2),
bbox_clip_border=True),
dict(
type='MixUp',
img_scale=img_scale,
ratio_range=(0.8, 1.6),
pad_val=114.0,
bbox_clip_border=True),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(
type='Resize',
scale=img_scale,
keep_ratio=True,
clip_object_border=True),
dict(type='Pad', size_divisor=32, pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='TransformBroadcaster',
transforms=[
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=img_scale, keep_ratio=True),
dict(
type='Pad',
size_divisor=32,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='LoadTrackAnnotations'),
]),
dict(type='PackTrackInputs')
]
train_dataloader = dict(
dataset=dict(
type='MultiImageMixDataset',
dataset=dict(
type='ConcatDataset',
datasets=[
dict(
type='CocoDataset',
data_root='data/MOT20',
ann_file='annotations/train_cocoformat.json',
                # TODO: mmdet uses img as the key, but img_path is needed
data_prefix=dict(img='train'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
                metainfo=dict(classes=('pedestrian', )),
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
]),
dict(
type='CocoDataset',
data_root='data/crowdhuman',
ann_file='annotations/crowdhuman_train.json',
data_prefix=dict(img='train'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
                metainfo=dict(classes=('pedestrian', )),
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
]),
dict(
type='CocoDataset',
data_root='data/crowdhuman',
ann_file='annotations/crowdhuman_val.json',
data_prefix=dict(img='val'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
                metainfo=dict(classes=('pedestrian', )),
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
]),
]),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
pin_memory=True,
drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
sampler=dict(type='TrackImgSampler'),
dataset=dict(
type=dataset_type,
data_root='data/MOT17',
ann_file='annotations/train_cocoformat.json',
data_prefix=dict(img_path='train'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='TrackImgSampler'),
dataset=dict(
type=dataset_type,
data_root='data/MOT20',
ann_file='annotations/test_cocoformat.json',
data_prefix=dict(img_path='test'),
test_mode=True,
pipeline=test_pipeline))
test_evaluator = dict(
type='MOTChallengeMetrics',
postprocess_tracklet_cfg=[
dict(type='InterpolateTracklets', min_num_frames=5, max_num_frames=20)
],
format_only=True,
outfile_prefix='./mot_20_test_res')
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_batch_norm, has_method, import_modules_from_strings,
is_list_of, is_method_overridden, is_seq_of, is_str,
is_tuple_of, iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .package_utils import (call_command, check_install_package,
get_installed_path, is_installed)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_abs, is_filepath,
mkdir_or_exist, scandir, symlink)
from .setup_env import set_multi_processing
from .sync_bn import revert_sync_batchnorm
from .version_utils import digit_version, get_git_hash
# TODO: creates intractable circular import issues
# from .time_counter import TimeCounter
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION', 'load_url',
'ManagerMeta', 'ManagerMixin', 'set_multi_processing', 'has_batch_norm',
'is_abs', 'is_installed', 'call_command', 'get_installed_path',
    'check_install_package', 'revert_sync_batchnorm', 'collect_env'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_batch_norm, has_method, import_modules_from_strings,
is_list_of, is_method_overridden, is_seq_of, is_str,
is_tuple_of, iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .package_utils import (call_command, check_install_package,
get_installed_path, is_installed)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_abs, is_filepath,
mkdir_or_exist, scandir, symlink)
from .setup_env import set_multi_processing
from .sync_bn import revert_sync_batchnorm
from .version_utils import digit_version, get_git_hash
# TODO: creates intractable circular import issues
# from .time_counter import TimeCounter
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION', 'load_url',
'ManagerMeta', 'ManagerMixin', 'set_multi_processing', 'has_batch_norm',
'is_abs', 'is_installed', 'call_command', 'get_installed_path',
    'check_install_package', 'revert_sync_batchnorm'
]
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; this seems to have no effect, so the variable must still be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
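# Example (hypothetical invocation): the start method can also be forced from the shell, e.g.
#   JINA_MP_START_METHOD=spawn python my_flow.py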
# do not change this line manually; it is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful for running matplotlib/seaborn
    with parallel plot generators, where the Ubuntu default of ``ulimit -n 1024`` or the
    OS X El Capitan default of 256 is too low; the change is temporary and expires with the
    Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; this seems to have no effect, so the variable must still be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; it is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.26.1'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful for running matplotlib/seaborn
    with parallel plot generators, where the Ubuntu default of ``ulimit -n 1024`` or the
    OS X El Capitan default of 256 is too low; the change is temporary and expires with the
    Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
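# A minimal, hedged usage sketch of the public surface re-exported above: an
# Executor with a `@requests` endpoint served inside a Flow. The names, the port,
# and the docarray v1-style Document/DocumentArray usage are illustrative
# assumptions, not part of the original module.
from jina import Document, DocumentArray, Executor, Flow, requests
class EchoExecutor(Executor):
    @requests(on='/echo')
    def echo(self, docs: DocumentArray, **kwargs):
        # append a prefix so the round-trip through the Flow is visible
        for doc in docs:
            doc.text = f'echo: {doc.text}'
if __name__ == '__main__':
    # the Flow context manager starts the server on enter and stops it on exit
    with Flow(port=12345).add(uses=EchoExecutor) as f:
        replies = f.post(on='/echo', inputs=DocumentArray([Document(text='hi')]))
        print(replies[0].text)  # expected: 'echo: hi'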
|
from __future__ import annotations
from .MLMTransformer import MLMTransformer
from .SparseAutoEncoder import SparseAutoEncoder
from .SparseStaticEmbedding import SparseStaticEmbedding
from .SpladePooling import SpladePooling
__all__ = ["SparseAutoEncoder", "MLMTransformer", "SpladePooling", "SparseStaticEmbedding"]
|
from __future__ import annotations
from .CSRSparsity import CSRSparsity
from .IDF import IDF
from .MLMTransformer import MLMTransformer
from .SpladePooling import SpladePooling
__all__ = ["CSRSparsity", "MLMTransformer", "SpladePooling", "IDF"]
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops import MaskedConv2d
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
@MODELS.register_module()
class GARetinaHead(GuidedAnchorHead):
"""Guided-Anchor-based RetinaNet head."""
def __init__(self,
num_classes: int,
in_channels: int,
stacked_convs: int = 4,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None,
**kwargs) -> None:
if init_cfg is None:
init_cfg = dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=[
dict(
type='Normal',
name='conv_loc',
std=0.01,
bias_prob=0.01),
dict(
type='Normal',
name='retina_cls',
std=0.01,
bias_prob=0.01)
])
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
super().__init__(
num_classes=num_classes,
in_channels=in_channels,
init_cfg=init_cfg,
**kwargs)
def _init_layers(self) -> None:
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)
num_anchors = self.square_anchor_generator.num_base_priors[0]
self.conv_shape = nn.Conv2d(self.feat_channels, num_anchors * 2, 1)
self.feature_adaption_cls = FeatureAdaption(
self.feat_channels,
self.feat_channels,
kernel_size=3,
deform_groups=self.deform_groups)
self.feature_adaption_reg = FeatureAdaption(
self.feat_channels,
self.feat_channels,
kernel_size=3,
deform_groups=self.deform_groups)
self.retina_cls = MaskedConv2d(
self.feat_channels,
self.num_base_priors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = MaskedConv2d(
self.feat_channels, self.num_base_priors * 4, 3, padding=1)
def forward_single(self, x: Tensor) -> Tuple[Tensor]:
"""Forward feature map of a single scale level."""
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
loc_pred = self.conv_loc(cls_feat)
shape_pred = self.conv_shape(reg_feat)
cls_feat = self.feature_adaption_cls(cls_feat, shape_pred)
reg_feat = self.feature_adaption_reg(reg_feat, shape_pred)
if not self.training:
mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr
else:
mask = None
cls_score = self.retina_cls(cls_feat, mask)
bbox_pred = self.retina_reg(reg_feat, mask)
return cls_score, bbox_pred, shape_pred, loc_pred
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops import MaskedConv2d
from torch import Tensor
from mmdet.core import OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
@MODELS.register_module()
class GARetinaHead(GuidedAnchorHead):
"""Guided-Anchor-based RetinaNet head."""
def __init__(self,
num_classes: int,
in_channels: int,
stacked_convs: int = 4,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None,
**kwargs) -> None:
if init_cfg is None:
init_cfg = dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=[
dict(
type='Normal',
name='conv_loc',
std=0.01,
bias_prob=0.01),
dict(
type='Normal',
name='retina_cls',
std=0.01,
bias_prob=0.01)
])
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
super().__init__(
num_classes=num_classes,
in_channels=in_channels,
init_cfg=init_cfg,
**kwargs)
def _init_layers(self) -> None:
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)
num_anchors = self.square_anchor_generator.num_base_priors[0]
self.conv_shape = nn.Conv2d(self.feat_channels, num_anchors * 2, 1)
self.feature_adaption_cls = FeatureAdaption(
self.feat_channels,
self.feat_channels,
kernel_size=3,
deform_groups=self.deform_groups)
self.feature_adaption_reg = FeatureAdaption(
self.feat_channels,
self.feat_channels,
kernel_size=3,
deform_groups=self.deform_groups)
self.retina_cls = MaskedConv2d(
self.feat_channels,
self.num_base_priors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = MaskedConv2d(
self.feat_channels, self.num_base_priors * 4, 3, padding=1)
def forward_single(self, x: Tensor) -> Tuple[Tensor]:
"""Forward feature map of a single scale level."""
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
loc_pred = self.conv_loc(cls_feat)
shape_pred = self.conv_shape(reg_feat)
cls_feat = self.feature_adaption_cls(cls_feat, shape_pred)
reg_feat = self.feature_adaption_reg(reg_feat, shape_pred)
if not self.training:
mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr
else:
mask = None
cls_score = self.retina_cls(cls_feat, mask)
bbox_pred = self.retina_reg(reg_feat, mask)
return cls_score, bbox_pred, shape_pred, loc_pred
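# A standalone illustration (plain torch, made-up numbers) of the inference-time
# masking used in `forward_single` above: positions whose predicted objectness
# probability falls below `loc_filter_thr` are skipped by the masked convolutions.
# The threshold value mirrors the usual GuidedAnchorHead default and is an assumption here.
import torch
loc_filter_thr = 0.01                           # assumed default threshold
loc_pred = torch.randn(1, 1, 4, 4)              # stand-in for the conv_loc logits
mask = loc_pred.sigmoid()[0] >= loc_filter_thr  # same expression as in forward_single
print(mask.shape, int(mask.sum()), 'anchor positions kept')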
|
"""Module for helper functions for clients."""
from typing import Optional, Tuple
from jina._docarray import Document, DocumentArray, docarray_v2
from jina.enums import DataInputType
from jina.types.request.data import DataRequest
if docarray_v2:
from docarray import DocList, BaseDoc
def _new_data_request_from_batch(
batch,
data_type: DataInputType,
endpoint: str,
target: Optional[str],
parameters: Optional[dict],
) -> DataRequest:
req = _new_data_request(endpoint, target, parameters)
# add docs fields
_add_docs(req, batch, data_type)
return req
def _new_data_request(
endpoint: str, target: Optional[str], parameters: Optional[dict]
) -> DataRequest:
req = DataRequest()
# set up header
req.header.exec_endpoint = endpoint
if target:
req.header.target_executor = target
# add parameters field
if parameters:
req.parameters = parameters
return req
def _new_doc_from_data(
data, data_type: DataInputType
) -> Tuple['Document', 'DataInputType']:
def _build_doc_from_content():
return Document(content=data), DataInputType.CONTENT
if data_type == DataInputType.DICT:
return (
(Document(**data), DataInputType.DICT)
if docarray_v2
else (Document.from_dict(data), DataInputType.DICT)
)
if data_type == DataInputType.AUTO or data_type == DataInputType.DOCUMENT:
if isinstance(data, Document):
            # if the incoming data is already a Document, pass it through as-is
return data, DataInputType.DOCUMENT
elif isinstance(data, dict):
return (
(Document(**data), DataInputType.DICT)
if docarray_v2
else (Document.from_dict(data), DataInputType.DICT)
)
else:
try:
d = Document(data)
return d, DataInputType.DOCUMENT # NOT HIT
except ValueError:
# AUTO has a fallback, now reconsider it as content
if data_type == DataInputType.AUTO:
return _build_doc_from_content()
else:
raise
elif data_type == DataInputType.CONTENT:
return _build_doc_from_content()
def _add_docs(req: DataRequest, batch, data_type: DataInputType) -> None:
if not docarray_v2:
da = DocumentArray([])
else:
if len(batch) > 0:
da = DocList[batch[0].__class__]()
else:
da = DocList[BaseDoc]()
for content in batch:
d, data_type = _new_doc_from_data(content, data_type)
da.append(d)
req.document_array_cls = da.__class__
req.data.docs = da
|
"""Module for helper functions for clients."""
from typing import Optional, Tuple
from jina._docarray import Document, DocumentArray, docarray_v2
from jina.enums import DataInputType
from jina.types.request.data import DataRequest
if docarray_v2:
from docarray import DocList, BaseDoc
def _new_data_request_from_batch(
batch,
data_type: DataInputType,
endpoint: str,
target: Optional[str],
parameters: Optional[dict],
) -> DataRequest:
req = _new_data_request(endpoint, target, parameters)
# add docs fields
_add_docs(req, batch, data_type)
return req
def _new_data_request(
endpoint: str, target: Optional[str], parameters: Optional[dict]
) -> DataRequest:
req = DataRequest()
# set up header
req.header.exec_endpoint = endpoint
if target:
req.header.target_executor = target
# add parameters field
if parameters:
req.parameters = parameters
return req
def _new_doc_from_data(
data, data_type: DataInputType
) -> Tuple['Document', 'DataInputType']:
def _build_doc_from_content():
return Document(content=data), DataInputType.CONTENT
if data_type == DataInputType.DICT:
return (
(Document(**data), DataInputType.DICT)
if docarray_v2
else (Document.from_dict(data), DataInputType.DICT)
)
if data_type == DataInputType.AUTO or data_type == DataInputType.DOCUMENT:
if isinstance(data, Document):
            # if the incoming data is already a Document, pass it through as-is
return data, DataInputType.DOCUMENT
elif isinstance(data, dict):
return (
(Document(**data), DataInputType.DICT)
if docarray_v2
else (Document.from_dict(data), DataInputType.DICT)
)
else:
try:
d = Document(data)
return d, DataInputType.DOCUMENT # NOT HIT
except ValueError:
# AUTO has a fallback, now reconsider it as content
if data_type == DataInputType.AUTO:
return _build_doc_from_content()
else:
raise
elif data_type == DataInputType.CONTENT:
return _build_doc_from_content()
def _add_docs(req: DataRequest, batch, data_type: DataInputType) -> None:
if not docarray_v2:
da = DocumentArray([])
else:
if len(batch) > 0:
da = DocList[batch[0].__class__]()
else:
da = DocList[BaseDoc]()
for content in batch:
d, data_type = _new_doc_from_data(content, data_type)
da.append(d)
req.document_array_cls = da.__class__
req.data.docs = da
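# A standalone mimic (not the module itself) of the AUTO dispatch described above,
# assuming docarray v1-style Document construction: ready-made Documents pass
# through unchanged, and raw values that cannot be parsed as a Document fall back
# to being wrapped as document content.
from jina import Document
from jina.enums import DataInputType
def resolve_auto(data):
    if isinstance(data, Document):
        return data, DataInputType.DOCUMENT
    try:
        return Document(data), DataInputType.DOCUMENT
    except ValueError:
        # mirror the fallback above: treat the raw value as content
        return Document(content=data), DataInputType.CONTENT
doc, kind = resolve_auto('hello')
print(kind, doc.content)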
|
from __future__ import annotations
import os
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
import PIL.Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class FGVCAircraft(VisionDataset):
"""`FGVC Aircraft <https://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft/>`_ Dataset.
The dataset contains 10,000 images of aircraft, with 100 images for each of 100
different aircraft model variants, most of which are airplanes.
Aircraft models are organized in a three-level hierarchy. The three levels, from
finer to coarser, are:
- ``variant``, e.g. Boeing 737-700. A variant collapses all the models that are visually
indistinguishable into one class. The dataset comprises 100 different variants.
- ``family``, e.g. Boeing 737. The dataset comprises 70 different families.
- ``manufacturer``, e.g. Boeing. The dataset comprises 30 different manufacturers.
Args:
root (str or ``pathlib.Path``): Root directory of the FGVC Aircraft dataset.
split (string, optional): The dataset split, supports ``train``, ``val``,
``trainval`` and ``test``.
annotation_level (str, optional): The annotation level, supports ``variant``,
``family`` and ``manufacturer``.
transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in the root directory. If the dataset is already downloaded, it is not
            downloaded again.
"""
_URL = "https://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft/archives/fgvc-aircraft-2013b.tar.gz"
def __init__(
self,
root: Union[str, Path],
split: str = "trainval",
annotation_level: str = "variant",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "val", "trainval", "test"))
self._annotation_level = verify_str_arg(
annotation_level, "annotation_level", ("variant", "family", "manufacturer")
)
self._data_path = os.path.join(self.root, "fgvc-aircraft-2013b")
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
annotation_file = os.path.join(
self._data_path,
"data",
{
"variant": "variants.txt",
"family": "families.txt",
"manufacturer": "manufacturers.txt",
}[self._annotation_level],
)
with open(annotation_file, "r") as f:
self.classes = [line.strip() for line in f]
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
image_data_folder = os.path.join(self._data_path, "data", "images")
labels_file = os.path.join(self._data_path, "data", f"images_{self._annotation_level}_{self._split}.txt")
self._image_files = []
self._labels = []
with open(labels_file, "r") as f:
for line in f:
image_name, label_name = line.strip().split(" ", 1)
self._image_files.append(os.path.join(image_data_folder, f"{image_name}.jpg"))
self._labels.append(self.class_to_idx[label_name])
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def _download(self) -> None:
"""
Download the FGVC Aircraft dataset archive and extract it under root.
"""
if self._check_exists():
return
download_and_extract_archive(self._URL, self.root)
def _check_exists(self) -> bool:
return os.path.exists(self._data_path) and os.path.isdir(self._data_path)
|
from __future__ import annotations
import os
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class FGVCAircraft(VisionDataset):
"""`FGVC Aircraft <https://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft/>`_ Dataset.
The dataset contains 10,000 images of aircraft, with 100 images for each of 100
different aircraft model variants, most of which are airplanes.
Aircraft models are organized in a three-level hierarchy. The three levels, from
finer to coarser, are:
- ``variant``, e.g. Boeing 737-700. A variant collapses all the models that are visually
indistinguishable into one class. The dataset comprises 100 different variants.
- ``family``, e.g. Boeing 737. The dataset comprises 70 different families.
- ``manufacturer``, e.g. Boeing. The dataset comprises 30 different manufacturers.
Args:
root (string): Root directory of the FGVC Aircraft dataset.
split (string, optional): The dataset split, supports ``train``, ``val``,
``trainval`` and ``test``.
annotation_level (str, optional): The annotation level, supports ``variant``,
``family`` and ``manufacturer``.
transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in the root directory. If the dataset is already downloaded, it is not
            downloaded again.
"""
_URL = "https://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft/archives/fgvc-aircraft-2013b.tar.gz"
def __init__(
self,
root: str,
split: str = "trainval",
annotation_level: str = "variant",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "val", "trainval", "test"))
self._annotation_level = verify_str_arg(
annotation_level, "annotation_level", ("variant", "family", "manufacturer")
)
self._data_path = os.path.join(self.root, "fgvc-aircraft-2013b")
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
annotation_file = os.path.join(
self._data_path,
"data",
{
"variant": "variants.txt",
"family": "families.txt",
"manufacturer": "manufacturers.txt",
}[self._annotation_level],
)
with open(annotation_file, "r") as f:
self.classes = [line.strip() for line in f]
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
image_data_folder = os.path.join(self._data_path, "data", "images")
labels_file = os.path.join(self._data_path, "data", f"images_{self._annotation_level}_{self._split}.txt")
self._image_files = []
self._labels = []
with open(labels_file, "r") as f:
for line in f:
image_name, label_name = line.strip().split(" ", 1)
self._image_files.append(os.path.join(image_data_folder, f"{image_name}.jpg"))
self._labels.append(self.class_to_idx[label_name])
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def _download(self) -> None:
"""
Download the FGVC Aircraft dataset archive and extract it under root.
"""
if self._check_exists():
return
download_and_extract_archive(self._URL, self.root)
def _check_exists(self) -> bool:
return os.path.exists(self._data_path) and os.path.isdir(self._data_path)
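# A minimal usage sketch for the dataset class above; the root path and the
# download flag are assumptions for illustration.
from torchvision.datasets import FGVCAircraft
dataset = FGVCAircraft(root='./data', split='trainval', annotation_level='variant', download=True)
image, label = dataset[0]
print(len(dataset), dataset.classes[label], image.size)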
|