input (strings, 33–5k chars) | output (strings, 32–5k chars) |
---|---|
import json
from collections.abc import Sequence
from langchain_core.agents import AgentAction
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolMessage,
)
from langchain.agents.output_parsers.tools import ToolAgentAction
def _create_tool_message(
agent_action: ToolAgentAction, observation: str
) -> ToolMessage:
"""Convert agent action and observation into a tool message.
Args:
agent_action: the tool invocation request from the agent.
observation: the result of the tool invocation.
Returns:
ToolMessage that corresponds to the original tool invocation.
Raises:
ValueError: if the observation cannot be converted to a string.
"""
if not isinstance(observation, str):
try:
content = json.dumps(observation, ensure_ascii=False)
except Exception:
content = str(observation)
else:
content = observation
return ToolMessage(
tool_call_id=agent_action.tool_call_id,
content=content,
additional_kwargs={"name": agent_action.tool},
)
def format_to_tool_messages(
intermediate_steps: Sequence[tuple[AgentAction, str]],
) -> list[BaseMessage]:
"""Convert (AgentAction, tool output) tuples into ToolMessages.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations.
Returns:
list of messages to send to the LLM for the next prediction.
"""
messages = []
for agent_action, observation in intermediate_steps:
if isinstance(agent_action, ToolAgentAction):
new_messages = [
*list(agent_action.message_log),
_create_tool_message(agent_action, observation),
]
messages.extend([new for new in new_messages if new not in messages])
else:
messages.append(AIMessage(content=agent_action.log))
return messages
|
import json
from collections.abc import Sequence
from langchain_core.agents import AgentAction
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolMessage,
)
from langchain.agents.output_parsers.tools import ToolAgentAction
def _create_tool_message(
agent_action: ToolAgentAction, observation: str
) -> ToolMessage:
"""Convert agent action and observation into a tool message.
Args:
agent_action: the tool invocation request from the agent.
observation: the result of the tool invocation.
Returns:
ToolMessage that corresponds to the original tool invocation.
Raises:
ValueError: if the observation cannot be converted to a string.
"""
if not isinstance(observation, str):
try:
content = json.dumps(observation, ensure_ascii=False)
except Exception:
content = str(observation)
else:
content = observation
return ToolMessage(
tool_call_id=agent_action.tool_call_id,
content=content,
additional_kwargs={"name": agent_action.tool},
)
def format_to_tool_messages(
intermediate_steps: Sequence[tuple[AgentAction, str]],
) -> list[BaseMessage]:
"""Convert (AgentAction, tool output) tuples into ToolMessages.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations.
Returns:
list of messages to send to the LLM for the next prediction.
"""
messages = []
for agent_action, observation in intermediate_steps:
if isinstance(agent_action, ToolAgentAction):
new_messages = list(agent_action.message_log) + [
_create_tool_message(agent_action, observation)
]
messages.extend([new for new in new_messages if new not in messages])
else:
messages.append(AIMessage(content=agent_action.log))
return messages
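# A minimal usage sketch of format_to_tool_messages (illustrative only: the tool
# name, tool_call_id, and observation below are made-up values, not part of the
# module above):
#
# action = ToolAgentAction(
#     tool="search",
#     tool_input={"query": "weather"},
#     log="Invoking search",
#     message_log=[AIMessage(content="Invoking search")],
#     tool_call_id="call_1",
# )
# history = format_to_tool_messages([(action, "sunny")])
# # -> [AIMessage(content="Invoking search"),
# #     ToolMessage(content="sunny", tool_call_id="call_1", ...)]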
|
"""Embeddings."""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.embeddings.embeddings import Embeddings
from langchain_core.embeddings.fake import (
DeterministicFakeEmbedding,
FakeEmbeddings,
)
__all__ = ("DeterministicFakeEmbedding", "Embeddings", "FakeEmbeddings")
_dynamic_imports = {
"Embeddings": "embeddings",
"DeterministicFakeEmbedding": "fake",
"FakeEmbeddings": "fake",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
"""Embeddings."""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.embeddings.embeddings import Embeddings
from langchain_core.embeddings.fake import (
DeterministicFakeEmbedding,
FakeEmbeddings,
)
__all__ = ["DeterministicFakeEmbedding", "Embeddings", "FakeEmbeddings"]
_dynamic_imports = {
"Embeddings": "embeddings",
"DeterministicFakeEmbedding": "fake",
"FakeEmbeddings": "fake",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
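# A hedged sketch of how the lazy loading above behaves (assuming this file is the
# package's __init__.py): the first attribute access goes through __getattr__, which
# imports the submodule, resolves the symbol, and caches it in globals() so later
# lookups bypass __getattr__ entirely.
#
# from langchain_core import embeddings
# emb_cls = embeddings.FakeEmbeddings          # triggers __getattr__("FakeEmbeddings")
# assert "FakeEmbeddings" in vars(embeddings)  # cached after the first access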
|
import sys
import warnings
import torch
from torch.onnx import symbolic_opset11 as opset11
from torch.onnx.symbolic_helper import parse_args
_ONNX_OPSET_VERSION_11 = 11
_ONNX_OPSET_VERSION_16 = 16
BASE_ONNX_OPSET_VERSION = _ONNX_OPSET_VERSION_11
@parse_args("v", "v", "f")
def symbolic_multi_label_nms(g, boxes, scores, iou_threshold):
boxes = opset11.unsqueeze(g, boxes, 0)
scores = opset11.unsqueeze(g, opset11.unsqueeze(g, scores, 0), 0)
max_output_per_class = g.op("Constant", value_t=torch.tensor([sys.maxsize], dtype=torch.long))
iou_threshold = g.op("Constant", value_t=torch.tensor([iou_threshold], dtype=torch.float))
# Cast boxes and scores to float32 in case they are float64 inputs
nms_out = g.op(
"NonMaxSuppression",
g.op("Cast", boxes, to_i=torch.onnx.TensorProtoDataType.FLOAT),
g.op("Cast", scores, to_i=torch.onnx.TensorProtoDataType.FLOAT),
max_output_per_class,
iou_threshold,
)
return opset11.squeeze(
g, opset11.select(g, nms_out, 1, g.op("Constant", value_t=torch.tensor([2], dtype=torch.long))), 1
)
def _process_batch_indices_for_roi_align(g, rois):
indices = opset11.squeeze(
g, opset11.select(g, rois, 1, g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))), 1
)
return g.op("Cast", indices, to_i=torch.onnx.TensorProtoDataType.INT64)
def _process_rois_for_roi_align(g, rois):
return opset11.select(g, rois, 1, g.op("Constant", value_t=torch.tensor([1, 2, 3, 4], dtype=torch.long)))
def _process_sampling_ratio_for_roi_align(g, sampling_ratio: int):
if sampling_ratio < 0:
warnings.warn(
"ONNX export for RoIAlign with a non-zero sampling_ratio is not supported. "
"The model will be exported with a sampling_ratio of 0."
)
sampling_ratio = 0
return sampling_ratio
@parse_args("v", "v", "f", "i", "i", "i", "i")
def roi_align_opset11(g, input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
batch_indices = _process_batch_indices_for_roi_align(g, rois)
rois = _process_rois_for_roi_align(g, rois)
if aligned:
warnings.warn(
"ROIAlign with aligned=True is only supported in opset >= 16. "
"Please export with opset 16 or higher, or use aligned=False."
)
sampling_ratio = _process_sampling_ratio_for_roi_align(g, sampling_ratio)
return g.op(
"RoiAlign",
input,
rois,
batch_indices,
spatial_scale_f=spatial_scale,
output_height_i=pooled_height,
output_width_i=pooled_width,
sampling_ratio_i=sampling_ratio,
)
@parse_args("v", "v", "f", "i", "i", "i", "i")
def roi_align_opset16(g, input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
batch_indices = _process_batch_indices_for_roi_align(g, rois)
rois = _process_rois_for_roi_align(g, rois)
coordinate_transformation_mode = "half_pixel" if aligned else "output_half_pixel"
sampling_ratio = _process_sampling_ratio_for_roi_align(g, sampling_ratio)
return g.op(
"RoiAlign",
input,
rois,
batch_indices,
coordinate_transformation_mode_s=coordinate_transformation_mode,
spatial_scale_f=spatial_scale,
output_height_i=pooled_height,
output_width_i=pooled_width,
sampling_ratio_i=sampling_ratio,
)
@parse_args("v", "v", "f", "i", "i")
def roi_pool(g, input, rois, spatial_scale, pooled_height, pooled_width):
roi_pool = g.op(
"MaxRoiPool", input, rois, pooled_shape_i=(pooled_height, pooled_width), spatial_scale_f=spatial_scale
)
return roi_pool, None
def _register_custom_op():
torch.onnx.register_custom_op_symbolic("torchvision::nms", symbolic_multi_label_nms, _ONNX_OPSET_VERSION_11)
torch.onnx.register_custom_op_symbolic("torchvision::roi_align", roi_align_opset11, _ONNX_OPSET_VERSION_11)
torch.onnx.register_custom_op_symbolic("torchvision::roi_align", roi_align_opset16, _ONNX_OPSET_VERSION_16)
torch.onnx.register_custom_op_symbolic("torchvision::roi_pool", roi_pool, _ONNX_OPSET_VERSION_11)
|
import sys
import warnings
import torch
_onnx_opset_version_11 = 11
_onnx_opset_version_16 = 16
base_onnx_opset_version = _onnx_opset_version_11
def _register_custom_op():
from torch.onnx.symbolic_helper import parse_args
from torch.onnx.symbolic_opset11 import select, squeeze, unsqueeze
@parse_args("v", "v", "f")
def symbolic_multi_label_nms(g, boxes, scores, iou_threshold):
boxes = unsqueeze(g, boxes, 0)
scores = unsqueeze(g, unsqueeze(g, scores, 0), 0)
max_output_per_class = g.op("Constant", value_t=torch.tensor([sys.maxsize], dtype=torch.long))
iou_threshold = g.op("Constant", value_t=torch.tensor([iou_threshold], dtype=torch.float))
nms_out = g.op(
"NonMaxSuppression",
g.op("Cast", boxes, to_i=torch.onnx.TensorProtoDataType.FLOAT),
g.op("Cast", scores, to_i=torch.onnx.TensorProtoDataType.FLOAT),
max_output_per_class,
iou_threshold,
)
return squeeze(g, select(g, nms_out, 1, g.op("Constant", value_t=torch.tensor([2], dtype=torch.long))), 1)
def _process_batch_indices_for_roi_align(g, rois):
indices = squeeze(g, select(g, rois, 1, g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))), 1)
return g.op("Cast", indices, to_i=torch.onnx.TensorProtoDataType.INT64)
def _process_rois_for_roi_align(g, rois):
return select(g, rois, 1, g.op("Constant", value_t=torch.tensor([1, 2, 3, 4], dtype=torch.long)))
def _process_sampling_ratio_for_roi_align(g, sampling_ratio: int):
if sampling_ratio < 0:
warnings.warn(
"ONNX export for RoIAlign with a non-zero sampling_ratio is not supported. "
"The model will be exported with a sampling_ratio of 0."
)
sampling_ratio = 0
return sampling_ratio
@parse_args("v", "v", "f", "i", "i", "i", "i")
def roi_align_opset11(g, input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
batch_indices = _process_batch_indices_for_roi_align(g, rois)
rois = _process_rois_for_roi_align(g, rois)
if aligned:
warnings.warn(
"ROIAlign with aligned=True is only supported in opset >= 16. "
"Please export with opset 16 or higher, or use aligned=False."
)
sampling_ratio = _process_sampling_ratio_for_roi_align(g, sampling_ratio)
return g.op(
"RoiAlign",
input,
rois,
batch_indices,
spatial_scale_f=spatial_scale,
output_height_i=pooled_height,
output_width_i=pooled_width,
sampling_ratio_i=sampling_ratio,
)
@parse_args("v", "v", "f", "i", "i", "i", "i")
def roi_align_opset16(g, input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
batch_indices = _process_batch_indices_for_roi_align(g, rois)
rois = _process_rois_for_roi_align(g, rois)
coordinate_transformation_mode = "half_pixel" if aligned else "output_half_pixel"
sampling_ratio = _process_sampling_ratio_for_roi_align(g, sampling_ratio)
return g.op(
"RoiAlign",
input,
rois,
batch_indices,
coordinate_transformation_mode_s=coordinate_transformation_mode,
spatial_scale_f=spatial_scale,
output_height_i=pooled_height,
output_width_i=pooled_width,
sampling_ratio_i=sampling_ratio,
)
@parse_args("v", "v", "f", "i", "i")
def roi_pool(g, input, rois, spatial_scale, pooled_height, pooled_width):
roi_pool = g.op(
"MaxRoiPool", input, rois, pooled_shape_i=(pooled_height, pooled_width), spatial_scale_f=spatial_scale
)
return roi_pool, None
from torch.onnx import register_custom_op_symbolic
register_custom_op_symbolic("torchvision::nms", symbolic_multi_label_nms, _onnx_opset_version_11)
register_custom_op_symbolic("torchvision::roi_align", roi_align_opset11, _onnx_opset_version_11)
register_custom_op_symbolic("torchvision::roi_align", roi_align_opset16, _onnx_opset_version_16)
register_custom_op_symbolic("torchvision::roi_pool", roi_pool, _onnx_opset_version_11)
|
"""Utils for manipulating images."""
import base64
from io import BytesIO
from typing import cast
from PIL import Image
from PIL.ImageFile import ImageFile
def img_2_b64(image: ImageFile, format: str = "JPEG") -> str:
"""
Convert a PIL.Image to a base64 encoded image string.
Args:
image (ImageFile): The PIL Image object to be converted.
format (str, optional): The image format to save as. Defaults to "JPEG".
Returns:
str: A base64 encoded string representation of the image.
"""
buff = BytesIO()
image.save(buff, format=format)
return base64.b64encode(buff.getvalue()).decode("utf-8")
def b64_2_img(data: str) -> ImageFile:
"""
Convert base64 encoded image string to a PIL.Image.
Args:
data (str): The base64 encoded image string.
Returns:
ImageFile: A PIL Image object.
"""
buff = BytesIO(base64.b64decode(data))
return cast(ImageFile, Image.open(buff))
|
"""Utils for manipulating images."""
import base64
from io import BytesIO
from typing import cast
from PIL import Image
from PIL.ImageFile import ImageFile
def img_2_b64(image: ImageFile, format: str = "JPEG") -> str:
"""
Convert a PIL.Image to a base64 encoded image string.
Args:
image (ImageFile): The PIL Image object to be converted.
format (str, optional): The image format to save as. Defaults to "JPEG".
Returns:
str: A base64 encoded string representation of the image.
"""
buff = BytesIO()
image.save(buff, format=format)
return base64.b64encode(buff.getvalue()).decode("utf-8")
def b64_2_img(data: str) -> ImageFile:
"""
Convert base64 encoded image string to a PIL.Image.
Args:
data (str): The base64 encoded image string.
Returns:
ImageFile: A PIL Image object.
"""
buff = BytesIO(base64.b64decode(data))
return cast(ImageFile, Image.open(buff))
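# A small roundtrip check for the two helpers above (illustrative only; assumes
# Pillow is installed and uses a throwaway 8x8 image):
#
# img = Image.new("RGB", (8, 8), color="red")
# encoded = img_2_b64(img)       # base64 string of the JPEG-encoded bytes
# decoded = b64_2_img(encoded)   # PIL image parsed back from the string
# assert decoded.size == (8, 8)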
|
"""Tool for the Google Scholar"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.google_scholar import GoogleScholarAPIWrapper
class GoogleScholarQueryRun(BaseTool):
"""Tool that queries the Google search API."""
name: str = "google_scholar"
description: str = (
"A wrapper around Google Scholar Search. "
"Useful for when you need to get information about"
"research papers from Google Scholar"
"Input should be a search query."
)
api_wrapper: GoogleScholarAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_wrapper.run(query)
|
"""Tool for the Google Scholar"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.google_scholar import GoogleScholarAPIWrapper
class GoogleScholarQueryRun(BaseTool): # type: ignore[override]
"""Tool that queries the Google search API."""
name: str = "google_scholar"
description: str = (
"A wrapper around Google Scholar Search. "
"Useful for when you need to get information about"
"research papers from Google Scholar"
"Input should be a search query."
)
api_wrapper: GoogleScholarAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_wrapper.run(query)
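# Hedged usage sketch (requires the google-search-results package and a SerpAPI key;
# the key and query below are placeholders):
#
# wrapper = GoogleScholarAPIWrapper(serp_api_key="<your-serpapi-key>")
# tool = GoogleScholarQueryRun(api_wrapper=wrapper)
# print(tool.run("sparse retrieval survey"))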
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from docarray import BaseDoc
from docarray.array import DocVec
from docarray.array.doc_vec.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_document_view():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=str(i)) for i in range(4)]
doc_vec = DocVec[MyDoc](docs)
storage = doc_vec._storage
result = str(doc_vec[0])
assert 'MyDoc' in result
assert 'id' in result
assert 'tensor' in result
assert 'name' in result
doc = MyDoc.from_view(ColumnStorageView(0, storage))
assert doc.is_view()
assert doc.id == '0'
assert (doc.tensor == np.zeros(10)).all()
assert doc.name == 'hello'
storage.columns['id'][0] = '12345'
storage.columns['tensor'][0] = np.ones(10)
storage.columns['name'][0] = 'byebye'
assert doc.id == '12345'
assert (doc.tensor == np.ones(10)).all()
assert doc.name == 'byebye'
|
import numpy as np
from docarray import BaseDoc
from docarray.array import DocVec
from docarray.array.doc_vec.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_document_view():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=str(i)) for i in range(4)]
doc_vec = DocVec[MyDoc](docs)
storage = doc_vec._storage
result = str(doc_vec[0])
assert 'MyDoc' in result
assert 'id' in result
assert 'tensor' in result
assert 'name' in result
doc = MyDoc.from_view(ColumnStorageView(0, storage))
assert doc.is_view()
assert doc.id == '0'
assert (doc.tensor == np.zeros(10)).all()
assert doc.name == 'hello'
storage.columns['id'][0] = '12345'
storage.columns['tensor'][0] = np.ones(10)
storage.columns['name'][0] = 'byebye'
assert doc.id == '12345'
assert (doc.tensor == np.ones(10)).all()
assert doc.name == 'byebye'
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='PointsAndColors')
class PointsAndColors(BaseDoc):
"""
Document for handling point clouds tensor data.
A PointsAndColors Document can contain an AnyTensor containing the points in
3D space information (`PointsAndColors.points`), and an AnyTensor containing
the points' color information (`PointsAndColors.colors`).
"""
points: AnyTensor
colors: Optional[AnyTensor]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(points=value)
return super().validate(value)
def display(self) -> None:
"""
Plot point cloud consisting of points in 3D space and optionally colors.
To use this you need to install trimesh[easy]: `pip install 'trimesh[easy]'`.
"""
import trimesh
from IPython.display import display
colors = (
self.colors
if self.colors is not None
else np.tile(
np.array([0, 0, 0]),
(self.points.get_comp_backend().shape(self.points)[0], 1),
)
)
pc = trimesh.points.PointCloud(vertices=self.points, colors=colors)
s = trimesh.Scene(geometry=pc)
display(s.show())
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='PointsAndColors')
class PointsAndColors(BaseDoc):
"""
Document for handling point clouds tensor data.
A PointsAndColors Document can contain an AnyTensor containing the points in
3D space information (`PointsAndColors.points`), and an AnyTensor containing
the points' color information (`PointsAndColors.colors`).
"""
points: AnyTensor
colors: Optional[AnyTensor]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(points=value)
return super().validate(value)
def display(self) -> None:
"""
Plot point cloud consisting of points in 3D space and optionally colors.
To use this you need to install trimesh[easy]: `pip install 'trimesh[easy]'`.
"""
import trimesh
from IPython.display import display
colors = (
self.colors
if self.colors is not None
else np.tile(
np.array([0, 0, 0]),
(self.points.get_comp_backend().shape(self.points)[0], 1),
)
)
pc = trimesh.points.PointCloud(vertices=self.points, colors=colors)
s = trimesh.Scene(geometry=pc)
display(s.show())
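# Hypothetical usage of the document above (the random point cloud is illustrative;
# display() additionally needs trimesh[easy] and an IPython environment):
#
# import numpy as np
# doc = PointsAndColors(points=np.random.rand(1000, 3))
# doc.display()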
|
import numpy as np
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from docarray import Document, Image, Text
from docarray.typing import NdArray
@pytest.mark.asyncio
async def test_fast_api():
class Mmdoc(Document):
img: Image
text: Text
title: str
input_doc = Mmdoc(
img=Image(tensor=np.zeros((3, 224, 224))), text=Text(), title='hello'
)
app = FastAPI()
@app.post("/doc/")
async def create_item(doc: Mmdoc):
return doc
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
@pytest.mark.asyncio
async def test_image():
class InputDoc(Document):
img: Image
class OutputDoc(Document):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(img=Image(tensor=np.zeros((3, 224, 224))))
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
return OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
@pytest.mark.asyncio
async def test_sentence_to_embeddings():
class InputDoc(Document):
text: str
class OutputDoc(Document):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(text='hello')
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
return OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
|
import numpy as np
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from docarray import Document, Image, Text
from docarray.typing import Tensor
@pytest.mark.asyncio
async def test_fast_api():
class Mmdoc(Document):
img: Image
text: Text
title: str
input_doc = Mmdoc(
img=Image(tensor=np.zeros((3, 224, 224))), text=Text(), title='hello'
)
app = FastAPI()
@app.post("/doc/")
async def create_item(doc: Mmdoc):
return doc
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
@pytest.mark.asyncio
async def test_image():
class InputDoc(Document):
img: Image
class OutputDoc(Document):
embedding_clip: Tensor
embedding_bert: Tensor
input_doc = InputDoc(img=Image(tensor=np.zeros((3, 224, 224))))
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
return OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
@pytest.mark.asyncio
async def test_sentence_to_embeddings():
class InputDoc(Document):
text: str
class OutputDoc(Document):
embedding_clip: Tensor
embedding_bert: Tensor
input_doc = InputDoc(text='hello')
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
return OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.config import backend as backend
from keras.src.backend.config import (
disable_flash_attention as disable_flash_attention,
)
from keras.src.backend.config import (
enable_flash_attention as enable_flash_attention,
)
from keras.src.backend.config import epsilon as epsilon
from keras.src.backend.config import floatx as floatx
from keras.src.backend.config import image_data_format as image_data_format
from keras.src.backend.config import (
is_flash_attention_enabled as is_flash_attention_enabled,
)
from keras.src.backend.config import max_epochs as max_epochs
from keras.src.backend.config import max_steps_per_epoch as max_steps_per_epoch
from keras.src.backend.config import set_epsilon as set_epsilon
from keras.src.backend.config import set_floatx as set_floatx
from keras.src.backend.config import (
set_image_data_format as set_image_data_format,
)
from keras.src.backend.config import set_max_epochs as set_max_epochs
from keras.src.backend.config import (
set_max_steps_per_epoch as set_max_steps_per_epoch,
)
from keras.src.dtype_policies.dtype_policy import dtype_policy as dtype_policy
from keras.src.dtype_policies.dtype_policy import (
set_dtype_policy as set_dtype_policy,
)
from keras.src.saving.serialization_lib import (
enable_unsafe_deserialization as enable_unsafe_deserialization,
)
from keras.src.utils.backend_utils import set_backend as set_backend
from keras.src.utils.io_utils import (
disable_interactive_logging as disable_interactive_logging,
)
from keras.src.utils.io_utils import (
enable_interactive_logging as enable_interactive_logging,
)
from keras.src.utils.io_utils import (
is_interactive_logging_enabled as is_interactive_logging_enabled,
)
from keras.src.utils.traceback_utils import (
disable_traceback_filtering as disable_traceback_filtering,
)
from keras.src.utils.traceback_utils import (
enable_traceback_filtering as enable_traceback_filtering,
)
from keras.src.utils.traceback_utils import (
is_traceback_filtering_enabled as is_traceback_filtering_enabled,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.config import backend as backend
from keras.src.backend.config import (
disable_flash_attention as disable_flash_attention,
)
from keras.src.backend.config import (
enable_flash_attention as enable_flash_attention,
)
from keras.src.backend.config import epsilon as epsilon
from keras.src.backend.config import floatx as floatx
from keras.src.backend.config import image_data_format as image_data_format
from keras.src.backend.config import (
is_flash_attention_enabled as is_flash_attention_enabled,
)
from keras.src.backend.config import set_epsilon as set_epsilon
from keras.src.backend.config import set_floatx as set_floatx
from keras.src.backend.config import (
set_image_data_format as set_image_data_format,
)
from keras.src.dtype_policies.dtype_policy import dtype_policy as dtype_policy
from keras.src.dtype_policies.dtype_policy import (
set_dtype_policy as set_dtype_policy,
)
from keras.src.saving.serialization_lib import (
enable_unsafe_deserialization as enable_unsafe_deserialization,
)
from keras.src.utils.backend_utils import set_backend as set_backend
from keras.src.utils.io_utils import (
disable_interactive_logging as disable_interactive_logging,
)
from keras.src.utils.io_utils import (
enable_interactive_logging as enable_interactive_logging,
)
from keras.src.utils.io_utils import (
is_interactive_logging_enabled as is_interactive_logging_enabled,
)
from keras.src.utils.traceback_utils import (
disable_traceback_filtering as disable_traceback_filtering,
)
from keras.src.utils.traceback_utils import (
enable_traceback_filtering as enable_traceback_filtering,
)
from keras.src.utils.traceback_utils import (
is_traceback_filtering_enabled as is_traceback_filtering_enabled,
)
|
_base_ = '../grounding_dino_swin-t_pretrain_obj365.py'
data_root = 'data/coco/'
model = dict(test_cfg=dict(
max_per_img=300,
chunked_size=40,
))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[
[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
# The ratio of all images in the train dataset is < 7,
# following the original implementation
scales=[(400, 4200), (500, 4200), (600, 4200)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
]
]),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(
type='RandomSamplingNegPos',
tokenizer_name=_base_.lang_model_name,
num_sample_negative=85,
# change this
label_map_file='data/coco/annotations/lvis_v1_label_map.json',
max_tokens=256),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction', 'text',
'custom_entities', 'tokens_positive', 'dataset_mode'))
]
train_dataloader = dict(
dataset=dict(
_delete_=True,
type='ClassBalancedDataset',
oversample_thr=1e-3,
dataset=dict(
type='ODVGDataset',
data_root=data_root,
need_text=False,
label_map_file='annotations/lvis_v1_label_map.json',
ann_file='annotations/lvis_v1_train_od.json',
data_prefix=dict(img=''),
filter_cfg=dict(filter_empty_gt=False, min_size=32),
return_classes=True,
pipeline=train_pipeline)))
val_dataloader = dict(
dataset=dict(
data_root=data_root,
type='LVISV1Dataset',
ann_file='annotations/lvis_v1_minival_inserted_image_name.json',
data_prefix=dict(img='')))
test_dataloader = val_dataloader
val_evaluator = dict(
_delete_=True,
type='LVISFixedAPMetric',
ann_file=data_root +
'annotations/lvis_v1_minival_inserted_image_name.json')
test_evaluator = val_evaluator
optim_wrapper = dict(
_delete_=True,
type='OptimWrapper',
optimizer=dict(type='AdamW', lr=0.0002, weight_decay=0.0001),
clip_grad=dict(max_norm=0.1, norm_type=2),
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'backbone': dict(lr_mult=0.1),
# 'language_model': dict(lr_mult=0),
}))
# learning policy
max_epochs = 12
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[11],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs, val_interval=3)
default_hooks = dict(
checkpoint=dict(
max_keep_ckpts=1, save_best='lvis_fixed_ap/AP', rule='greater'))
load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth' # noqa
|
_base_ = '../grounding_dino_swin-t_pretrain_obj365.py'
data_root = 'data/coco/'
model = dict(test_cfg=dict(
max_per_img=300,
chunked_size=40,
))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[
[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
# The ratio of all images in the train dataset is < 7,
# following the original implementation
scales=[(400, 4200), (500, 4200), (600, 4200)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
]
]),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(
type='RandomSamplingNegPos',
tokenizer_name=_base_.lang_model_name,
num_sample_negative=85,
# change this
label_map_file='data/coco/annotations/lvis_v1_label_map.json',
max_tokens=256),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction', 'text',
'custom_entities', 'tokens_positive', 'dataset_mode'))
]
train_dataloader = dict(
dataset=dict(
_delete_=True,
type='ClassBalancedDataset',
oversample_thr=1e-3,
dataset=dict(
type='ODVGDataset',
data_root=data_root,
need_text=False,
label_map_file='annotations/lvis_v1_label_map.json',
ann_file='annotations/lvis_v1_train_od.json',
data_prefix=dict(img=''),
filter_cfg=dict(filter_empty_gt=False, min_size=32),
return_classes=True,
pipeline=train_pipeline)))
val_dataloader = dict(
dataset=dict(
data_root=data_root,
type='LVISV1Dataset',
ann_file='annotations/lvis_v1_minival_inserted_image_name.json',
data_prefix=dict(img='')))
test_dataloader = val_dataloader
val_evaluator = dict(
_delete_=True,
type='LVISFixedAPMetric',
ann_file=data_root +
'annotations/lvis_v1_minival_inserted_image_name.json')
test_evaluator = val_evaluator
optim_wrapper = dict(
_delete_=True,
type='OptimWrapper',
optimizer=dict(type='AdamW', lr=0.0002, weight_decay=0.0001),
clip_grad=dict(max_norm=0.1, norm_type=2),
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'backbone': dict(lr_mult=0.1),
# 'language_model': dict(lr_mult=0),
}))
# learning policy
max_epochs = 12
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[11],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs, val_interval=3)
default_hooks = dict(
checkpoint=dict(
max_keep_ckpts=1, save_best='lvis_fixed_ap/AP', rule='greater'))
load_from = ''
|
from typing import Type, TYPE_CHECKING
from docarray import Document
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
class EmptyMixin:
"""Helper functions for building arrays with empty Document."""
@classmethod
def empty(cls: Type['T'], size: int = 0, *args, **kwargs) -> 'T':
"""Create a :class:`DocumentArray` object with :attr:`size` empty
:class:`Document` objects.
:param size: the number of empty Documents in this container
:return: a :class:`DocumentArray` object
"""
return cls((Document() for _ in range(size)), *args, **kwargs)
|
from typing import Type, TYPE_CHECKING
from docarray import Document
if TYPE_CHECKING:
from docarray.typing import T
class EmptyMixin:
"""Helper functions for building arrays with empty Document."""
@classmethod
def empty(cls: Type['T'], size: int = 0, *args, **kwargs) -> 'T':
"""Create a :class:`DocumentArray` object with :attr:`size` empty
:class:`Document` objects.
:param size: the number of empty Documents in this container
:return: a :class:`DocumentArray` object
"""
return cls((Document() for _ in range(size)), *args, **kwargs)
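# Illustrative only (assumes a container class such as docarray's DocumentArray
# mixes in EmptyMixin, as in docarray < 0.30):
#
# from docarray import DocumentArray
# da = DocumentArray.empty(3)   # three freshly constructed, empty Documents
# assert len(da) == 3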
|
import torch
import torchaudio.prototype.transforms as T
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class Transforms(TestBaseMixin):
@nested_params(
["Convolve", "FFTConvolve"],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
convolve = getattr(T, cls)(mode=mode).to(device=self.device, dtype=self.dtype)
output = convolve(x, y)
ts_output = torch_script(convolve)(x, y)
self.assertEqual(ts_output, output)
def test_Speed(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=self.dtype, device=self.device)
speed = T.Speed(1000, 0.9).to(self.device, self.dtype)
output = speed(waveform, lengths)
ts_output = torch_script(speed)(waveform, lengths)
self.assertEqual(ts_output, output)
def test_SpeedPerturbation(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=self.dtype, device=self.device)
speed = T.SpeedPerturbation(1000, [0.9]).to(self.device, self.dtype)
output = speed(waveform, lengths)
ts_output = torch_script(speed)(waveform, lengths)
self.assertEqual(ts_output, output)
def test_AddNoise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
add_noise = T.AddNoise().to(self.device, self.dtype)
output = add_noise(waveform, noise, lengths, snr)
ts_output = torch_script(add_noise)(waveform, noise, lengths, snr)
self.assertEqual(ts_output, output)
|
import torch
import torchaudio.prototype.transforms as T
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class Transforms(TestBaseMixin):
@nested_params(
["Convolve", "FFTConvolve"],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
convolve = getattr(T, cls)(mode=mode).to(device=self.device, dtype=self.dtype)
output = convolve(x, y)
ts_output = torch_script(convolve)(x, y)
self.assertEqual(ts_output, output)
def test_Speed(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=self.dtype, device=self.device)
speed = T.Speed(1000, 0.9).to(self.device, self.dtype)
output = speed(waveform, lengths)
ts_output = torch_script(speed)(waveform, lengths)
self.assertEqual(ts_output, output)
def test_SpeedPerturbation(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=self.dtype, device=self.device)
speed = T.SpeedPerturbation(1000, [0.9]).to(self.device, self.dtype)
output = speed(waveform, lengths)
ts_output = torch_script(speed)(waveform, lengths)
self.assertEqual(ts_output, output)
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model (not multilingual, but we hope to see some on the Hub soon)
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
"""
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.60
Model Sparsity: Active Dimensions: 112.3, Sparsity Ratio: 0.9963
"""
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4450
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model (not multilingual, but we hope to see some on the Hub soon)
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
"""
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
Model Sparsity: Active Dimensions: 113.6, Sparsity Ratio: 0.9963
"""
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on macOS, though it seems to have no effect; the EXPORT must still be done manually before jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.15.1'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.17'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096, which is useful for running matplotlib/seaborn on
parallel-executing plot generators, versus the Ubuntu default of ulimit -n 1024 or the OS X
El Capitan default of 256; the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on macOS, though it seems to have no effect; the EXPORT must still be done manually before jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.15.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.17'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096, which is useful for running matplotlib/seaborn on
parallel-executing plot generators, versus the Ubuntu default of ulimit -n 1024 or the OS X
El Capitan default of 256; the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
import csv
import gzip
import logging
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 128
epochs = 1
max_seq_length = 75
# Save path to store our model
model_save_path = "output/training_stsb_ct-improved-{}-{}".format(
model_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
################# Train sentences #################
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = "data/wiki1m_for_simcse.txt"
if not os.path.exists(wikipedia_dataset_path):
util.http_get(
"https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt",
wikipedia_dataset_path,
)
# train_sentences are simply your list of sentences
train_sentences = []
with open(wikipedia_dataset_path, "r", encoding="utf8") as fIn:
for line in fIn:
train_sentences.append(InputExample(texts=[line.strip(), line.strip()]))
################# Download and load STSb #################
data_folder = "data/stsbenchmark"
sts_dataset_path = f"{data_folder}/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
# As loss, we use losses.ContrastiveTensionLossInBatchNegatives
train_loss = losses.ContrastiveTensionLossInBatchNegatives(model, scale=1, similarity_fct=util.dot_score)
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=1,
evaluation_steps=1000,
warmup_steps=1000,
output_path=model_save_path,
optimizer_params={"lr": 5e-5},
use_amp=True, # Set to True, if your GPU supports FP16 cores
)
########### Load the model and evaluate on test set
model = SentenceTransformer(model_save_path)
test_evaluator(model)
|
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers import SentenceTransformer, LoggingHandler, models, util, InputExample
from sentence_transformers import losses
import os
import gzip
import csv
from datetime import datetime
import logging
from torch.utils.data import DataLoader
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 128
epochs = 1
max_seq_length = 75
# Save path to store our model
model_save_path = "output/training_stsb_ct-improved-{}-{}".format(
model_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
################# Train sentences #################
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = "data/wiki1m_for_simcse.txt"
if not os.path.exists(wikipedia_dataset_path):
util.http_get(
"https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt",
wikipedia_dataset_path,
)
# train_sentences are simply your list of sentences
train_sentences = []
with open(wikipedia_dataset_path, "r", encoding="utf8") as fIn:
for line in fIn:
train_sentences.append(InputExample(texts=[line.strip(), line.strip()]))
################# Download and load STSb #################
data_folder = "data/stsbenchmark"
sts_dataset_path = f"{data_folder}/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
# As loss, we use losses.ContrastiveTensionLossInBatchNegatives
train_loss = losses.ContrastiveTensionLossInBatchNegatives(model, scale=1, similarity_fct=util.dot_score)
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=1,
evaluation_steps=1000,
warmup_steps=1000,
output_path=model_save_path,
optimizer_params={"lr": 5e-5},
    use_amp=True,  # Set to True if your GPU supports FP16 operations
)
########### Load the model and evaluate on test set
model = SentenceTransformer(model_save_path)
test_evaluator(model)
|
import hashlib
import logging
from os import PathLike
from pathlib import Path
from typing import Union
import torch
from torchaudio._internal import download_url_to_file
_LG = logging.getLogger(__name__)
def _get_local_path(key):
path = Path(torch.hub.get_dir()) / "torchaudio" / Path(key)
path.parent.mkdir(parents=True, exist_ok=True)
return path
def _download(key, path, progress):
url = f"https://download.pytorch.org/torchaudio/{key}"
download_url_to_file(url, path, progress=progress)
def _get_hash(path, hash, chunk_size=1028):
m = hashlib.sha256()
with open(path, "rb") as file:
data = file.read(chunk_size)
while data:
m.update(data)
data = file.read(chunk_size)
return m.hexdigest()
from torchaudio._internal.module_utils import dropping_support
@dropping_support
def download_asset(
key: str,
hash: str = "",
path: Union[str, PathLike] = "",
*,
progress: bool = True,
) -> str:
"""Download and store torchaudio assets to local file system.
If a file exists at the download path, then that path is returned with or without
hash validation.
Args:
key (str): The asset identifier.
hash (str, optional):
The value of SHA256 hash of the asset. If provided, it is used to verify
the downloaded / cached object. If not provided, then no hash validation
is performed. This means if a file exists at the download path, then the path
is returned as-is without verifying the identity of the file.
path (path-like object, optional):
By default, the downloaded asset is saved in a directory under
:py:func:`torch.hub.get_dir` and intermediate directories based on the given `key`
are created.
This argument can be used to overwrite the target location.
When this argument is provided, all the intermediate directories have to be
created beforehand.
progress (bool): Whether to show progress bar for downloading. Default: ``True``.
Note:
Currently the valid key values are the route on ``download.pytorch.org/torchaudio``,
but this is an implementation detail.
Returns:
str: The path to the asset on the local file system.
"""
path = path or _get_local_path(key)
if path.exists():
_LG.info("The local file (%s) exists. Skipping the download.", path)
else:
_LG.info("Downloading %s to %s", key, path)
_download(key, path, progress=progress)
if hash:
_LG.info("Verifying the hash value.")
digest = _get_hash(path, hash)
if digest != hash:
raise ValueError(
f"The hash value of the downloaded file ({path}), '{digest}' does not match "
f"the provided hash value, '{hash}'."
)
_LG.info("Hash validated.")
return str(path)
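# Hedged usage sketch (illustrative only): fetch an asset by key and reuse the cached
# copy on subsequent calls. The key below is an assumed example route under
# download.pytorch.org/torchaudio, not a guaranteed asset.
if __name__ == "__main__":
    local_path = download_asset("tutorial-assets/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav")
    _LG.info("Asset available at %s", local_path)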
|
import hashlib
import logging
from os import PathLike
from pathlib import Path
from typing import Union
import torch
from torchaudio._internal import download_url_to_file
_LG = logging.getLogger(__name__)
def _get_local_path(key):
path = Path(torch.hub.get_dir()) / "torchaudio" / Path(key)
path.parent.mkdir(parents=True, exist_ok=True)
return path
def _download(key, path, progress):
url = f"https://download.pytorch.org/torchaudio/{key}"
download_url_to_file(url, path, progress=progress)
def _get_hash(path, hash, chunk_size=1028):
m = hashlib.sha256()
with open(path, "rb") as file:
data = file.read(chunk_size)
while data:
m.update(data)
data = file.read(chunk_size)
return m.hexdigest()
def download_asset(
key: str,
hash: str = "",
path: Union[str, PathLike] = "",
*,
progress: bool = True,
) -> str:
"""Download and store torchaudio assets to local file system.
If a file exists at the download path, then that path is returned with or without
hash validation.
Args:
key (str): The asset identifier.
hash (str, optional):
The value of SHA256 hash of the asset. If provided, it is used to verify
the downloaded / cached object. If not provided, then no hash validation
is performed. This means if a file exists at the download path, then the path
is returned as-is without verifying the identity of the file.
path (path-like object, optional):
By default, the downloaded asset is saved in a directory under
:py:func:`torch.hub.get_dir` and intermediate directories based on the given `key`
are created.
This argument can be used to overwrite the target location.
When this argument is provided, all the intermediate directories have to be
created beforehand.
progress (bool): Whether to show progress bar for downloading. Default: ``True``.
Note:
Currently the valid key values are the route on ``download.pytorch.org/torchaudio``,
but this is an implementation detail.
Returns:
str: The path to the asset on the local file system.
"""
path = path or _get_local_path(key)
if path.exists():
_LG.info("The local file (%s) exists. Skipping the download.", path)
else:
_LG.info("Downloading %s to %s", key, path)
_download(key, path, progress=progress)
if hash:
_LG.info("Verifying the hash value.")
digest = _get_hash(path, hash)
if digest != hash:
raise ValueError(
f"The hash value of the downloaded file ({path}), '{digest}' does not match "
f"the provided hash value, '{hash}'."
)
_LG.info("Hash validated.")
return str(path)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.promptlayer_openai import PromptLayerChatOpenAI
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"PromptLayerChatOpenAI": "langchain_community.chat_models.promptlayer_openai",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"PromptLayerChatOpenAI",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.promptlayer_openai import PromptLayerChatOpenAI
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"PromptLayerChatOpenAI": "langchain_community.chat_models.promptlayer_openai"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"PromptLayerChatOpenAI",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.testing._utils import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestTwoStagePanopticSegmentor(unittest.TestCase):
def setUp(self):
register_all_modules()
def _create_model_cfg(self):
cfg_file = 'panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py'
model_cfg = get_detector_cfg(cfg_file)
model_cfg.backbone.depth = 18
model_cfg.neck.in_channels = [64, 128, 256, 512]
model_cfg.backbone.init_cfg = None
return model_cfg
def test_init(self):
model_cfg = self._create_model_cfg()
detector = MODELS.build(model_cfg)
assert detector.backbone
assert detector.neck
assert detector.rpn_head
assert detector.roi_head
assert detector.roi_head.mask_head
assert detector.with_semantic_head
assert detector.with_panoptic_fusion_head
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_loss_mode(self, device):
model_cfg = self._create_model_cfg()
detector = MODELS.build(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
            self.skipTest('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2,
image_shapes=[(3, 128, 127), (3, 91, 92)],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
data = detector.data_preprocessor(packed_inputs, True)
# Test loss mode
losses = detector.forward(**data, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_predict_mode(self, device):
model_cfg = self._create_model_cfg()
detector = MODELS.build(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
            self.skipTest('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2,
image_shapes=[(3, 128, 127), (3, 91, 92)],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_tensor_mode(self, device):
model_cfg = self._create_model_cfg()
detector = MODELS.build(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
            self.skipTest('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
data = detector.data_preprocessor(packed_inputs, False)
out = detector.forward(**data, mode='tensor')
self.assertIsInstance(out, tuple)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from parameterized import parameterized
from mmdet.models import build_detector
from mmdet.structures import DetDataSample
from mmdet.testing._utils import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestTwoStagePanopticSegmentor(unittest.TestCase):
def setUp(self):
register_all_modules()
def _create_model_cfg(self):
cfg_file = 'panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py'
model_cfg = get_detector_cfg(cfg_file)
model_cfg.backbone.depth = 18
model_cfg.neck.in_channels = [64, 128, 256, 512]
model_cfg.backbone.init_cfg = None
return model_cfg
def test_init(self):
model_cfg = self._create_model_cfg()
detector = build_detector(model_cfg)
assert detector.backbone
assert detector.neck
assert detector.rpn_head
assert detector.roi_head
assert detector.roi_head.mask_head
assert detector.with_semantic_head
assert detector.with_panoptic_fusion_head
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_loss_mode(self, device):
model_cfg = self._create_model_cfg()
detector = build_detector(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
            self.skipTest('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2,
image_shapes=[(3, 128, 127), (3, 91, 92)],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
data = detector.data_preprocessor(packed_inputs, True)
# Test loss mode
losses = detector.forward(**data, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_predict_mode(self, device):
model_cfg = self._create_model_cfg()
detector = build_detector(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
            self.skipTest('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2,
image_shapes=[(3, 128, 127), (3, 91, 92)],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_tensor_mode(self, device):
model_cfg = self._create_model_cfg()
detector = build_detector(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
            self.skipTest('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
data = detector.data_preprocessor(packed_inputs, False)
out = detector.forward(**data, mode='tensor')
self.assertIsInstance(out, tuple)
|
"""Test Prediction Guard API wrapper."""
import pytest
from langchain_community.llms.predictionguard import PredictionGuard
def test_predictionguard_invoke() -> None:
"""Test valid call to prediction guard."""
llm = PredictionGuard(model="Hermes-3-Llama-3.1-8B")
output = llm.invoke("Tell a joke.")
assert isinstance(output, str)
def test_predictionguard_pii() -> None:
llm = PredictionGuard(
model="Hermes-3-Llama-3.1-8B",
predictionguard_input={"pii": "block"},
max_tokens=100,
temperature=1.0,
)
messages = [
"Hello, my name is John Doe and my SSN is 111-22-3333",
]
with pytest.raises(ValueError, match=r"Could not make prediction. pii detected"):
llm.invoke(messages)
|
"""Test Prediction Guard API wrapper."""
import pytest
from langchain_community.llms.predictionguard import PredictionGuard
def test_predictionguard_invoke() -> None:
"""Test valid call to prediction guard."""
llm = PredictionGuard(model="Hermes-3-Llama-3.1-8B") # type: ignore[call-arg]
output = llm.invoke("Tell a joke.")
assert isinstance(output, str)
def test_predictionguard_pii() -> None:
llm = PredictionGuard(
model="Hermes-3-Llama-3.1-8B",
predictionguard_input={"pii": "block"},
max_tokens=100,
temperature=1.0,
)
messages = [
"Hello, my name is John Doe and my SSN is 111-22-3333",
]
with pytest.raises(ValueError, match=r"Could not make prediction. pii detected"):
llm.invoke(messages)
|
from dataclasses import dataclass
from typing import Optional
@dataclass
class HubExecutor:
"""Basic Executor Data Class from Hubble"""
    uuid: Optional[str] = None
name: Optional[str] = None
commit_id: Optional[str] = None
tag: Optional[str] = None
visibility: Optional[bool] = None
image_name: Optional[str] = None
archive_url: Optional[str] = None
md5sum: Optional[str] = None
build_env: Optional[list] = None
|
from dataclasses import dataclass
from typing import Optional
@dataclass
class HubExecutor:
"""Basic Executor Data Class from Hubble"""
    uuid: Optional[str] = None
name: Optional[str] = None
commit_id: Optional[str] = None
tag: Optional[str] = None
visibility: Optional[bool] = None
image_name: Optional[str] = None
archive_url: Optional[str] = None
md5sum: Optional[str] = None
|
import pytest
from jina import Flow
from jina.enums import ProtocolType
from tests import random_docs
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
@pytest.mark.parametrize('changeto_protocol', ['grpc', 'http', 'websocket'])
def test_change_gateway(protocol, changeto_protocol):
f = Flow(protocol=protocol).add().add().add(needs='executor1').needs_all()
with f:
da = f.post('/', random_docs(10))
assert len(da) == 10
with pytest.raises(RuntimeError):
f.protocol = changeto_protocol
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
def test_client_gateway_in_flow(protocol):
f = Flow(protocol=protocol, port=12345)
assert f.client.args.protocol == ProtocolType.from_string(protocol)
# gateway_args returns multiple protocols
assert f.gateway_args.protocol[0] == ProtocolType.from_string(protocol)
# flow returns single or multiple protocols
assert f.protocol == ProtocolType.from_string(protocol)
assert f.client.args.port == 12345
# gateway_args returns multiple ports
assert f.gateway_args.port[0] == 12345
# flow returns single or multiple ports
assert f.port == 12345
f._update_network_interface(port=54321)
assert f.client.args.port == 54321
assert f.gateway_args.port[0] == 54321
|
import pytest
from jina import Flow
from jina.enums import GatewayProtocolType
from tests import random_docs
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
@pytest.mark.parametrize('changeto_protocol', ['grpc', 'http', 'websocket'])
def test_change_gateway(protocol, changeto_protocol):
f = Flow(protocol=protocol).add().add().add(needs='executor1').needs_all()
with f:
da = f.post('/', random_docs(10))
assert len(da) == 10
with pytest.raises(RuntimeError):
f.protocol = changeto_protocol
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
def test_client_gateway_in_flow(protocol):
f = Flow(protocol=protocol, port=12345)
assert f.client.args.protocol == GatewayProtocolType.from_string(protocol)
# gateway_args returns multiple protocols
assert f.gateway_args.protocol[0] == GatewayProtocolType.from_string(protocol)
# flow returns single or multiple protocols
assert f.protocol == GatewayProtocolType.from_string(protocol)
assert f.client.args.port == 12345
# gateway_args returns multiple ports
assert f.gateway_args.port[0] == 12345
# flow returns single or multiple ports
assert f.port == 12345
f._update_network_interface(port=54321)
assert f.client.args.port == 54321
assert f.gateway_args.port[0] == 54321
|
from fastapi import FastAPI, Query
app = FastAPI()
@app.get("/items/")
async def read_items(q: str | None = Query(min_length=3)):
results = {"items": [{"item_id": "Foo"}, {"item_id": "Bar"}]}
if q:
results.update({"q": q})
return results
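# Hedged usage sketch: exercising the min_length validation with FastAPI's TestClient.
# Values shorter than 3 characters are rejected with a 422 validation error.
from fastapi.testclient import TestClient

client = TestClient(app)
print(client.get("/items/", params={"q": "ab"}).status_code)   # expected: 422
print(client.get("/items/", params={"q": "abc"}).status_code)  # expected: 200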
|
from fastapi import FastAPI, Query
app = FastAPI()
@app.get("/items/")
async def read_items(q: str | None = Query(default=..., min_length=3)):
results = {"items": [{"item_id": "Foo"}, {"item_id": "Bar"}]}
if q:
results.update({"q": q})
return results
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import TEXT_FILE_FORMATS
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='TextUrl')
@_register_proto(proto_type_name='text_url')
class TextUrl(AnyUrl):
"""
URL to a text file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
):
import os
from urllib.parse import urlparse
url = super().validate(value, field, config) # basic url validation
path = urlparse(url).path
ext = os.path.splitext(path)[1][1:].lower()
# pass test if extension is valid or no extension
has_valid_text_extension = ext in TEXT_FILE_FORMATS or ext == ''
if not has_valid_text_extension:
raise ValueError('Text URL must have a valid extension')
return cls(str(url), scheme=None)
def load(self, charset: str = 'utf-8', timeout: Optional[float] = None) -> str:
"""
Load the text file into a string.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDoc
from docarray.typing import TextUrl
class MyDoc(BaseDoc):
remote_url: TextUrl
local_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
local_url='home/username/my_file.txt',
)
remote_txt = doc.remote_url.load()
print(remote_txt)
# prints: ```<!DOCTYPE html>\n<html class="client-nojs" ... > ...```
local_txt = doc.local_url.load()
print(local_txt)
# prints content of my_file.txt
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:param charset: decoding charset; may be any character set registered with IANA
:return: the text file content
"""
_bytes = self.load_bytes(timeout=timeout)
return _bytes.decode(charset)
|
from typing import Optional, TYPE_CHECKING, TypeVar, Type, Union, Any
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import TEXT_FILE_FORMATS
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='TextUrl')
@_register_proto(proto_type_name='text_url')
class TextUrl(AnyUrl):
"""
URL to a text file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
):
import os
from urllib.parse import urlparse
url = super().validate(value, field, config) # basic url validation
path = urlparse(url).path
ext = os.path.splitext(path)[1][1:].lower()
# pass test if extension is valid or no extension
has_valid_text_extension = ext in TEXT_FILE_FORMATS or ext == ''
if not has_valid_text_extension:
raise ValueError('Text URL must have a valid extension')
return cls(str(url), scheme=None)
def load(self, charset: str = 'utf-8', timeout: Optional[float] = None) -> str:
"""
Load the text file into a string.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
from docarray.typing import TextUrl
class MyDoc(BaseDocument):
remote_url: TextUrl
local_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
local_url='home/username/my_file.txt',
)
remote_txt = doc.remote_url.load()
print(remote_txt)
# prints: ```<!DOCTYPE html>\n<html class="client-nojs" ... > ...```
local_txt = doc.local_url.load()
print(local_txt)
# prints content of my_file.txt
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:param charset: decoding charset; may be any character set registered with IANA
:return: the text file content
"""
_bytes = self.load_bytes(timeout=timeout)
return _bytes.decode(charset)
|
"""Test indices/utils.py."""
import pytest
from llama_index.core.indices.utils import expand_tokens_with_subtokens
def test_expand_tokens_with_subtokens() -> None:
"""Test expand tokens."""
tokens = {"foo bar", "baz", "hello hello world bye"}
keywords = expand_tokens_with_subtokens(tokens)
assert keywords == {
"foo bar",
"foo",
"bar",
"baz",
"hello hello world bye",
"hello",
"world",
"bye",
}
parse_choice_test_lines = [
""" Doc: 2, Relevance: 8 (The document mentions taking a "tasty turn around Barcelona\'s Santa Caterina market" and listening to an episode about Barcelona.)\nDoc: 4, Relevance: 6 (The document mentions Ferramenta in Barcelona and recommends cocktails and pasta dishes that can be tried there.)""",
"Doc: 2, Relevance: 8\nDoc: 4, Relevance: 6",
"answer_num: 2, answer_relevance:8\nanswer_num: 4, answer_relevance:6",
]
@pytest.mark.parametrize("answer", parse_choice_test_lines)
def test_default_parse_choice_select_answer_fn(answer):
from llama_index.core.indices.utils import default_parse_choice_select_answer_fn
answer_nums, answer_relevances = default_parse_choice_select_answer_fn(answer, 5)
assert answer_nums == [2, 4]
assert answer_relevances == [8, 6]
|
"""Test indices/utils.py."""
import pytest
from llama_index.core.indices.utils import expand_tokens_with_subtokens
def test_expand_tokens_with_subtokens() -> None:
"""Test expand tokens."""
tokens = {"foo bar", "baz", "hello hello world bye"}
keywords = expand_tokens_with_subtokens(tokens)
assert keywords == {
"foo bar",
"foo",
"bar",
"baz",
"hello hello world bye",
"hello",
"world",
"bye",
}
parse_choice_test_lines = [
""" Doc: 2, Relevance: 8 (The document mentions taking a "tasty turn around Barcelona\'s Santa Caterina market" and listening to an episode about Barcelona.)\nDoc: 4, Relevance: 6 (The document mentions Ferramenta in Barcelona and recommends cocktails and pasta dishes that can be tried there.)""",
"Doc: 2, Relevance: 8\nDoc: 4, Relevance: 6",
"answer_num: 2, answer_relevance:8\nanswer_num: 4, answer_relevance:6",
]
@pytest.mark.parametrize("answer", parse_choice_test_lines)
def test_default_parse_choice_select_answer_fn(answer):
from llama_index.core.indices.utils import default_parse_choice_select_answer_fn
answer_nums, answer_relevances = default_parse_choice_select_answer_fn(answer, 5)
assert answer_nums == [2, 4]
assert answer_relevances == [8, 6]
|
import pytest
from google.cloud.aiplatform_v1beta1 import FunctionCall
from llama_index.core.base.llms.types import (
ChatMessage,
MessageRole,
TextBlock,
ImageBlock,
)
from llama_index.llms.vertex.gemini_utils import (
convert_chat_message_to_gemini_content,
is_gemini_model,
)
def test_is_gemini_model():
assert is_gemini_model("gemini-2.0-flash") is True
assert is_gemini_model("chat-bison") is False
def test_convert_chat_message_to_gemini_content_with_function_call():
message = ChatMessage(
role=MessageRole.ASSISTANT,
content="",
additional_kwargs={
"tool_calls": [
FunctionCall(
name="test_fn",
args={"arg1": "val1"},
)
]
},
)
result = convert_chat_message_to_gemini_content(message=message, is_history=True)
assert result.role == "model"
assert len(result.parts) == 1
assert result.parts[0].function_call is not None
assert result.parts[0].function_call.name == "test_fn"
assert result.parts[0].function_call.args == {"arg1": "val1"}
def test_convert_chat_message_to_gemini_content_with_content():
message = ChatMessage(
role=MessageRole.USER,
content="test content",
)
result = convert_chat_message_to_gemini_content(message=message, is_history=True)
assert result.role == "user"
assert result.text == "test content"
assert len(result.parts) == 1
assert result.parts[0].text == "test content"
assert result.parts[0].function_call is None
def test_convert_chat_message_to_gemini_content_no_history():
message = ChatMessage(
role=MessageRole.USER,
content="test content",
)
result = convert_chat_message_to_gemini_content(message=message, is_history=False)
assert len(result) == 1
assert result[0].text == "test content"
assert result[0].function_call is None
def test_convert_chat_message_with_text_block():
message = ChatMessage(
role=MessageRole.USER,
blocks=[TextBlock(text="Hello, world!")],
)
result = convert_chat_message_to_gemini_content(message=message, is_history=True)
assert result.role == "user"
assert len(result.parts) == 1
assert result.parts[0].text == "Hello, world!"
assert result.parts[0].function_call is None
def test_convert_chat_message_with_multiple_text_blocks():
message = ChatMessage(
role=MessageRole.USER,
blocks=[
TextBlock(text="Hi, "),
TextBlock(text="there!"),
],
)
result = convert_chat_message_to_gemini_content(message=message, is_history=True)
assert result.role == "user"
assert len(result.parts) == 2
assert result.parts[0].text == "Hi, "
assert result.parts[1].text == "there!"
def test_convert_chat_message_with_empty_text_block():
message = ChatMessage(
role=MessageRole.USER,
blocks=[TextBlock(text="")],
)
result = convert_chat_message_to_gemini_content(message=message, is_history=True)
assert result.role == "user"
assert len(result.parts) == 0
def test_convert_chat_message_with_invalid_image_block():
message = ChatMessage(
role=MessageRole.USER,
blocks=[ImageBlock(path=None, image=None, url=None)],
)
with pytest.raises(
ValueError, match="ImageBlock must have either path, url, or image data"
):
convert_chat_message_to_gemini_content(message=message, is_history=True)
|
from google.cloud.aiplatform_v1beta1 import FunctionCall
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.llms.vertex.gemini_utils import (
convert_chat_message_to_gemini_content,
is_gemini_model,
)
def test_is_gemini_model():
assert is_gemini_model("gemini-2.0-flash") is True
assert is_gemini_model("chat-bison") is False
def test_convert_chat_message_to_gemini_content_with_function_call():
message = ChatMessage(
role=MessageRole.ASSISTANT,
content="",
additional_kwargs={
"tool_calls": [
FunctionCall(
name="test_fn",
args={"arg1": "val1"},
)
]
},
)
result = convert_chat_message_to_gemini_content(message=message, is_history=True)
assert result.role == "model"
assert len(result.parts) == 1
assert result.parts[0].function_call is not None
assert result.parts[0].function_call.name == "test_fn"
assert result.parts[0].function_call.args == {"arg1": "val1"}
def test_convert_chat_message_to_gemini_content_with_content():
message = ChatMessage(
role=MessageRole.USER,
content="test content",
)
result = convert_chat_message_to_gemini_content(message=message, is_history=True)
assert result.role == "user"
assert result.text == "test content"
assert len(result.parts) == 1
assert result.parts[0].text == "test content"
assert result.parts[0].function_call is None
def test_convert_chat_message_to_gemini_content_no_history():
message = ChatMessage(
role=MessageRole.USER,
content="test content",
)
result = convert_chat_message_to_gemini_content(message=message, is_history=False)
assert len(result) == 1
assert result[0].text == "test content"
assert result[0].function_call is None
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
class BaseBBoxCoder(metaclass=ABCMeta):
"""Base bounding box coder.
Args:
use_box_type (bool): Whether to warp decoded boxes with the
boxlist data structure. Defaults to False.
"""
# The size of the last of dimension of the encoded tensor.
encode_size = 4
def __init__(self, use_box_type: bool = False, **kwargs):
self.use_box_type = use_box_type
@abstractmethod
def encode(self, bboxes, gt_bboxes):
"""Encode deltas between bboxes and ground truth boxes."""
@abstractmethod
def decode(self, bboxes, bboxes_pred):
"""Decode the predicted bboxes according to prediction and base
boxes."""
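# Hedged sketch (illustrative, not an mmdet class): a minimal concrete coder showing
# how the abstract interface is satisfied. It simply passes boxes through unchanged.
class IdentityBBoxCoder(BaseBBoxCoder):
    """Toy coder that returns its inputs unchanged."""

    def encode(self, bboxes, gt_bboxes):
        # A real coder would compute regression targets (deltas) here.
        return gt_bboxes

    def decode(self, bboxes, bboxes_pred):
        # A real coder would apply predicted deltas to the base boxes here.
        return bboxes_pred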
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
class BaseBBoxCoder(metaclass=ABCMeta):
"""Base bounding box coder."""
def __init__(self, **kwargs):
pass
@abstractmethod
def encode(self, bboxes, gt_bboxes):
"""Encode deltas between bboxes and ground truth boxes."""
@abstractmethod
def decode(self, bboxes, bboxes_pred):
"""Decode the predicted bboxes according to prediction and base
boxes."""
|
from typing import Any, Dict, Optional
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.core.bridge.pydantic import Field
from llama_index.core.constants import (
DEFAULT_CONTEXT_WINDOW,
DEFAULT_NUM_OUTPUTS,
DEFAULT_TEMPERATURE,
)
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from llama_index.llms.openai_like import OpenAILike
DEFAULT_API_BASE = "https://api.sarvam.ai/v1"
DEFAULT_MODEL = "servam-m"
class Servam(OpenAILike):
"""
Servam LLM.
To instantiate the `Servam` class, you will need to provide an API key. You can set the API key either as an environment variable `SERVAM_API_KEY` or directly in the class
constructor. If setting it in the class constructor, it would look like this:
If you haven't signed up for an API key yet, you can do so on the Servam website at (https://servam.ai). Once you have your API key, you can use the `Servam` class to interact
with the LLM for tasks like chatting, streaming, and completing prompts.
Examples:
`pip install llama-index-llms-servam`
```python
from llama_index.llms.servam import Servam
llm = Servam(
api_key="<your-api-key>",
max_tokens=256,
context_window=4096,
model="servam-m",
)
response = llm.complete("Hello World!")
print(str(response))
```
"""
model: str = Field(description="The Servam model to use.")
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW,
description="The maximum number of context tokens for the model.",
gt=0,
)
is_chat_model: bool = Field(
default=True,
description=LLMMetadata.model_fields["is_chat_model"].description,
)
def __init__(
self,
model: str = DEFAULT_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 5,
api_base: Optional[str] = DEFAULT_API_BASE,
api_key: Optional[str] = None,
**kwargs: Any,
) -> None:
additional_kwargs = additional_kwargs or {}
api_base = get_from_param_or_env("api_base", api_base, "SERVAM_API_BASE")
api_key = get_from_param_or_env("api_key", api_key, "SERVAM_API_KEY")
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=api_base,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "Servam_LLM"
|
from typing import Any, Dict, Optional
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.core.bridge.pydantic import Field
from llama_index.core.constants import (
DEFAULT_CONTEXT_WINDOW,
DEFAULT_NUM_OUTPUTS,
DEFAULT_TEMPERATURE,
)
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from llama_index.llms.openai_like import OpenAILike
DEFAULT_API_BASE = "https://api.sarvam.ai/v1"
DEFAULT_MODEL = "servam-m"
class Servam(OpenAILike):
"""
Servam LLM.
To instantiate the `Servam` class, you will need to provide an API key. You can set the API key either as an environment variable `SERVAM_API_KEY` or directly in the class
constructor. If setting it in the class constructor, it would look like this:
If you haven't signed up for an API key yet, you can do so on the Servam website at (https://servam.ai). Once you have your API key, you can use the `Servam` class to interact
with the LLM for tasks like chatting, streaming, and completing prompts.
Examples:
`pip install llama-index-llms-servam`
```python
from llama_index.llms.servam import Servam
llm = Servam(
api_key="<your-api-key>",
max_tokens=256,
context_window=4096,
model="servam-m",
)
response = llm.complete("Hello World!")
print(str(response))
```
"""
model: str = Field(
description="The Servam model to use."
)
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW,
description="The maximum number of context tokens for the model.",
gt=0,
)
is_chat_model: bool = Field(
default=True,
description=LLMMetadata.model_fields["is_chat_model"].description,
)
def __init__(
self,
model: str = DEFAULT_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 5,
api_base: Optional[str] = DEFAULT_API_BASE,
api_key: Optional[str] = None,
**kwargs: Any,
) -> None:
additional_kwargs = additional_kwargs or {}
api_base = get_from_param_or_env("api_base", api_base, "SERVAM_API_BASE")
api_key = get_from_param_or_env("api_key", api_key, "SERVAM_API_KEY")
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=api_base,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "Servam_LLM"
|
from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.5.0"
@keras_export("keras.version")
def version():
return __version__
|
from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.4.1"
@keras_export("keras.version")
def version():
return __version__
|
"""Comparison evaluators.
This module contains evaluators for comparing the output of two models,
be they LLMs, Chains, or otherwise. This can be used for scoring
preferences, measuring similarity / semantic equivalence between outputs,
or any other comparison task.
Example:
>>> from langchain_community.chat_models import ChatOpenAI
>>> from langchain.evaluation.comparison import PairwiseStringEvalChain
>>> llm = ChatOpenAI(temperature=0)
>>> chain = PairwiseStringEvalChain.from_llm(llm=llm)
>>> result = chain.evaluate_string_pairs(
... input = "What is the chemical formula for water?",
... prediction = "H2O",
... prediction_b = (
... "The chemical formula for water is H2O, which means"
... " there are two hydrogen atoms and one oxygen atom."
    ...     ),
    ...     reference = "The chemical formula for water is H2O.",
... )
>>> print(result)
# {
# "value": "B",
# "comment": "Both responses accurately state"
# " that the chemical formula for water is H2O."
# " However, Response B provides additional information"
# . " by explaining what the formula means.\\n[[B]]"
# }
"""
from langchain.evaluation.comparison.eval_chain import (
LabeledPairwiseStringEvalChain,
PairwiseStringEvalChain,
)
__all__ = ["LabeledPairwiseStringEvalChain", "PairwiseStringEvalChain"]
|
"""Comparison evaluators.
This module contains evaluators for comparing the output of two models,
be they LLMs, Chains, or otherwise. This can be used for scoring
preferences, measuring similarity / semantic equivalence between outputs,
or any other comparison task.
Example:
>>> from langchain_community.chat_models import ChatOpenAI
>>> from langchain.evaluation.comparison import PairwiseStringEvalChain
>>> llm = ChatOpenAI(temperature=0)
>>> chain = PairwiseStringEvalChain.from_llm(llm=llm)
>>> result = chain.evaluate_string_pairs(
... input = "What is the chemical formula for water?",
... prediction = "H2O",
... prediction_b = (
... "The chemical formula for water is H2O, which means"
... " there are two hydrogen atoms and one oxygen atom."
    ...     ),
    ...     reference = "The chemical formula for water is H2O.",
... )
>>> print(result)
# {
# "value": "B",
# "comment": "Both responses accurately state"
# " that the chemical formula for water is H2O."
# " However, Response B provides additional information"
# . " by explaining what the formula means.\\n[[B]]"
# }
"""
from langchain.evaluation.comparison.eval_chain import (
LabeledPairwiseStringEvalChain,
PairwiseStringEvalChain,
)
__all__ = ["PairwiseStringEvalChain", "LabeledPairwiseStringEvalChain"]
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "4.0.1.dev0"
from .arrow_dataset import Column, Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableColumn, IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
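# Hedged usage sketch (comment only, nothing executed at import time): the public
# entry points re-exported above are typically driven through load_dataset, e.g.
#   from datasets import load_dataset
#   ds = load_dataset("squad", split="train")
# "squad" here is just an example dataset name.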
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "4.0.0"
from .arrow_dataset import Column, Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableColumn, IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
_base_ = './mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
bgr_to_rgb=False),
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './mask_rcnn_r101_fpn_1x_coco.py'
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
to_rgb=False,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
"""Text to Image tool spec."""
from io import BytesIO
from typing import List, Optional
import openai
import requests
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class TextToImageToolSpec(BaseToolSpec):
"""Text to Image tool spec."""
spec_functions = ["generate_images", "show_images", "generate_image_variation"]
def __init__(self, api_key: Optional[str] = None) -> None:
if api_key:
openai.api_key = api_key
def generate_images(
self, prompt: str, n: Optional[int] = 1, size: Optional[str] = "256x256"
) -> List[str]:
"""
Pass a prompt to OpenAIs text to image API to produce an image from the supplied query.
Args:
prompt (str): The prompt to generate an image(s) based on
n (int): The number of images to generate. Defaults to 1.
size (str): The size of the image(s) to generate. Defaults to 256x256. Other accepted values are 1024x1024 and 512x512
When handling the urls returned from this function, NEVER strip any parameters or try to modify the url, they are necessary for authorization to view the image
"""
try:
response = openai.Image.create(prompt=prompt, n=n, size=size)
return [image["url"] for image in response["data"]]
except openai.error.OpenAIError as e:
return e.error
def generate_image_variation(
self, url: str, n: Optional[int] = 1, size: Optional[str] = "256x256"
    ) -> List[str]:
"""
Accepts the url of an image and uses OpenAIs api to generate a variation of the image.
This tool can take smaller images and create higher resolution variations, or vice versa.
When passing a url from "generate_images" ALWAYS pass the url exactly as it was returned from the function, including ALL query parameters
        Args:
url (str): The url of the image to create a variation of
n (int): The number of images to generate. Defaults to 1.
size (str): The size of the image(s) to generate. Defaults to 256x256. Other accepted values are 1024x1024 and 512x512
"""
try:
response = openai.Image.create_variation(
image=BytesIO(requests.get(url).content).getvalue(), n=n, size=size
)
return [image["url"] for image in response["data"]]
except openai.error.OpenAIError as e:
return e.error
def show_images(self, urls: List[str]):
"""
Use this function to display image(s) using pyplot and pillow. This works in a jupyter notebook.
Args:
urls (str): The url(s) of the image(s) to show
"""
import matplotlib.pyplot as plt
from PIL import Image
for url in urls:
plt.figure()
plt.imshow(Image.open(BytesIO(requests.get(url).content)))
return "images rendered successfully"
|
"""Text to Image tool spec."""
from io import BytesIO
from typing import List, Optional
import openai
import requests
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class TextToImageToolSpec(BaseToolSpec):
"""Text to Image tool spec."""
spec_functions = ["generate_images", "show_images", "generate_image_variation"]
def __init__(self, api_key: Optional[str] = None) -> None:
if api_key:
openai.api_key = api_key
def generate_images(
self, prompt: str, n: Optional[int] = 1, size: Optional[str] = "256x256"
) -> List[str]:
"""
Pass a prompt to OpenAIs text to image API to produce an image from the supplied query.
Args:
prompt (str): The prompt to generate an image(s) based on
n (int): The number of images to generate. Defaults to 1.
size (str): The size of the image(s) to generate. Defaults to 256x256. Other accepted values are 1024x1024 and 512x512
When handling the urls returned from this function, NEVER strip any parameters or try to modify the url, they are necessary for authorization to view the image
"""
try:
response = openai.Image.create(prompt=prompt, n=n, size=size)
return [image["url"] for image in response["data"]]
except openai.error.OpenAIError as e:
return e.error
def generate_image_variation(
self, url: str, n: Optional[int] = 1, size: Optional[str] = "256x256"
    ) -> List[str]:
"""
Accepts the url of an image and uses OpenAIs api to generate a variation of the image.
This tool can take smaller images and create higher resolution variations, or vice versa.
When passing a url from "generate_images" ALWAYS pass the url exactly as it was returned from the function, including ALL query parameters
        Args:
url (str): The url of the image to create a variation of
n (int): The number of images to generate. Defaults to 1.
size (str): The size of the image(s) to generate. Defaults to 256x256. Other accepted values are 1024x1024 and 512x512
"""
try:
response = openai.Image.create_variation(
image=BytesIO(requests.get(url).content).getvalue(), n=n, size=size
)
return [image["url"] for image in response["data"]]
except openai.error.OpenAIError as e:
return e.error
def show_images(self, urls: List[str]):
"""
Use this function to display image(s) using pyplot and pillow. This works in a jupyter notebook.
Args:
urls (str): The url(s) of the image(s) to show
"""
import matplotlib.pyplot as plt
from PIL import Image
for url in urls:
plt.figure()
plt.imshow(Image.open(BytesIO(requests.get(url).content)))
return "images rendered successfully"
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import List, Optional, Sequence
import torch
from torch.nn.parameter import Parameter
from torch.nn.utils import clip_grad
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class OptimizerHook(Hook):
"""A hook contains custom operations for the optimizer.
Args:
grad_clip (dict, optional): A config dict to control the clip_grad.
Defaults to None.
detect_anomalous_params (bool): This option is only used for
debugging which will slow down the training speed.
Detect anomalous parameters that are not included in
the computational graph with ``loss`` as the root.
There are two cases
- Parameters were not used during
forward pass.
- Parameters were not used to produce
loss.
Defaults to False.
"""
priority = 'HIGH'
def __init__(self,
grad_clip: Optional[dict] = None,
detect_anomalous_params: bool = False) -> None:
self.grad_clip = grad_clip
self.detect_anomalous_params = detect_anomalous_params
def clip_grads(self, params: List[Parameter]) -> Optional[torch.Tensor]:
"""Clip the gradients of parameters.
Args:
params (list[Parameter]): Model's parameters.
Returns:
Optional[torch.Tensor]: Total norm of the parameters if there is
at least one param requiring gradient, else None.
"""
params = list(
filter(lambda p: p.requires_grad and p.grad is not None, params))
if len(params) > 0:
return clip_grad.clip_grad_norm_(params, **self.grad_clip)
return None
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""All operations need to be finished after each training iteration.
        This function will finish the following operations:
- Detect any anomalous parameters which are not included in the
training graph. (optional)
- Compute the gradient of model parameters.
- Clip the gradients of each parameter. (optional)
- Update model parameters with gradients.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[dict], optional): Data from dataloader.
In order to keep this interface consistent with other hooks,
we keep ``data_batch`` here. Defaults to None.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks,
we keep ``outputs`` here. Defaults to None.
"""
runner.optimizer.zero_grad()
if self.detect_anomalous_params:
self.detect_anomalous_parameters(runner.outputs['loss'], runner)
runner.outputs['loss'].backward()
if self.grad_clip is not None:
grad_norm = self.clip_grads(runner.model.parameters())
if grad_norm is not None:
# Add grad norm to the logger
runner.log_buffer.update({'grad_norm': float(grad_norm)},
runner.outputs['num_samples'])
runner.optimizer.step()
def detect_anomalous_parameters(self, loss: torch.Tensor, runner) -> None:
"""Detect anomalous parameters that are not included in the graph.
Args:
loss (torch.Tensor): The loss of current iteration.
runner (Runner): The runner of the training process.
"""
logger = runner.logger
parameters_in_graph = set()
visited = set()
def traverse(grad_fn):
if grad_fn is None:
return
if grad_fn not in visited:
visited.add(grad_fn)
if hasattr(grad_fn, 'variable'):
parameters_in_graph.add(grad_fn.variable)
parents = grad_fn.next_functions
if parents is not None:
for parent in parents:
grad_fn = parent[0]
traverse(grad_fn)
traverse(loss.grad_fn)
for n, p in runner.model.named_parameters():
if p not in parameters_in_graph and p.requires_grad:
logger.log(
level=logging.ERROR,
msg=f'{n} with shape {p.size()} is not '
f'in the computational graph \n')
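# Hedged usage sketch (illustrative values, not defaults): a typical grad_clip config
# forwards max_norm/norm_type to torch.nn.utils.clip_grad_norm_.
if __name__ == "__main__":
    hook = OptimizerHook(
        grad_clip=dict(max_norm=35, norm_type=2), detect_anomalous_params=True)
    print(hook.grad_clip)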
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import List, Optional, Sequence
import torch
from torch.nn.parameter import Parameter
from torch.nn.utils import clip_grad
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class OptimizerHook(Hook):
"""A hook contains custom operations for the optimizer.
Args:
grad_clip (dict, optional): A config dict to control the clip_grad.
Defaults to None.
detect_anomalous_params (bool): This option is only used for
debugging which will slow down the training speed.
Detect anomalous parameters that are not included in
the computational graph with ``loss`` as the root.
There are two cases
- Parameters were not used during
forward pass.
- Parameters were not used to produce
loss.
Defaults to False.
"""
priority = 'HIGH'
def __init__(self,
grad_clip: Optional[dict] = None,
detect_anomalous_params: bool = False) -> None:
self.grad_clip = grad_clip
self.detect_anomalous_params = detect_anomalous_params
def clip_grads(self, params: List[Parameter]) -> Optional[torch.Tensor]:
"""Clip the gradients of parameters.
Args:
params (list[Parameter]): Model's parameters.
Returns:
Optional[torch.Tensor]: Total norm of the parameters if there is
at least one param requiring gradient, else None.
"""
params = list(
filter(lambda p: p.requires_grad and p.grad is not None, params))
if len(params) > 0:
return clip_grad.clip_grad_norm_(params, **self.grad_clip)
return None
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""All operations need to be finished after each training iteration.
        This function will finish the following operations:
- Detect any anomalous parameters which are not included in the
training graph. (optional)
- Compute the gradient of model parameters.
- Clip the gradients of each parameter. (optional)
- Update model parameters with gradients.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[dict], optional): Data from dataloader.
In order to keep this interface consistent with other hooks,
we keep ``data_batch`` here. Defaults to None.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks,
we keep ``outputs`` here. Defaults to None.
"""
runner.optimizer.zero_grad()
runner.message_hub.update_scalar(
'train/lr', runner.optimizer.param_groups[0]['lr'])
if self.detect_anomalous_params:
self.detect_anomalous_parameters(runner.outputs['loss'], runner)
runner.outputs['loss'].backward()
if self.grad_clip is not None:
grad_norm = self.clip_grads(runner.model.parameters())
if grad_norm is not None:
# Add grad norm to the logger
runner.log_buffer.update({'grad_norm': float(grad_norm)},
runner.outputs['num_samples'])
runner.optimizer.step()
def detect_anomalous_parameters(self, loss: torch.Tensor, runner) -> None:
"""Detect anomalous parameters that are not included in the graph.
Args:
loss (torch.Tensor): The loss of current iteration.
runner (Runner): The runner of the training process.
"""
logger = runner.logger
parameters_in_graph = set()
visited = set()
def traverse(grad_fn):
if grad_fn is None:
return
if grad_fn not in visited:
visited.add(grad_fn)
if hasattr(grad_fn, 'variable'):
parameters_in_graph.add(grad_fn.variable)
parents = grad_fn.next_functions
if parents is not None:
for parent in parents:
grad_fn = parent[0]
traverse(grad_fn)
traverse(loss.grad_fn)
for n, p in runner.model.named_parameters():
if p not in parameters_in_graph and p.requires_grad:
logger.log(
level=logging.ERROR,
msg=f'{n} with shape {p.size()} is not '
f'in the computational graph \n')
|
import logging
from typing import Any, List, Optional, Sequence
from llama_index.core.indices.base import BaseIndex
from llama_index.core.indices.composability.graph import ComposableGraph
from llama_index.core.indices.registry import INDEX_STRUCT_TYPE_TO_INDEX_CLASS
from llama_index.core.storage.storage_context import StorageContext
logger = logging.getLogger(__name__)
def load_index_from_storage(
storage_context: StorageContext,
index_id: Optional[str] = None,
**kwargs: Any,
) -> BaseIndex:
"""
Load index from storage context.
Args:
storage_context (StorageContext): storage context containing
docstore, index store and vector store.
index_id (Optional[str]): ID of the index to load.
Defaults to None, which assumes there's only a single index
in the index store and load it.
**kwargs: Additional keyword args to pass to the index constructors.
"""
index_ids: Optional[Sequence[str]]
if index_id is None:
index_ids = None
else:
index_ids = [index_id]
indices = load_indices_from_storage(storage_context, index_ids=index_ids, **kwargs)
if len(indices) == 0:
raise ValueError(
"No index in storage context, check if you specified the right persist_dir."
)
elif len(indices) > 1:
raise ValueError(
f"Expected to load a single index, but got {len(indices)} instead. "
"Please specify index_id."
)
return indices[0]
def load_indices_from_storage(
storage_context: StorageContext,
index_ids: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> List[BaseIndex]:
"""
Load multiple indices from storage context.
Args:
storage_context (StorageContext): storage context containing
docstore, index store and vector store.
index_id (Optional[Sequence[str]]): IDs of the indices to load.
Defaults to None, which loads all indices in the index store.
**kwargs: Additional keyword args to pass to the index constructors.
"""
if index_ids is None:
logger.info("Loading all indices.")
index_structs = storage_context.index_store.index_structs()
else:
logger.info(f"Loading indices with ids: {index_ids}")
index_structs = []
for index_id in index_ids:
index_struct = storage_context.index_store.get_index_struct(index_id)
if index_struct is None:
raise ValueError(f"Failed to load index with ID {index_id}")
index_structs.append(index_struct)
indices = []
for index_struct in index_structs:
type_ = index_struct.get_type()
index_cls = INDEX_STRUCT_TYPE_TO_INDEX_CLASS[type_]
index = index_cls(
index_struct=index_struct, storage_context=storage_context, **kwargs
)
indices.append(index)
return indices
def load_graph_from_storage(
storage_context: StorageContext,
root_id: str,
**kwargs: Any,
) -> ComposableGraph:
"""
Load composable graph from storage context.
Args:
storage_context (StorageContext): storage context containing
docstore, index store and vector store.
root_id (str): ID of the root index of the graph.
**kwargs: Additional keyword args to pass to the index constructors.
"""
indices = load_indices_from_storage(storage_context, index_ids=None, **kwargs)
all_indices = {index.index_id: index for index in indices}
return ComposableGraph(all_indices=all_indices, root_id=root_id)
|
import logging
from typing import Any, List, Optional, Sequence
from llama_index.core.indices.base import BaseIndex
from llama_index.core.indices.composability.graph import ComposableGraph
from llama_index.core.indices.registry import INDEX_STRUCT_TYPE_TO_INDEX_CLASS
from llama_index.core.storage.storage_context import StorageContext
logger = logging.getLogger(__name__)
def load_index_from_storage(
storage_context: StorageContext,
index_id: Optional[str] = None,
**kwargs: Any,
) -> BaseIndex:
"""Load index from storage context.
Args:
storage_context (StorageContext): storage context containing
docstore, index store and vector store.
index_id (Optional[str]): ID of the index to load.
Defaults to None, which assumes there is only a single index
in the index store and loads it.
**kwargs: Additional keyword args to pass to the index constructors.
"""
index_ids: Optional[Sequence[str]]
if index_id is None:
index_ids = None
else:
index_ids = [index_id]
indices = load_indices_from_storage(storage_context, index_ids=index_ids, **kwargs)
if len(indices) == 0:
raise ValueError(
"No index in storage context, check if you specified the right persist_dir."
)
elif len(indices) > 1:
raise ValueError(
f"Expected to load a single index, but got {len(indices)} instead. "
"Please specify index_id."
)
return indices[0]
def load_indices_from_storage(
storage_context: StorageContext,
index_ids: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> List[BaseIndex]:
"""Load multiple indices from storage context.
Args:
storage_context (StorageContext): storage context containing
docstore, index store and vector store.
index_ids (Optional[Sequence[str]]): IDs of the indices to load.
Defaults to None, which loads all indices in the index store.
**kwargs: Additional keyword args to pass to the index constructors.
"""
if index_ids is None:
logger.info("Loading all indices.")
index_structs = storage_context.index_store.index_structs()
else:
logger.info(f"Loading indices with ids: {index_ids}")
index_structs = []
for index_id in index_ids:
index_struct = storage_context.index_store.get_index_struct(index_id)
if index_struct is None:
raise ValueError(f"Failed to load index with ID {index_id}")
index_structs.append(index_struct)
indices = []
for index_struct in index_structs:
type_ = index_struct.get_type()
index_cls = INDEX_STRUCT_TYPE_TO_INDEX_CLASS[type_]
index = index_cls(
index_struct=index_struct, storage_context=storage_context, **kwargs
)
indices.append(index)
return indices
def load_graph_from_storage(
storage_context: StorageContext,
root_id: str,
**kwargs: Any,
) -> ComposableGraph:
"""Load composable graph from storage context.
Args:
storage_context (StorageContext): storage context containing
docstore, index store and vector store.
root_id (str): ID of the root index of the graph.
**kwargs: Additional keyword args to pass to the index constructors.
"""
indices = load_indices_from_storage(storage_context, index_ids=None, **kwargs)
all_indices = {index.index_id: index for index in indices}
return ComposableGraph(all_indices=all_indices, root_id=root_id)
|
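A minimal usage sketch for the loaders above, assuming an index was previously persisted to a local directory (the path is hypothetical):
from llama_index.core import StorageContext, load_index_from_storage

# Rebuild the storage context from a persist directory, then load the single index in it.
storage_context = StorageContext.from_defaults(persist_dir="./storage")
index = load_index_from_storage(storage_context)
# If several indices were persisted, pass index_id=... or use load_indices_from_storage.
query_engine = index.as_query_engine()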
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
# TODO: support auto scaling lr
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
# auto_scale_lr = dict(base_batch_size=16)
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=16)
|
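As a sketch of how such ``_base_`` configs are consumed, the snippet below loads and tweaks one with mmengine; the file path is hypothetical.
from mmengine.config import Config

# `_base_` files are merged automatically when the config is loaded.
cfg = Config.fromfile('configs/retinanet/retinanet_r18_fpn_1x_coco.py')
print(cfg.model.backbone.depth)    # 18
print(cfg.model.neck.in_channels)  # [64, 128, 256, 512]
# Fields can be overridden after loading, e.g. a different learning rate:
cfg.optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)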
"""
This example runs a BiLSTM after the word embedding lookup. The output of the BiLSTM is then pooled,
for example with max-pooling (which gives a system like InferSent) or with mean-pooling.
Note, you can also pass BERT embeddings to the BiLSTM.
"""
import traceback
from datasets import load_dataset
from sentence_transformers import models, losses
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_bilstm-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to traditional word embeddings like GloVe
word_embedding_model = models.WordEmbeddings.from_text_file("glove.6B.300d.txt.gz")
lstm = models.LSTM(word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(), hidden_dim=1024)
# Apply mean pooling to get one fixed-sized sentence vector
pooling_model = models.Pooling(
lstm.get_word_embedding_dimension(),
pooling_mode="mean",
)
model = SentenceTransformer(modules=[word_embedding_model, lstm, pooling_model])
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="glove-bilstm-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 8. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = "glove-bilstm-sts"
try:
model.push_to_hub(model_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}')`."
)
|
"""
This example runs a BiLSTM after the word embedding lookup. The output of the BiLSTM is then pooled,
for example with max-pooling (which gives a system like InferSent) or with mean-pooling.
Note, you can also pass BERT embeddings to the BiLSTM.
"""
import traceback
from datasets import load_dataset
from sentence_transformers import models, losses
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_bilstm-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to traditional word embeddings like GloVe
word_embedding_model = models.WordEmbeddings.from_text_file("glove.6B.300d.txt.gz")
lstm = models.LSTM(word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(), hidden_dim=1024)
# Apply mean pooling to get one fixed-sized sentence vector
pooling_model = models.Pooling(
lstm.get_word_embedding_dimension(),
pooling_mode="mean",
)
model = SentenceTransformer(modules=[word_embedding_model, lstm, pooling_model])
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/losses.html#cosinesimilarityloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="glove-bilstm-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 8. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = "glove-bilstm-sts"
try:
model.push_to_hub(model_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}')`."
)
|
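After training, the saved model can be reloaded for inference; a small sketch follows (the path mirrors ``final_output_dir`` from the script and is a placeholder):
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("output/training_stsbenchmark_bilstm/final")  # placeholder path
embeddings = model.encode(["A man is playing a guitar.", "Someone plays the guitar."])
# Cosine similarity between the two sentence embeddings, in [-1, 1].
print(util.cos_sim(embeddings[0], embeddings[1]))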
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model; it is not multilingual, but hopefully multilingual sparse models will appear on the Hub soon
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
"""
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
"""
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
|
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseTranslationEvaluator,
)
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model; it is not multilingual, but hopefully multilingual sparse models will appear on the Hub soon
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
"""
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
"""
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
|
from pydantic import BaseModel
from typing import Optional, Dict, List
class AlphaMatrix(BaseModel):
"""
Pydantic class that enforces the required fields for a KodaRetriever.
You do not need to understand this class to use a KodaRetriever - it is instantiated automatically when a dictionary is provided.
It is best to instantiate this from a dictionary; don't bother trying to instantiate it by declaring AlphaCategory objects directly.
Example:
>>> data = {
"normal query": { # examples is not required if you aren't using few-shot auto-routing
"alpha": .5
, "description": "This is a normal query" # desc is not required if you aren't using few-shot auto-routing
, "examples": ["This is a normal query", "Another normal query"]
}
}
>>> matrix = AlphaMatrix(data=data) # arg must be named matrix for the retriever to use it
"""
class AlphaCategory(BaseModel):
"""
Subclass that enforces the required fields for a category in the AlphaMatrix; it is needed so categories can be nested inside the AlphaMatrix class.
You should not need to touch this directly, as it is only used for type checking and validation.
"""
alpha: float
description: Optional[str] = (
None  # optional if providing a custom LLM, it's presumed this was part of your training data for the custom model
)
examples: Optional[List[str]] = (
None # if not providing a custom model, this is required
)
data: Dict[str, AlphaCategory]
def get_alpha(self, category: str) -> float:
"""Simple helper function to get the alpha value for a given category."""
if category not in self.data:
err = f"Provided category '{category}' cannot be found"
raise ValueError(err)
return self.data.get(category).alpha # type: ignore
def get_examples(self, category: str) -> List[str]:
"""Simple helper function to get the examples for a given category."""
if category not in self.data:
err = f"Provided category '{category}' cannot be found"
raise ValueError(err)
return self.data.get(category).examples # type: ignore
def get_description(self, category: str) -> str:
"""Simple helper function to get the description for a given category."""
if category not in self.data:
err = f"Provided category '{category}' cannot be found"
raise ValueError(err)
return self.data.get(category).description # type: ignore
def get_categories(self) -> list:
"""Simple helper function to get the categories for a given category."""
return list(self.data.keys())
def format_category(self, category: str) -> str:
"""Simple helper function to format the category information for a given category."""
if category not in self.data:
err = f"Provided category '{category}' cannot be found"
raise ValueError(err)
description = self.get_description(category)
examples = self.get_examples(category)
category_info = f"""
- {category}:
description: {description}
""".strip()
if examples:
examples = "; ".join(examples)
example_info = f"""
examples:
{examples}
"""
category_info = f"{category_info}\n{example_info}"
return category_info
def get_all_category_info(self) -> str:
"""Simple helper function to get the category information for all categories."""
categories = []
for category in self.get_categories():
category_info = self.format_category(category)
categories.append(category_info)
return "\n".join(categories)
|
from pydantic import BaseModel
from typing import Optional, Dict, List
class AlphaMatrix(BaseModel):
"""
Pydantic class that enforces the required fields for a KodaRetriever.
You do not need to understand this class to use a KodaRetriever - it is instantiated automatically when a dictionary is provided.
It is best to instantiate this from a dictionary; don't bother trying to instantiate it by declaring AlphaCategory objects directly.
Example:
>>> data = {
"normal query": { # examples is not required if you aren't using few-shot auto-routing
"alpha": .5
, "description": "This is a normal query" # desc is not required if you aren't using few-shot auto-routing
, "examples": ["This is a normal query", "Another normal query"]
}
}
>>> matrix = AlphaMatrix(data=data) # arg must be named matrix for the retriever to use it
"""
class AlphaCategory(BaseModel):
"""
Subclass that enforces the required fields for a category in the AlphaMatrix; it is needed so categories can be nested inside the AlphaMatrix class.
You should not need to touch this directly, as it is only used for type checking and validation.
"""
alpha: float
description: Optional[
str
] = None  # optional if providing a custom LLM, it's presumed this was part of your training data for the custom model
examples: Optional[
List[str]
] = None # if not providing a custom model, this is required
data: Dict[str, AlphaCategory]
def get_alpha(self, category: str) -> float:
"""Simple helper function to get the alpha value for a given category."""
if category not in self.data:
err = f"Provided category '{category}' cannot be found"
raise ValueError(err)
return self.data.get(category).alpha # type: ignore
def get_examples(self, category: str) -> List[str]:
"""Simple helper function to get the examples for a given category."""
if category not in self.data:
err = f"Provided category '{category}' cannot be found"
raise ValueError(err)
return self.data.get(category).examples # type: ignore
def get_description(self, category: str) -> str:
"""Simple helper function to get the description for a given category."""
if category not in self.data:
err = f"Provided category '{category}' cannot be found"
raise ValueError(err)
return self.data.get(category).description # type: ignore
def get_categories(self) -> list:
"""Simple helper function to get the categories for a given category."""
return list(self.data.keys())
def format_category(self, category: str) -> str:
"""Simple helper function to format the category information for a given category."""
if category not in self.data:
err = f"Provided category '{category}' cannot be found"
raise ValueError(err)
description = self.get_description(category)
examples = self.get_examples(category)
category_info = f"""
- {category}:
description: {description}
""".strip()
if examples:
examples = "; ".join(examples)
example_info = f"""
examples:
{examples}
"""
category_info = f"{category_info}\n{example_info}"
return category_info
def get_all_category_info(self) -> str:
"""Simple helper function to get the category information for all categories."""
categories = []
for category in self.get_categories():
category_info = self.format_category(category)
categories.append(category_info)
return "\n".join(categories)
|
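A short sketch of instantiating the class above from a dictionary, reusing the docstring's "normal query" category plus a second, hypothetical one:
data = {
    "normal query": {
        "alpha": 0.5,
        "description": "This is a normal query",
        "examples": ["This is a normal query", "Another normal query"],
    },
    "keyword query": {  # hypothetical second category
        "alpha": 0.1,
        "description": "Short keyword-style lookups",
        "examples": ["error code 1603", "annual report 2021 pdf"],
    },
}
matrix = AlphaMatrix(data=data)
print(matrix.get_categories())            # ['normal query', 'keyword query']
print(matrix.get_alpha("keyword query"))  # 0.1
print(matrix.get_all_category_info())     # formatted descriptions and examples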
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))
|
"""Test EdenAi's text moderation Tool .
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and will return dummy results)
You'll then need to set the EDENAI_API_KEY environment variable to your API key.
"""
from langchain_community.tools.edenai.text_moderation import EdenAiTextModerationTool
def test_edenai_call() -> None:
"""Test simple call to edenai's text moderation endpoint."""
text_moderation = EdenAiTextModerationTool(providers=["openai"], language="en")
output = text_moderation.invoke("i hate you")
assert text_moderation.name == "edenai_explicit_content_detection_text"
assert text_moderation.feature == "text"
assert text_moderation.subfeature == "moderation"
assert isinstance(output, str)
|
"""Test EdenAi's text moderation Tool .
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and will return dummy results)
You'll then need to set the EDENAI_API_KEY environment variable to your API key.
"""
from langchain_community.tools.edenai.text_moderation import EdenAiTextModerationTool
def test_edenai_call() -> None:
"""Test simple call to edenai's text moderation endpoint."""
text_moderation = EdenAiTextModerationTool(providers=["openai"], language="en") # type: ignore[call-arg]
output = text_moderation.invoke("i hate you")
assert text_moderation.name == "edenai_explicit_content_detection_text"
assert text_moderation.feature == "text"
assert text_moderation.subfeature == "moderation"
assert isinstance(output, str)
|
from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.4.0"
@keras_export("keras.version")
def version():
return __version__
|
from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.3.3"
@keras_export("keras.version")
def version():
return __version__
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, List, Tuple, Union
import torch.nn.functional as F
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.core.utils import ConfigType, OptMultiConfig, SampleList
from mmdet.registry import MODELS
@MODELS.register_module()
class BaseSemanticHead(BaseModule, metaclass=ABCMeta):
"""Base module of Semantic Head.
Args:
num_classes (int): the number of classes.
seg_rescale_factor (float): the rescale factor for ``gt_sem_seg``,
which equals to ``1 / output_strides``. The output_strides is
for ``seg_preds``. Defaults to 1 / 4.
init_cfg (Optional[Union[:obj:`ConfigDict`, dict]]): the initialization
config.
loss_seg (Union[:obj:`ConfigDict`, dict]): the loss of the semantic
head.
"""
def __init__(self,
num_classes: int,
seg_rescale_factor: float = 1 / 4.,
loss_seg: ConfigType = dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=1.0),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.loss_seg = MODELS.build(loss_seg)
self.num_classes = num_classes
self.seg_rescale_factor = seg_rescale_factor
@abstractmethod
def forward(self, x: Union[Tensor, Tuple[Tensor]]) -> Dict[str, Tensor]:
"""Placeholder of forward function.
Args:
x (Tensor): Feature maps.
Returns:
Dict[str, Tensor]: A dictionary, including features
and predicted scores. Required keys: 'seg_preds'
and 'feats'.
"""
pass
@abstractmethod
def loss(self, x: Union[Tensor, Tuple[Tensor]],
batch_data_samples: SampleList) -> Dict[str, Tensor]:
"""
Args:
x (Union[Tensor, Tuple[Tensor]]): Feature maps.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
Dict[str, Tensor]: The loss of semantic head.
"""
pass
def predict(self,
x: Union[Tensor, Tuple[Tensor]],
batch_img_metas: List[dict],
rescale: bool = False) -> List[Tensor]:
"""Test without Augmentation.
Args:
x (Union[Tensor, Tuple[Tensor]]): Feature maps.
batch_img_metas (List[dict]): List of image information.
rescale (bool): Whether to rescale the results.
Defaults to False.
Returns:
list[Tensor]: semantic segmentation logits.
"""
seg_preds = self.forward(x)['seg_preds']
seg_preds = F.interpolate(
seg_preds,
size=batch_img_metas[0]['batch_input_shape'],
mode='bilinear',
align_corners=False)
seg_preds = [seg_preds[i] for i in range(len(batch_img_metas))]
if rescale:
seg_pred_list = []
for i in range(len(batch_img_metas)):
h, w = batch_img_metas[i]['img_shape']
seg_pred = seg_preds[i][:, :h, :w]
h, w = batch_img_metas[i]['ori_shape']
seg_pred = F.interpolate(
seg_pred[None],
size=(h, w),
mode='bilinear',
align_corners=False)[0]
seg_pred_list.append(seg_pred)
else:
seg_pred_list = seg_preds
return seg_pred_list
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import torch.nn.functional as F
from mmcv.runner import BaseModule, force_fp32
from mmengine.model import stack_batch
from ..builder import build_loss
from ..utils import interpolate_as
class BaseSemanticHead(BaseModule, metaclass=ABCMeta):
"""Base module of Semantic Head.
Args:
num_classes (int): the number of classes.
init_cfg (dict): the initialization config.
loss_seg (dict): the loss of the semantic head.
"""
def __init__(self,
num_classes,
init_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=1.0)):
super(BaseSemanticHead, self).__init__(init_cfg)
self.loss_seg = build_loss(loss_seg)
self.num_classes = num_classes
@force_fp32(apply_to=('seg_preds', ))
def loss(self, seg_preds, gt_semantic_seg):
"""Get the loss of semantic head.
Args:
seg_preds (Tensor): The input logits with the shape (N, C, H, W).
gt_semantic_seg (Tensor): The ground truth of semantic segmentation with
the shape (N, H, W).
Returns:
dict: the loss of semantic head.
"""
if seg_preds.shape[-2:] != gt_semantic_seg.shape[-2:]:
seg_preds = interpolate_as(seg_preds, gt_semantic_seg)
seg_preds = seg_preds.permute((0, 2, 3, 1))
loss_seg = self.loss_seg(
seg_preds.reshape(-1, self.num_classes), # => [NxHxW, C]
gt_semantic_seg.reshape(-1).long())
return dict(loss_seg=loss_seg)
@abstractmethod
def forward(self, x):
"""Placeholder of forward function.
Returns:
dict[str, Tensor]: A dictionary, including features
and predicted scores. Required keys: 'seg_preds'
and 'feats'.
"""
pass
def forward_train(self, x, data_samples):
output = self.forward(x)
seg_preds = output['seg_preds']
gt_semantic_segs = [
data_sample.gt_sem_seg for data_sample in data_samples
]
gt_semantic_segs = stack_batch(gt_semantic_segs, pad_value=255)
return self.loss(seg_preds, gt_semantic_segs)
def simple_test(self, x, img_metas, rescale=False):
output = self.forward(x)
seg_preds = output['seg_preds']
seg_preds = F.interpolate(
seg_preds,
size=img_metas[0]['pad_shape'][:2],
mode='bilinear',
align_corners=False)
if rescale:
h, w, _ = img_metas[0]['img_shape']
seg_preds = seg_preds[:, :, :h, :w]
h, w, _ = img_metas[0]['ori_shape']
seg_preds = F.interpolate(
seg_preds, size=(h, w), mode='bilinear', align_corners=False)
return seg_preds
|
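To make the contract of the abstract methods above concrete, here is a toy module that only mirrors the expected ``forward`` output (a dict with 'seg_preds' and 'feats'); it is plain torch, not an actual mmdet head, and the channel/class counts are arbitrary:
import torch
import torch.nn as nn
import torch.nn.functional as F

class ToySemanticHead(nn.Module):
    """Illustrative only: mirrors the forward() contract of BaseSemanticHead."""
    def __init__(self, in_channels=256, num_classes=54):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, in_channels, 3, padding=1)
        self.cls = nn.Conv2d(in_channels, num_classes, 1)
    def forward(self, x):
        feats = F.relu(self.conv(x))
        return {'seg_preds': self.cls(feats), 'feats': feats}

head = ToySemanticHead()
out = head(torch.randn(2, 256, 32, 32))
print(out['seg_preds'].shape)  # torch.Size([2, 54, 32, 32])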
from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sklearn.metrics import ndcg_score
logger = logging.getLogger(__name__)
class CERerankingEvaluator:
"""
This class evaluates a CrossEncoder model for the task of re-ranking.
Given a query and a list of documents, it computes the score [query, doc_i] for all possible
documents and sorts them in decreasing order. Then, MRR@10 and NDCG@10 are computed to measure the quality of the ranking.
Args:
samples (List[Dict[str, Union[str, List[str]]]]): Must be a list and each element is of the form:
{'query': '', 'positive': [], 'negative': []}. Query is the search query, positive is a list
of positive (relevant) documents, negative is a list of negative (irrelevant) documents.
"""
def __init__(self, samples, at_k: int = 10, name: str = "", write_csv: bool = True, mrr_at_k: int | None = None):
self.samples = samples
self.name = name
if mrr_at_k is not None:
logger.warning(f"The `mrr_at_k` parameter has been deprecated; please use `at_k={mrr_at_k}` instead.")
self.at_k = mrr_at_k
else:
self.at_k = at_k
if isinstance(self.samples, dict):
self.samples = list(self.samples.values())
self.csv_file = "CERerankingEvaluator" + ("_" + name if name else "") + f"_results_@{self.at_k}.csv"
self.csv_headers = [
"epoch",
"steps",
f"MRR@{self.at_k}",
f"NDCG@{self.at_k}",
]
self.write_csv = write_csv
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = f" after epoch {epoch}:"
else:
out_txt = f" in epoch {epoch} after {steps} steps:"
else:
out_txt = ":"
logger.info("CERerankingEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
all_mrr_scores = []
all_ndcg_scores = []
num_queries = 0
num_positives = []
num_negatives = []
for instance in self.samples:
query = instance["query"]
positive = list(instance["positive"])
negative = list(instance["negative"])
docs = positive + negative
is_relevant = [1] * len(positive) + [0] * len(negative)
if len(positive) == 0 or len(negative) == 0:
continue
num_queries += 1
num_positives.append(len(positive))
num_negatives.append(len(negative))
model_input = [[query, doc] for doc in docs]
pred_scores = model.predict(model_input, convert_to_numpy=True, show_progress_bar=False)
pred_scores_argsort = np.argsort(-pred_scores) # Sort in decreasing order
mrr_score = 0
for rank, index in enumerate(pred_scores_argsort[0 : self.at_k]):
if is_relevant[index]:
mrr_score = 1 / (rank + 1)
break
all_mrr_scores.append(mrr_score)
all_ndcg_scores.append(ndcg_score([is_relevant], [pred_scores], k=self.at_k))
mean_mrr = np.mean(all_mrr_scores)
mean_ndcg = np.mean(all_ndcg_scores)
logger.info(
f"Queries: {num_queries} \t Positives: Min {np.min(num_positives):.1f}, Mean {np.mean(num_positives):.1f}, Max {np.max(num_positives):.1f} \t Negatives: Min {np.min(num_negatives):.1f}, Mean {np.mean(num_negatives):.1f}, Max {np.max(num_negatives):.1f}"
)
logger.info(f"MRR@{self.at_k}: {mean_mrr * 100:.2f}")
logger.info(f"NDCG@{self.at_k}: {mean_ndcg * 100:.2f}")
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, mean_mrr, mean_ndcg])
return mean_mrr
|
from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sklearn.metrics import ndcg_score
logger = logging.getLogger(__name__)
class CERerankingEvaluator:
"""
This class evaluates a CrossEncoder model for the task of re-ranking.
Given a query and a list of documents, it computes the score [query, doc_i] for all possible
documents and sorts them in decreasing order. Then, MRR@10 and NDCG@10 are computed to measure the quality of the ranking.
Args:
samples (List[Dict[str, Union[str, List[str]]]]): Must be a list and each element is of the form:
{'query': '', 'positive': [], 'negative': []}. Query is the search query, positive is a list
of positive (relevant) documents, negative is a list of negative (irrelevant) documents.
"""
def __init__(self, samples, at_k: int = 10, name: str = "", write_csv: bool = True, mrr_at_k: int | None = None):
self.samples = samples
self.name = name
if mrr_at_k is not None:
logger.warning(f"The `mrr_at_k` parameter has been deprecated; please use `at_k={mrr_at_k}` instead.")
self.at_k = mrr_at_k
else:
self.at_k = at_k
if isinstance(self.samples, dict):
self.samples = list(self.samples.values())
self.csv_file = "CERerankingEvaluator" + ("_" + name if name else "") + f"_results_@{self.at_k}.csv"
self.csv_headers = [
"epoch",
"steps",
"MRR@{}".format(self.at_k),
"NDCG@{}".format(self.at_k),
]
self.write_csv = write_csv
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CERerankingEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
all_mrr_scores = []
all_ndcg_scores = []
num_queries = 0
num_positives = []
num_negatives = []
for instance in self.samples:
query = instance["query"]
positive = list(instance["positive"])
negative = list(instance["negative"])
docs = positive + negative
is_relevant = [1] * len(positive) + [0] * len(negative)
if len(positive) == 0 or len(negative) == 0:
continue
num_queries += 1
num_positives.append(len(positive))
num_negatives.append(len(negative))
model_input = [[query, doc] for doc in docs]
pred_scores = model.predict(model_input, convert_to_numpy=True, show_progress_bar=False)
pred_scores_argsort = np.argsort(-pred_scores) # Sort in decreasing order
mrr_score = 0
for rank, index in enumerate(pred_scores_argsort[0 : self.at_k]):
if is_relevant[index]:
mrr_score = 1 / (rank + 1)
break
all_mrr_scores.append(mrr_score)
all_ndcg_scores.append(ndcg_score([is_relevant], [pred_scores], k=self.at_k))
mean_mrr = np.mean(all_mrr_scores)
mean_ndcg = np.mean(all_ndcg_scores)
logger.info(
"Queries: {} \t Positives: Min {:.1f}, Mean {:.1f}, Max {:.1f} \t Negatives: Min {:.1f}, Mean {:.1f}, Max {:.1f}".format(
num_queries,
np.min(num_positives),
np.mean(num_positives),
np.max(num_positives),
np.min(num_negatives),
np.mean(num_negatives),
np.max(num_negatives),
)
)
logger.info("MRR@{}: {:.2f}".format(self.at_k, mean_mrr * 100))
logger.info("NDCG@{}: {:.2f}".format(self.at_k, mean_ndcg * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, mean_mrr, mean_ndcg])
return mean_mrr
|
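A tiny, hypothetical example of the sample format the evaluator above expects, run against a public cross-encoder checkpoint:
from sentence_transformers import CrossEncoder

samples = [
    {
        "query": "How do I bake bread?",
        "positive": ["Mix flour, water and yeast, then bake the dough in a hot oven."],
        "negative": ["The capital of France is Paris.", "Python is a programming language."],
    },
]
evaluator = CERerankingEvaluator(samples, at_k=10, name="toy-rerank")
model = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2")
print(evaluator(model))  # mean MRR@10 over the toy sample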
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
)
import numpy as np
from ..base.backend import BaseBackendMixin
from ....helper import dataclass_from_dict, filter_dict
if TYPE_CHECKING:
from ....typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_connection: Optional[int] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from ....math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
**kwargs,
):
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
self._persist = bool(config.data_path)
if not self._persist:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **filter_dict(config))
from .... import Document
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **filter_dict(config))
def __len__(self):
return self._annlite.index_size
|
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
)
import numpy as np
from ..base.backend import BaseBackendMixin
from ....helper import dataclass_from_dict, filter_dict
if TYPE_CHECKING:
from ....typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_connection: Optional[int] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from ....math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
**kwargs,
):
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
self._persist = bool(config.data_path)
if not self._persist:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **filter_dict(config))
from .... import Document
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **filter_dict(config))
|
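In user code this backend is typically reached through a DocumentArray with annlite storage; a sketch, assuming docarray (v1) with the annlite extra installed and config keys mirroring AnnliteConfig above:
import numpy as np
from docarray import Document, DocumentArray

# data_path is omitted, so a temporary directory is used (non-persistent).
da = DocumentArray(storage='annlite', config={'n_dim': 128, 'metric': 'cosine'})
da.append(Document(embedding=np.random.random(128).astype('float32')))
print(len(da))  # 1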
# Copyright (c) OpenMMLab. All rights reserved.
from io import StringIO
from .file_client import FileClient
def list_from_file(filename,
prefix='',
offset=0,
max_num=0,
encoding='utf-8',
file_client_args=None):
"""Load a text file and parse the content as a list of strings.
Note:
In v1.3.16 and later, ``list_from_file`` supports loading a text file
which can be stored in different backends, and parses the content as
a list of strings.
Args:
filename (str): Filename.
prefix (str): The prefix to be inserted to the beginning of each item.
offset (int): The offset of lines.
max_num (int): The maximum number of lines to be read,
zeros and negatives mean no limitation.
encoding (str): Encoding used to open the file. Default utf-8.
file_client_args (dict, optional): Arguments to instantiate a
FileClient. See :class:`mmcv.fileio.FileClient` for details.
Default: None.
Examples:
>>> list_from_file('/path/of/your/file') # disk
['hello', 'world']
>>> list_from_file('s3://path/of/your/file') # ceph or petrel
['hello', 'world']
Returns:
list[str]: A list of strings.
"""
cnt = 0
item_list = []
file_client = FileClient.infer_client(file_client_args, filename)
with StringIO(file_client.get_text(filename, encoding)) as f:
for _ in range(offset):
f.readline()
for line in f:
if 0 < max_num <= cnt:
break
item_list.append(prefix + line.rstrip('\n\r'))
cnt += 1
return item_list
def dict_from_file(filename,
key_type=str,
encoding='utf-8',
file_client_args=None):
"""Load a text file and parse the content as a dict.
Each line of the text file will be two or more columns split by
whitespaces or tabs. The first column will be parsed as dict keys, and
the following columns will be parsed as dict values.
Note:
In v1.3.16 and later, ``dict_from_file`` supports loading a text file
which can be stored in different backends, and parses the content as
a dict.
Args:
filename(str): Filename.
key_type(type): Type of the dict keys. str is used by default and
type conversion will be performed if specified.
encoding (str): Encoding used to open the file. Default utf-8.
file_client_args (dict, optional): Arguments to instantiate a
FileClient. See :class:`mmcv.fileio.FileClient` for details.
Default: None.
Examples:
>>> dict_from_file('/path/of/your/file') # disk
{'key1': 'value1', 'key2': 'value2'}
>>> dict_from_file('s3://path/of/your/file') # ceph or petrel
{'key1': 'value1', 'key2': 'value2'}
Returns:
dict: The parsed contents.
"""
mapping = {}
file_client = FileClient.infer_client(file_client_args, filename)
with StringIO(file_client.get_text(filename, encoding)) as f:
for line in f:
items = line.rstrip('\n').split()
assert len(items) >= 2
key = key_type(items[0])
val = items[1:] if len(items) > 2 else items[1]
mapping[key] = val
return mapping
|
# Copyright (c) OpenMMLab. All rights reserved.
# type: ignore
from io import StringIO
from .file_client import FileClient
def list_from_file(filename,
prefix='',
offset=0,
max_num=0,
encoding='utf-8',
file_client_args=None):
"""Load a text file and parse the content as a list of strings.
Note:
In v1.3.16 and later, ``list_from_file`` supports loading a text file
which can be stored in different backends, and parses the content as
a list of strings.
Args:
filename (str): Filename.
prefix (str): The prefix to be inserted to the beginning of each item.
offset (int): The offset of lines.
max_num (int): The maximum number of lines to be read,
zeros and negatives mean no limitation.
encoding (str): Encoding used to open the file. Default utf-8.
file_client_args (dict, optional): Arguments to instantiate a
FileClient. See :class:`mmcv.fileio.FileClient` for details.
Default: None.
Examples:
>>> list_from_file('/path/of/your/file') # disk
['hello', 'world']
>>> list_from_file('s3://path/of/your/file') # ceph or petrel
['hello', 'world']
Returns:
list[str]: A list of strings.
"""
cnt = 0
item_list = []
file_client = FileClient.infer_client(file_client_args, filename)
with StringIO(file_client.get_text(filename, encoding)) as f:
for _ in range(offset):
f.readline()
for line in f:
if 0 < max_num <= cnt:
break
item_list.append(prefix + line.rstrip('\n\r'))
cnt += 1
return item_list
def dict_from_file(filename,
key_type=str,
encoding='utf-8',
file_client_args=None):
"""Load a text file and parse the content as a dict.
Each line of the text file will be two or more columns split by
whitespaces or tabs. The first column will be parsed as dict keys, and
the following columns will be parsed as dict values.
Note:
In v1.3.16 and later, ``dict_from_file`` supports loading a text file
which can be stored in different backends, and parses the content as
a dict.
Args:
filename(str): Filename.
key_type(type): Type of the dict keys. str is used by default and
type conversion will be performed if specified.
encoding (str): Encoding used to open the file. Default utf-8.
file_client_args (dict, optional): Arguments to instantiate a
FileClient. See :class:`mmcv.fileio.FileClient` for details.
Default: None.
Examples:
>>> dict_from_file('/path/of/your/file') # disk
{'key1': 'value1', 'key2': 'value2'}
>>> dict_from_file('s3://path/of/your/file') # ceph or petrel
{'key1': 'value1', 'key2': 'value2'}
Returns:
dict: The parsed contents.
"""
mapping = {}
file_client = FileClient.infer_client(file_client_args, filename)
with StringIO(file_client.get_text(filename, encoding)) as f:
for line in f:
items = line.rstrip('\n').split()
assert len(items) >= 2
key = key_type(items[0])
val = items[1:] if len(items) > 2 else items[1]
mapping[key] = val
return mapping
|
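A quick local-disk sketch of the two helpers above (assuming mmcv 1.x, where both are exported at the package top level):
import os
import tempfile
from mmcv import dict_from_file, list_from_file

path = os.path.join(tempfile.mkdtemp(), 'labels.txt')
with open(path, 'w', encoding='utf-8') as f:
    f.write('person 1\ncar 2\n')
print(list_from_file(path))                 # ['person 1', 'car 2']
print(list_from_file(path, prefix='cls_'))  # ['cls_person 1', 'cls_car 2']
print(dict_from_file(path))                 # {'person': '1', 'car': '2'}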
import weakref
from keras.src.backend.common import global_state
def set_tensor_attr(tensor, attr, value):
try:
setattr(tensor, attr, value)
except AttributeError:
if value is None:
return
attr_dict = global_state.get_global_attribute(f"{attr}_dict")
if attr_dict is None:
attr_dict = weakref.WeakValueDictionary()
global_state.set_global_attribute(f"{attr}_dict", attr_dict)
attr_dict[id(tensor)] = value
def get_tensor_attr(tensor, attr):
if not hasattr(tensor, attr):
attr_dict = global_state.get_global_attribute(f"{attr}_dict")
if attr_dict is not None:
return attr_dict.get(id(tensor), None)
return getattr(tensor, attr, None)
|
import weakref
from keras.src.backend.common import global_state
def set_tensor_attr(tensor, attr, value):
try:
setattr(tensor, "_keras_mask", value)
except AttributeError:
if value is None:
return
attr_dict = global_state.get_global_attribute(f"{attr}_dict")
if attr_dict is None:
attr_dict = weakref.WeakValueDictionary()
global_state.set_global_attribute(f"{attr}_dict", attr_dict)
attr_dict[id(tensor)] = value
def get_tensor_attr(tensor, attr):
if not hasattr(tensor, attr):
attr_dict = global_state.get_global_attribute(f"{attr}_dict")
if attr_dict is not None:
return attr_dict.get(id(tensor), None)
return getattr(tensor, attr, None)
|
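The fallback pattern above (a WeakValueDictionary keyed by ``id(tensor)`` when ``setattr`` is rejected) can be illustrated standalone; all names below are hypothetical stand-ins, and note the stored value must itself be weakly referenceable:
import weakref

_fallback = {}  # attr name -> WeakValueDictionary of id(obj) -> value

class Mask:  # stand-in for a weakly referenceable value
    def __init__(self, data):
        self.data = data

def set_attr(obj, attr, value):
    try:
        setattr(obj, attr, value)
    except AttributeError:  # e.g. tuples and other objects without __dict__
        if value is None:
            return
        _fallback.setdefault(attr, weakref.WeakValueDictionary())[id(obj)] = value

def get_attr(obj, attr):
    if not hasattr(obj, attr):
        d = _fallback.get(attr)
        if d is not None:
            return d.get(id(obj), None)
    return getattr(obj, attr, None)

t = (1, 2, 3)  # cannot take new attributes, so the weak dict is used
mask = Mask("a strong reference must be kept alive")
set_attr(t, "_keras_mask", mask)
print(get_attr(t, "_keras_mask").data)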
import torch
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from .transforms_test_impl import TransformsTestBase
@skipIfNoCuda
class TransformsCUDAFloat32Test(TransformsTestBase, PytorchTestCase):
device = "cuda"
dtype = torch.float32
@skipIfNoCuda
class TransformsCUDAFloat64Test(TransformsTestBase, PytorchTestCase):
device = "cuda"
dtype = torch.float64
|
import torch
from torchaudio_unittest.common_utils import (
PytorchTestCase,
skipIfNoCuda,
)
from .transforms_test_impl import TransformsTestBase
@skipIfNoCuda
class TransformsCUDAFloat32Test(TransformsTestBase, PytorchTestCase):
device = "cuda"
dtype = torch.float32
@skipIfNoCuda
class TransformsCUDAFloat64Test(TransformsTestBase, PytorchTestCase):
device = "cuda"
dtype = torch.float64
|
_base_ = './cascade-rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = './cascade-rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
import os
from typing import BinaryIO, Optional, Union
import pyarrow as pa
import pyarrow.parquet as pq
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class ParquetDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
self.builder = Parquet(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
hash=hash,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
ignore_verifications = False
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
ignore_verifications=ignore_verifications,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.split, ignore_verifications=ignore_verifications, in_memory=self.keep_in_memory
)
return dataset
class ParquetDatasetWriter:
def __init__(
self,
dataset: Dataset,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
**parquet_writer_kwargs,
):
self.dataset = dataset
self.path_or_buf = path_or_buf
self.batch_size = batch_size
self.parquet_writer_kwargs = parquet_writer_kwargs
def write(self) -> int:
batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
with open(self.path_or_buf, "wb+") as buffer:
written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
else:
written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
return written
def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
"""Writes the pyarrow table as Parquet to a binary file handle.
Caller is responsible for opening and closing the handle.
"""
written = 0
_ = parquet_writer_kwargs.pop("path_or_buf", None)
schema = pa.schema(self.dataset.features.type)
writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
for offset in range(0, len(self.dataset), batch_size):
batch = query_table(
table=self.dataset._data,
key=slice(offset, offset + batch_size),
indices=self.dataset._indices if self.dataset._indices is not None else None,
)
writer.write_table(batch)
written += batch.nbytes
writer.close()
return written
|
import os
from typing import BinaryIO, Optional, Union
import pyarrow as pa
import pyarrow.parquet as pq
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class ParquetDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
self.builder = Parquet(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
hash=hash,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
ignore_verifications = False
use_auth_token = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
ignore_verifications=ignore_verifications,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
use_auth_token=use_auth_token,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.split, ignore_verifications=ignore_verifications, in_memory=self.keep_in_memory
)
return dataset
class ParquetDatasetWriter:
def __init__(
self,
dataset: Dataset,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
**parquet_writer_kwargs,
):
self.dataset = dataset
self.path_or_buf = path_or_buf
self.batch_size = batch_size
self.parquet_writer_kwargs = parquet_writer_kwargs
def write(self) -> int:
batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
with open(self.path_or_buf, "wb+") as buffer:
written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
else:
written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
return written
def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
"""Writes the pyarrow table as Parquet to a binary file handle.
Caller is responsible for opening and closing the handle.
"""
written = 0
_ = parquet_writer_kwargs.pop("path_or_buf", None)
schema = pa.schema(self.dataset.features.type)
writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
for offset in range(0, len(self.dataset), batch_size):
batch = query_table(
table=self.dataset._data,
key=slice(offset, offset + batch_size),
indices=self.dataset._indices if self.dataset._indices is not None else None,
)
writer.write_table(batch)
written += batch.nbytes
writer.close()
return written
|
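For reference, the reader and writer above back the public convenience methods on Dataset; a small sketch (the file name is hypothetical):
from datasets import Dataset

ds = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})
num_bytes = ds.to_parquet("tiny.parquet")        # writes via ParquetDatasetWriter
print(num_bytes)
reloaded = Dataset.from_parquet("tiny.parquet")  # reads via ParquetDatasetReader
print(reloaded[0])  # {'text': 'hello', 'label': 0}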
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import DuckDuckGoSearchResults, DuckDuckGoSearchRun
from langchain_community.tools.ddg_search.tool import DDGInput, DuckDuckGoSearchTool
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"DDGInput": "langchain_community.tools.ddg_search.tool",
"DuckDuckGoSearchRun": "langchain_community.tools",
"DuckDuckGoSearchResults": "langchain_community.tools",
"DuckDuckGoSearchTool": "langchain_community.tools.ddg_search.tool",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"DDGInput",
"DuckDuckGoSearchResults",
"DuckDuckGoSearchRun",
"DuckDuckGoSearchTool",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import DuckDuckGoSearchResults, DuckDuckGoSearchRun
from langchain_community.tools.ddg_search.tool import DDGInput, DuckDuckGoSearchTool
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"DDGInput": "langchain_community.tools.ddg_search.tool",
"DuckDuckGoSearchRun": "langchain_community.tools",
"DuckDuckGoSearchResults": "langchain_community.tools",
"DuckDuckGoSearchTool": "langchain_community.tools.ddg_search.tool",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"DDGInput",
"DuckDuckGoSearchRun",
"DuckDuckGoSearchResults",
"DuckDuckGoSearchTool",
]
|
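The two modules above follow the same lazy re-export pattern: a module-level `__getattr__` resolves names that have moved to `langchain_community`. A self-contained sketch of that pattern, with a made-up lookup table and module path, looks like this:

import importlib
from typing import Any

# Hypothetical mapping of old attribute names to the modules that now host them.
DEPRECATED_LOOKUP = {"OldTool": "new_package.tools"}


def __getattr__(name: str) -> Any:
    """Resolve deprecated names lazily on first attribute access."""
    if name in DEPRECATED_LOOKUP:
        module = importlib.import_module(DEPRECATED_LOOKUP[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")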
import pytest
from jina import Flow
from jina.enums import GatewayProtocolType
from tests import random_docs
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
@pytest.mark.parametrize('changeto_protocol', ['grpc', 'http', 'websocket'])
def test_change_gateway(protocol, changeto_protocol):
f = Flow(protocol=protocol).add().add().add(needs='executor1').needs_all()
with f:
da = f.post('/', random_docs(10))
assert len(da) == 10
with pytest.raises(RuntimeError):
f.protocol = changeto_protocol
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
def test_client_gateway_in_flow(protocol):
f = Flow(protocol=protocol, port=12345)
assert f.client.args.protocol == GatewayProtocolType.from_string(protocol)
# gateway_args returns multiple protocols
assert f.gateway_args.protocol[0] == GatewayProtocolType.from_string(protocol)
# flow returns single or multiple protocols
assert f.protocol == GatewayProtocolType.from_string(protocol)
assert f.client.args.port == 12345
# gateway_args returns multiple ports
assert f.gateway_args.port[0] == 12345
# flow returns single or multiple ports
assert f.port == 12345
f._update_network_interface(port=54321)
assert f.client.args.port == 54321
assert f.gateway_args.port[0] == 54321
|
import pytest
from jina import Flow
from jina.enums import GatewayProtocolType
from tests import random_docs
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
@pytest.mark.parametrize('changeto_protocol', ['grpc', 'http', 'websocket'])
def test_change_gateway(protocol, changeto_protocol):
f = Flow(protocol=protocol).add().add().add(needs='executor1').needs_all()
with f:
da = f.post('/', random_docs(10))
assert len(da) == 10
with pytest.raises(RuntimeError):
f.protocol = changeto_protocol
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
def test_get_set_client_gateway_in_flow(protocol):
f = Flow(protocol=protocol, port=12345)
assert f.client_args.protocol == GatewayProtocolType.from_string(protocol)
# gateway_args returns multiple protocols
assert f.gateway_args.protocol[0] == GatewayProtocolType.from_string(protocol)
# flow returns single or multiple protocols
assert f.protocol == GatewayProtocolType.from_string(protocol)
assert f.client_args.port == 12345
# gateway_args returns multiple ports
assert f.gateway_args.port[0] == 12345
# flow returns single or multiple ports
assert f.port == 12345
f._update_network_interface(port=54321)
assert f.client_args.port == 54321
assert f.gateway_args.port[0] == 54321
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = evaluator(model)
"""
Average Queries: 49.92307692307692
Average Corpus: 4334.7692307692305
Aggregated for Score Function: dot
Accuracy@1: 58.72%
Accuracy@3: 75.37%
Accuracy@5: 80.76%
Accuracy@10: 87.07%
Precision@1: 58.72%
Recall@1: 35.61%
Precision@3: 36.31%
Recall@3: 50.84%
Precision@5: 27.72%
Recall@5: 56.55%
Precision@10: 19.18%
Recall@10: 64.21%
MRR@10: 0.6822
NDCG@10: 0.6204
Model Query Sparsity: Active Dimensions: 74.9, Sparsity Ratio: 0.9975
Model Corpus Sparsity: Active Dimensions: 174.8, Sparsity Ratio: 0.9943
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6204
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = evaluator(model)
"""
Average Queries: 49.92307692307692
Average Corpus: 4334.7692307692305
Aggregated for Score Function: dot
Accuracy@1: 58.72%
Accuracy@3: 75.37%
Accuracy@5: 80.76%
Accuracy@10: 87.07%
Precision@1: 58.72%
Recall@1: 35.61%
Precision@3: 36.31%
Recall@3: 50.84%
Precision@5: 27.72%
Recall@5: 56.55%
Precision@10: 19.18%
Recall@10: 64.21%
MRR@10: 0.6822
NDCG@10: 0.6204
Model Sparsity Stats Query : Row Non-Zero Mean: 74.93406589214618, Row Sparsity Mean: 0.9975449305314285
Model Sparsity Stats Corpus : Row Non-Zero Mean: 174.8070262028621, Row Sparsity Mean: 0.9942727547425491
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6204
|
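A variant sketch of the evaluator call above, restricted to a few NanoBEIR subsets instead of all of them. The exact dataset identifiers accepted by `SparseNanoBEIREvaluator` should be checked against its documentation; the names below are assumptions.

from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator

model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Assumed subset names; pass dataset_names=None to evaluate on every subset.
evaluator = SparseNanoBEIREvaluator(dataset_names=["msmarco", "nfcorpus", "nq"], batch_size=16)
results = evaluator(model)
print(results[evaluator.primary_metric])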
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
from .. import util
class MultipleNegativesSymmetricRankingLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.cos_sim):
"""
This loss is an adaptation of MultipleNegativesRankingLoss. MultipleNegativesRankingLoss computes the following loss:
For a given anchor and a list of candidates, find the positive candidate.
In MultipleNegativesSymmetricRankingLoss, we add another loss term: Given the positive and a list of all anchors,
find the correct (matching) anchor.
For the example of question-answering: You have (question, answer)-pairs. MultipleNegativesRankingLoss just computes
the loss to find the answer for a given question. MultipleNegativesSymmetricRankingLoss additionally computes the
loss to find the question for a given answer.
        Note: If you pass triplets, the negative entry will be ignored; only the positive is used for a given anchor.
Args:
model: SentenceTransformer model
scale: Output of similarity function is multiplied by scale
value
similarity_fct: similarity function between sentence
embeddings. By default, cos_sim. Can also be set to dot
product (and then set scale to 1)
Requirements:
1. (anchor, positive) pairs
Relations:
- Like :class:`MultipleNegativesRankingLoss`, but with an additional loss term.
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive) pairs | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
})
loss = losses.MultipleNegativesSymmetricRankingLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(MultipleNegativesSymmetricRankingLoss, self).__init__()
self.model = model
self.scale = scale
self.similarity_fct = similarity_fct
self.cross_entropy_loss = nn.CrossEntropyLoss()
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
anchor = reps[0]
candidates = torch.cat(reps[1:])
scores = self.similarity_fct(anchor, candidates) * self.scale
labels = torch.tensor(
range(len(scores)), dtype=torch.long, device=scores.device
) # Example a[i] should match with b[i]
anchor_positive_scores = scores[:, 0 : len(reps[1])]
forward_loss = self.cross_entropy_loss(scores, labels)
backward_loss = self.cross_entropy_loss(anchor_positive_scores.transpose(0, 1), labels)
return (forward_loss + backward_loss) / 2
def get_config_dict(self):
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
|
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
from .. import util
class MultipleNegativesSymmetricRankingLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.cos_sim):
"""
This loss is an adaptation of MultipleNegativesRankingLoss. MultipleNegativesRankingLoss computes the following loss:
For a given anchor and a list of candidates, find the positive candidate.
In MultipleNegativesSymmetricRankingLoss, we add another loss term: Given the positive and a list of all anchors,
find the correct (matching) anchor.
For the example of question-answering: You have (question, answer)-pairs. MultipleNegativesRankingLoss just computes
the loss to find the answer for a given question. MultipleNegativesSymmetricRankingLoss additionally computes the
loss to find the question for a given answer.
        Note: If you pass triplets, the negative entry will be ignored; only the positive is used for a given anchor.
:param model: SentenceTransformer model
:param scale: Output of similarity function is multiplied by scale value
:param similarity_fct: similarity function between sentence embeddings. By default, cos_sim. Can also be set to dot product (and then set scale to 1)
Requirements:
1. (anchor, positive) pairs
Relations:
- Like :class:`MultipleNegativesRankingLoss`, but with an additional loss term.
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive) pairs | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, losses, InputExample
from torch.utils.data import DataLoader
model = SentenceTransformer('distilbert-base-uncased')
train_examples = [
InputExample(texts=['Anchor 1', 'Positive 1']),
InputExample(texts=['Anchor 2', 'Positive 2']),
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=32)
train_loss = losses.MultipleNegativesSymmetricRankingLoss(model=model)
model.fit(
[(train_dataloader, train_loss)],
epochs=10,
)
"""
super(MultipleNegativesSymmetricRankingLoss, self).__init__()
self.model = model
self.scale = scale
self.similarity_fct = similarity_fct
self.cross_entropy_loss = nn.CrossEntropyLoss()
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
anchor = reps[0]
candidates = torch.cat(reps[1:])
scores = self.similarity_fct(anchor, candidates) * self.scale
labels = torch.tensor(
range(len(scores)), dtype=torch.long, device=scores.device
) # Example a[i] should match with b[i]
anchor_positive_scores = scores[:, 0 : len(reps[1])]
forward_loss = self.cross_entropy_loss(scores, labels)
backward_loss = self.cross_entropy_loss(anchor_positive_scores.transpose(0, 1), labels)
return (forward_loss + backward_loss) / 2
def get_config_dict(self):
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
|
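For (anchor, positive) pairs, the loss implemented above reduces to cross-entropy over a square similarity matrix plus cross-entropy over its transpose. A toy numerical sketch with made-up scores:

import torch
import torch.nn.functional as F

# Scaled anchor-vs-positive similarities; row i should match column i.
scores = torch.tensor([[5.0, 1.0, 0.5],
                       [0.2, 4.0, 0.3],
                       [0.1, 0.4, 6.0]])
labels = torch.arange(len(scores))
forward_loss = F.cross_entropy(scores, labels)     # find the positive for each anchor
backward_loss = F.cross_entropy(scores.T, labels)  # find the anchor for each positive
print((forward_loss + backward_loss) / 2)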
import inspect
import re
from hashlib import sha256
from typing import List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
|
import inspect
import re
from hashlib import sha256
from typing import List
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
|
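A quick check of the hashing helper defined above: comments are stripped before hashing, so comment-only edits to a packaged module do not change the cache hash.

import re
from hashlib import sha256


def _hash_python_lines(lines):
    filtered = [re.sub(r"#.*", "", line) for line in lines]
    filtered = [line for line in filtered if line]
    return sha256("\n".join(filtered).encode("utf-8")).hexdigest()


a = _hash_python_lines(["x = 1  # original comment", "y = 2"])
b = _hash_python_lines(["x = 1  # reworded comment", "y = 2"])
print(a == b)  # True: only the comment changed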
"""
Demo for prediction using individual trees and model slices
===========================================================
"""
import os
import numpy as np
from scipy.special import logit
from sklearn.datasets import load_svmlight_file
import xgboost as xgb
CURRENT_DIR = os.path.dirname(__file__)
train = os.path.join(CURRENT_DIR, "../data/agaricus.txt.train")
test = os.path.join(CURRENT_DIR, "../data/agaricus.txt.test")
def individual_tree() -> None:
"""Get prediction from each individual tree and combine them together."""
X_train, y_train = load_svmlight_file(train)
X_test, y_test = load_svmlight_file(test)
Xy_train = xgb.QuantileDMatrix(X_train, y_train)
n_rounds = 4
# Specify the base score, otherwise xgboost will estimate one from the training
# data.
base_score = 0.5
params = {
"max_depth": 2,
"eta": 1,
"objective": "reg:logistic",
"tree_method": "hist",
"base_score": base_score,
}
booster = xgb.train(params, Xy_train, num_boost_round=n_rounds)
    # Use logit to invert the base score back to a raw leaf value (margin)
scores = np.full((X_test.shape[0],), logit(base_score))
for i in range(n_rounds):
# - Use output_margin to get raw leaf values
# - Use iteration_range to get prediction for only one tree
        # - Use previous prediction as base margin for the model
Xy_test = xgb.DMatrix(X_test, base_margin=scores)
if i == n_rounds - 1:
# last round, get the transformed prediction
scores = booster.predict(
Xy_test, iteration_range=(i, i + 1), output_margin=False
)
else:
# get raw leaf value for accumulation
scores = booster.predict(
Xy_test, iteration_range=(i, i + 1), output_margin=True
)
full = booster.predict(xgb.DMatrix(X_test), output_margin=False)
np.testing.assert_allclose(scores, full)
def model_slices() -> None:
"""Inference with each individual tree using model slices."""
X_train, y_train = load_svmlight_file(train)
X_test, y_test = load_svmlight_file(test)
Xy_train = xgb.QuantileDMatrix(X_train, y_train)
n_rounds = 4
# Specify the base score, otherwise xgboost will estimate one from the training
# data.
base_score = 0.5
params = {
"max_depth": 2,
"eta": 1,
"objective": "reg:logistic",
"tree_method": "hist",
"base_score": base_score,
}
booster = xgb.train(params, Xy_train, num_boost_round=n_rounds)
trees = [booster[t] for t in range(n_rounds)]
    # Use logit to invert the base score back to a raw leaf value (margin)
scores = np.full((X_test.shape[0],), logit(base_score))
for i, t in enumerate(trees):
# Feed previous scores into base margin.
Xy_test = xgb.DMatrix(X_test, base_margin=scores)
if i == n_rounds - 1:
# last round, get the transformed prediction
scores = t.predict(Xy_test, output_margin=False)
else:
# get raw leaf value for accumulation
scores = t.predict(Xy_test, output_margin=True)
full = booster.predict(xgb.DMatrix(X_test), output_margin=False)
np.testing.assert_allclose(scores, full)
if __name__ == "__main__":
individual_tree()
model_slices()
|
"""
Demo for prediction using individual trees and model slices
===========================================================
"""
import os
import numpy as np
from scipy.special import logit
from sklearn.datasets import load_svmlight_file
import xgboost as xgb
CURRENT_DIR = os.path.dirname(__file__)
train = os.path.join(CURRENT_DIR, "../data/agaricus.txt.train")
test = os.path.join(CURRENT_DIR, "../data/agaricus.txt.test")
def individual_tree() -> None:
"""Get prediction from each individual tree and combine them together."""
X_train, y_train = load_svmlight_file(train)
X_test, y_test = load_svmlight_file(test)
Xy_train = xgb.QuantileDMatrix(X_train, y_train)
n_rounds = 4
# Specify the base score, otherwise xgboost will estimate one from the training
# data.
base_score = 0.5
params = {
"max_depth": 2,
"eta": 1,
"objective": "reg:logistic",
"tree_method": "hist",
"base_score": base_score,
}
booster = xgb.train(params, Xy_train, num_boost_round=n_rounds)
    # Use logit to invert the base score back to a raw leaf value (margin)
scores = np.full((X_test.shape[0],), logit(base_score))
for i in range(n_rounds):
# - Use output_margin to get raw leaf values
# - Use iteration_range to get prediction for only one tree
        # - Use previous prediction as base margin for the model
Xy_test = xgb.DMatrix(X_test, base_margin=scores)
if i == n_rounds - 1:
# last round, get the transformed prediction
scores = booster.predict(
Xy_test, iteration_range=(i, i + 1), output_margin=False
)
else:
# get raw leaf value for accumulation
scores = booster.predict(
Xy_test, iteration_range=(i, i + 1), output_margin=True
)
full = booster.predict(xgb.DMatrix(X_test), output_margin=False)
np.testing.assert_allclose(scores, full)
def model_slices() -> None:
"""Inference with each individual tree using model slices."""
X_train, y_train = load_svmlight_file(train)
X_test, y_test = load_svmlight_file(test)
Xy_train = xgb.QuantileDMatrix(X_train, y_train)
n_rounds = 4
# Specify the base score, otherwise xgboost will estimate one from the training
# data.
base_score = 0.5
params = {
"max_depth": 2,
"eta": 1,
"objective": "reg:logistic",
"tree_method": "hist",
"base_score": base_score,
}
booster = xgb.train(params, Xy_train, num_boost_round=n_rounds)
trees = [booster[t] for t in range(n_rounds)]
    # Use logit to invert the base score back to a raw leaf value (margin)
scores = np.full((X_test.shape[0],), logit(base_score))
for i, t in enumerate(trees):
# Feed previous scores into base margin.
Xy_test = xgb.DMatrix(X_test, base_margin=scores)
if i == n_rounds - 1:
# last round, get the transformed prediction
scores = t.predict(Xy_test, output_margin=False)
else:
# get raw leaf value for accumulation
scores = t.predict(Xy_test, output_margin=True)
full = booster.predict(xgb.DMatrix(X_test), output_margin=False)
np.testing.assert_allclose(scores, full)
if __name__ == "__main__":
individual_tree()
model_slices()
|
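Both demos above rely on the same identity: the final margin is the logit of the base score plus the sum of the per-tree raw leaf values, and the prediction is the sigmoid of that margin. A tiny numeric sketch with made-up leaf values:

import numpy as np
from scipy.special import expit, logit

base_score = 0.5
tree_margins = np.array([0.8, -0.3, 0.5, 0.1])  # hypothetical raw leaf values, one per tree
final_margin = logit(base_score) + tree_margins.sum()
print(expit(final_margin))  # the transformed prediction for a reg:logistic objective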
import pytest
from docarray import DocumentArray
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate import WeaviateConfig
from docarray.array.weaviate import DocumentArrayWeaviate
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.redis import DocumentArrayRedis, RedisConfig
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128, flush=True)),
],
)
def test_sample(da_cls, config, start_storage):
if config:
da = da_cls.empty(100, config=config)
else:
da = da_cls.empty(100)
sampled = da.sample(1)
assert len(sampled) == 1
sampled = da.sample(5)
assert len(sampled) == 5
assert isinstance(sampled, DocumentArray)
with pytest.raises(ValueError):
        da.sample(101)  # cannot sample with k greater than the length of the document array.
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128, flush=True)),
],
)
def test_sample_with_seed(da_cls, config, start_storage):
if config:
da = da_cls.empty(100, config=config)
else:
da = da_cls.empty(100)
sampled_1 = da.sample(5, seed=1)
sampled_2 = da.sample(5, seed=1)
sampled_3 = da.sample(5, seed=2)
assert len(sampled_1) == len(sampled_2) == len(sampled_3) == 5
assert sampled_1 == sampled_2
assert sampled_1 != sampled_3
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128, flush=True)),
],
)
def test_shuffle(da_cls, config, start_storage):
if config:
da = da_cls.empty(100, config=config)
else:
da = da_cls.empty(100)
shuffled = da.shuffle()
assert len(shuffled) == len(da)
assert isinstance(shuffled, DocumentArray)
ids_before_shuffle = [d.id for d in da]
ids_after_shuffle = [d.id for d in shuffled]
assert ids_before_shuffle != ids_after_shuffle
assert sorted(ids_before_shuffle) == sorted(ids_after_shuffle)
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128, flush=True)),
],
)
def test_shuffle_with_seed(da_cls, config, start_storage):
if config:
da = da_cls.empty(100, config=config)
else:
da = da_cls.empty(100)
shuffled_1 = da.shuffle(seed=1)
shuffled_2 = da.shuffle(seed=1)
shuffled_3 = da.shuffle(seed=2)
assert len(shuffled_1) == len(shuffled_2) == len(shuffled_3) == len(da)
assert shuffled_1 == shuffled_2
assert shuffled_1 != shuffled_3
|
import pytest
from docarray import DocumentArray
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate import WeaviateConfig
from docarray.array.weaviate import DocumentArrayWeaviate
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
],
)
def test_sample(da_cls, config, start_storage):
if config:
da = da_cls.empty(100, config=config)
else:
da = da_cls.empty(100)
sampled = da.sample(1)
assert len(sampled) == 1
sampled = da.sample(5)
assert len(sampled) == 5
assert isinstance(sampled, DocumentArray)
with pytest.raises(ValueError):
        da.sample(101)  # cannot sample with k greater than the length of the document array.
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
],
)
def test_sample_with_seed(da_cls, config, start_storage):
if config:
da = da_cls.empty(100, config=config)
else:
da = da_cls.empty(100)
sampled_1 = da.sample(5, seed=1)
sampled_2 = da.sample(5, seed=1)
sampled_3 = da.sample(5, seed=2)
assert len(sampled_1) == len(sampled_2) == len(sampled_3) == 5
assert sampled_1 == sampled_2
assert sampled_1 != sampled_3
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
],
)
def test_shuffle(da_cls, config, start_storage):
if config:
da = da_cls.empty(100, config=config)
else:
da = da_cls.empty(100)
shuffled = da.shuffle()
assert len(shuffled) == len(da)
assert isinstance(shuffled, DocumentArray)
ids_before_shuffle = [d.id for d in da]
ids_after_shuffle = [d.id for d in shuffled]
assert ids_before_shuffle != ids_after_shuffle
assert sorted(ids_before_shuffle) == sorted(ids_after_shuffle)
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
],
)
def test_shuffle_with_seed(da_cls, config, start_storage):
if config:
da = da_cls.empty(100, config=config)
else:
da = da_cls.empty(100)
shuffled_1 = da.shuffle(seed=1)
shuffled_2 = da.shuffle(seed=1)
shuffled_3 = da.shuffle(seed=2)
assert len(shuffled_1) == len(shuffled_2) == len(shuffled_3) == len(da)
assert shuffled_1 == shuffled_2
assert shuffled_1 != shuffled_3
|
def __getattr__(name: str):
import warnings
if name == "AudioMetaData":
warnings.warn(
"`torchaudio.backend.common.AudioMetaData` has been moved to "
"`torchaudio.AudioMetaData`. Please update the import path.",
stacklevel=2,
)
from torchaudio import AudioMetaData
return AudioMetaData
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
|
def __getattr__(name: str):
import warnings
if name == "AudioMetaData":
warnings.warn(
"`torchaudio.backend.common.AudioMetaData` has been moved to "
"`torchaudio.AudioMetaData`. Please update the import path.",
stacklevel=2,
)
from torchaudio._backend.common import AudioMetaData
return AudioMetaData
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
|
"""
Example of using callbacks with Dask
====================================
"""
import numpy as np
from dask.distributed import Client, LocalCluster
from dask_ml.datasets import make_regression
from dask_ml.model_selection import train_test_split
import xgboost as xgb
import xgboost.dask as dxgb
from xgboost.dask import DaskDMatrix
def probability_for_going_backward(epoch):
return 0.999 / (1.0 + 0.05 * np.log(1.0 + epoch))
# All callback functions must inherit from TrainingCallback
class CustomEarlyStopping(xgb.callback.TrainingCallback):
"""A custom early stopping class where early stopping is determined stochastically.
In the beginning, allow the metric to become worse with a probability of 0.999.
As boosting progresses, the probability should be adjusted downward"""
def __init__(self, *, validation_set, target_metric, maximize, seed):
self.validation_set = validation_set
self.target_metric = target_metric
self.maximize = maximize
self.seed = seed
self.rng = np.random.default_rng(seed=seed)
if maximize:
self.better = lambda x, y: x > y
else:
self.better = lambda x, y: x < y
def after_iteration(self, model, epoch, evals_log):
metric_history = evals_log[self.validation_set][self.target_metric]
if len(metric_history) < 2 or self.better(
metric_history[-1], metric_history[-2]
):
return False # continue training
p = probability_for_going_backward(epoch)
go_backward = self.rng.choice(2, size=(1,), replace=True, p=[1 - p, p]).astype(
            bool
)[0]
print(
"The validation metric went into the wrong direction. "
+ f"Stopping training with probability {1 - p}..."
)
if go_backward:
return False # continue training
else:
return True # stop training
def main(client):
m = 100000
n = 100
X, y = make_regression(n_samples=m, n_features=n, chunks=200, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
dtrain = DaskDMatrix(client, X_train, y_train)
dtest = DaskDMatrix(client, X_test, y_test)
output = dxgb.train(
client,
{
"verbosity": 1,
"tree_method": "hist",
"objective": "reg:squarederror",
"eval_metric": "rmse",
"max_depth": 6,
"learning_rate": 1.0,
},
dtrain,
num_boost_round=1000,
evals=[(dtrain, "train"), (dtest, "test")],
callbacks=[
CustomEarlyStopping(
validation_set="test", target_metric="rmse", maximize=False, seed=0
)
],
)
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=4, threads_per_worker=1) as cluster:
with Client(cluster) as client:
main(client)
|
"""
Example of using callbacks with Dask
====================================
"""
import numpy as np
from dask.distributed import Client, LocalCluster
from dask_ml.datasets import make_regression
from dask_ml.model_selection import train_test_split
import xgboost as xgb
import xgboost.dask as dxgb
from xgboost.dask import DaskDMatrix
def probability_for_going_backward(epoch):
return 0.999 / (1.0 + 0.05 * np.log(1.0 + epoch))
# All callback functions must inherit from TrainingCallback
class CustomEarlyStopping(xgb.callback.TrainingCallback):
"""A custom early stopping class where early stopping is determined stochastically.
In the beginning, allow the metric to become worse with a probability of 0.999.
As boosting progresses, the probability should be adjusted downward"""
def __init__(self, *, validation_set, target_metric, maximize, seed):
self.validation_set = validation_set
self.target_metric = target_metric
self.maximize = maximize
self.seed = seed
self.rng = np.random.default_rng(seed=seed)
if maximize:
self.better = lambda x, y: x > y
else:
self.better = lambda x, y: x < y
def after_iteration(self, model, epoch, evals_log):
metric_history = evals_log[self.validation_set][self.target_metric]
if len(metric_history) < 2 or self.better(
metric_history[-1], metric_history[-2]
):
return False # continue training
p = probability_for_going_backward(epoch)
go_backward = self.rng.choice(2, size=(1,), replace=True, p=[1 - p, p]).astype(
            bool
)[0]
print(
"The validation metric went into the wrong direction. "
+ f"Stopping training with probability {1 - p}..."
)
if go_backward:
return False # continue training
else:
return True # stop training
def main(client):
m = 100000
n = 100
X, y = make_regression(n_samples=m, n_features=n, chunks=200, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
dtrain = DaskDMatrix(client, X_train, y_train)
dtest = DaskDMatrix(client, X_test, y_test)
output = dxgb.train(
client,
{
"verbosity": 1,
"tree_method": "hist",
"objective": "reg:squarederror",
"eval_metric": "rmse",
"max_depth": 6,
"learning_rate": 1.0,
},
dtrain,
num_boost_round=1000,
evals=[(dtrain, "train"), (dtest, "test")],
callbacks=[
CustomEarlyStopping(
validation_set="test", target_metric="rmse", maximize=False, seed=0
)
],
)
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=4, threads_per_worker=1) as cluster:
with Client(cluster) as client:
main(client)
|
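To see how tolerant the stochastic early stopping above is, it helps to print a few values of the schedule: the probability of accepting a worse validation metric starts near 1 and decays slowly with the boosting round.

import numpy as np


def probability_for_going_backward(epoch):
    return 0.999 / (1.0 + 0.05 * np.log(1.0 + epoch))


for epoch in (0, 10, 100, 1000):
    print(epoch, round(float(probability_for_going_backward(epoch)), 3))
# 0 -> 0.999, 10 -> ~0.892, 100 -> ~0.812, 1000 -> ~0.743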
import grpc.aio
import pytest
from grpc import StatusCode
from grpc.aio import Metadata
from jina.excepts import BaseJinaException, InternalNetworkError
@pytest.fixture
def aio_rpc_error():
return grpc.aio.AioRpcError(StatusCode.OK, None, None, details='I am a grpc error')
def test_ine_parent_classes(aio_rpc_error):
err = InternalNetworkError(aio_rpc_error)
# check that it can be caught when we expect AioRpcError or BaseJinaException
with pytest.raises(grpc.aio.AioRpcError):
raise err
with pytest.raises(BaseJinaException):
raise err
def test_ine_statuscode(aio_rpc_error):
err = InternalNetworkError(aio_rpc_error)
assert err.code() == aio_rpc_error.code()
def test_ine_details(aio_rpc_error):
err = InternalNetworkError(aio_rpc_error)
assert err.details() == aio_rpc_error.details()
err = InternalNetworkError(aio_rpc_error, details='I am not a normal grpc error!')
assert err.details() == 'I am not a normal grpc error!'
assert str(err) == 'I am not a normal grpc error!'
@pytest.mark.parametrize('metadata', [None, Metadata(('content-length', '0'))])
def test_ine_trailing_metadata(metadata):
aio_rpc_error = grpc.aio.AioRpcError(
StatusCode.OK,
None,
trailing_metadata=metadata,
details='I am a grpc error',
)
err = InternalNetworkError(aio_rpc_error)
if metadata:
assert (
str(err)
== 'I am a grpc error\ntrailing_metadata=Metadata(((\'content-length\', \'0\'),))'
)
else:
assert str(err) == 'I am a grpc error'
|
import grpc.aio
import pytest
from grpc import StatusCode
from jina.excepts import BaseJinaException, InternalNetworkError
@pytest.fixture
def aio_rpc_error():
return grpc.aio.AioRpcError(StatusCode.OK, None, None, details='I am a grpc error')
def test_ine_parent_classes(aio_rpc_error):
err = InternalNetworkError(aio_rpc_error)
# check that it can be caught when we expect AioRpcError or BaseJinaException
with pytest.raises(grpc.aio.AioRpcError):
raise err
with pytest.raises(BaseJinaException):
raise err
def test_ine_statuscode(aio_rpc_error):
err = InternalNetworkError(aio_rpc_error)
assert err.code() == aio_rpc_error.code()
def test_ine_details(aio_rpc_error):
err = InternalNetworkError(aio_rpc_error)
assert err.details() == aio_rpc_error.details()
err = InternalNetworkError(aio_rpc_error, details='I am not a normal grpc error!')
assert err.details() == 'I am not a normal grpc error!'
assert str(err) == 'I am not a normal grpc error!'
|
"""
In this example we train a semantic search model to search through Wikipedia
articles about programming topics & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C , C Sharp , C++, Go , Java , JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natural Language Toolkit, NumPy, pandas (software), Perl, PHP, PostgreSQL, Python , PyTorch, R , React, Rust , Scala , scikit-learn, SciPy, Swift , TensorFlow, Vue.js
In:
1_programming_query_generation.py - We generate queries for all paragraphs from these articles
2_programming_train_bi-encoder.py - We train a SentenceTransformer bi-encoder with these generated queries. This results in a model we can then use for semantic search (for the given Wikipedia articles).
3_programming_semantic_search.py - Shows how the trained model can be used for semantic search
"""
import gzip
import json
import os
from sentence_transformers import SentenceTransformer, util
# Load the model we trained in 2_programming_train_bi-encoder.py
model = SentenceTransformer("output/programming-model")
# Load the corpus
docs = []
corpus_filepath = "wiki-programmming-20210101.jsonl.gz"
if not os.path.exists(corpus_filepath):
util.http_get("https://sbert.net/datasets/wiki-programmming-20210101.jsonl.gz", corpus_filepath)
with gzip.open(corpus_filepath, "rt") as fIn:
for line in fIn:
data = json.loads(line.strip())
title = data["title"]
for p in data["paragraphs"]:
if len(p) > 100: # Only take paragraphs with at least 100 chars
docs.append((title, p))
paragraph_emb = model.encode([d[1] for d in docs], convert_to_tensor=True)
print("Available Wikipedia Articles:")
print(", ".join(sorted(list(set([d[0] for d in docs])))))
# Example for semantic search
while True:
query = input("Query: ")
query_emb = model.encode(query, convert_to_tensor=True)
hits = util.semantic_search(query_emb, paragraph_emb, top_k=3)[0]
for hit in hits:
doc = docs[hit["corpus_id"]]
print("{:.2f}\t{}\t\t{}".format(hit["score"], doc[0], doc[1]))
print("\n=================\n")
|
"""
In this example we train a semantic search model to search through Wikipedia
articles about programming topics & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C , C Sharp , C++, Go , Java , JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natural Language Toolkit, NumPy, pandas (software), Perl, PHP, PostgreSQL, Python , PyTorch, R , React, Rust , Scala , scikit-learn, SciPy, Swift , TensorFlow, Vue.js
In:
1_programming_query_generation.py - We generate queries for all paragraphs from these articles
2_programming_train_bi-encoder.py - We train a SentenceTransformer bi-encoder with these generated queries. This results in a model we can then use for semantic search (for the given Wikipedia articles).
3_programming_semantic_search.py - Shows how the trained model can be used for semantic search
"""
from sentence_transformers import SentenceTransformer, util
import gzip
import json
import os
# Load the model we trained in 2_programming_train_bi-encoder.py
model = SentenceTransformer("output/programming-model")
# Load the corpus
docs = []
corpus_filepath = "wiki-programmming-20210101.jsonl.gz"
if not os.path.exists(corpus_filepath):
util.http_get("https://sbert.net/datasets/wiki-programmming-20210101.jsonl.gz", corpus_filepath)
with gzip.open(corpus_filepath, "rt") as fIn:
for line in fIn:
data = json.loads(line.strip())
title = data["title"]
for p in data["paragraphs"]:
if len(p) > 100: # Only take paragraphs with at least 100 chars
docs.append((title, p))
paragraph_emb = model.encode([d[1] for d in docs], convert_to_tensor=True)
print("Available Wikipedia Articles:")
print(", ".join(sorted(list(set([d[0] for d in docs])))))
# Example for semantic search
while True:
query = input("Query: ")
query_emb = model.encode(query, convert_to_tensor=True)
hits = util.semantic_search(query_emb, paragraph_emb, top_k=3)[0]
for hit in hits:
doc = docs[hit["corpus_id"]]
print("{:.2f}\t{}\t\t{}".format(hit["score"], doc[0], doc[1]))
print("\n=================\n")
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.imagenet_utils import (
decode_predictions as decode_predictions,
)
from keras.src.applications.imagenet_utils import (
preprocess_input as preprocess_input,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.imagenet_utils import decode_predictions
from keras.src.applications.imagenet_utils import preprocess_input
|
"""
====================================
How to write your own TVTensor class
====================================
.. note::
Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_tv_tensors.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_transforms_plot_custom_tv_tensors.py>` to download the full example code.
This guide is intended for advanced users and downstream library maintainers. We explain how to
write your own TVTensor class, and how to make it compatible with the built-in
Torchvision v2 transforms. Before continuing, make sure you have read
:ref:`sphx_glr_auto_examples_transforms_plot_tv_tensors.py`.
"""
# %%
import torch
from torchvision import tv_tensors
from torchvision.transforms import v2
# %%
# We will create a very simple class that just inherits from the base
# :class:`~torchvision.tv_tensors.TVTensor` class. It will be enough to cover
# what you need to know to implement your more elaborate use-cases. If you need
# to create a class that carries meta-data, take a look at how the
# :class:`~torchvision.tv_tensors.BoundingBoxes` class is `implemented
# <https://github.com/pytorch/vision/blob/main/torchvision/tv_tensors/_bounding_box.py>`_.
class MyTVTensor(tv_tensors.TVTensor):
pass
my_dp = MyTVTensor([1, 2, 3])
my_dp
# %%
# Now that we have defined our custom TVTensor class, we want it to be
# compatible with the built-in torchvision transforms, and the functional API.
# For that, we need to implement a kernel which performs the core of the
# transformation, and then "hook" it to the functional that we want to support
# via :func:`~torchvision.transforms.v2.functional.register_kernel`.
#
# We illustrate this process below: we create a kernel for the "horizontal flip"
# operation of our MyTVTensor class, and register it to the functional API.
from torchvision.transforms.v2 import functional as F
@F.register_kernel(functional="hflip", tv_tensor_cls=MyTVTensor)
def hflip_my_tv_tensor(my_dp, *args, **kwargs):
print("Flipping!")
out = my_dp.flip(-1)
return tv_tensors.wrap(out, like=my_dp)
# %%
# To understand why :func:`~torchvision.tv_tensors.wrap` is used, see
# :ref:`tv_tensor_unwrapping_behaviour`. Ignore the ``*args, **kwargs`` for now,
# we will explain it below in :ref:`param_forwarding`.
#
# .. note::
#
# In our call to ``register_kernel`` above we used a string
# ``functional="hflip"`` to refer to the functional we want to hook into. We
# could also have used the functional *itself*, i.e.
# ``@register_kernel(functional=F.hflip, ...)``.
#
# Now that we have registered our kernel, we can call the functional API on a
# ``MyTVTensor`` instance:
my_dp = MyTVTensor(torch.rand(3, 256, 256))
_ = F.hflip(my_dp)
# %%
# And we can also use the
# :class:`~torchvision.transforms.v2.RandomHorizontalFlip` transform, since it relies on :func:`~torchvision.transforms.v2.functional.hflip` internally:
t = v2.RandomHorizontalFlip(p=1)
_ = t(my_dp)
# %%
# .. note::
#
# We cannot register a kernel for a transform class, we can only register a
# kernel for a **functional**. The reason we can't register a transform
# class is because one transform may internally rely on more than one
# functional, so in general we can't register a single kernel for a given
# class.
#
# .. _param_forwarding:
#
# Parameter forwarding, and ensuring future compatibility of your kernels
# -----------------------------------------------------------------------
#
# The functional API that you're hooking into is public and therefore
# **backward** compatible: we guarantee that the parameters of these functionals
# won't be removed or renamed without a proper deprecation cycle. However, we
# don't guarantee **forward** compatibility, and we may add new parameters in
# the future.
#
# Imagine that in a future version, Torchvision adds a new ``inplace`` parameter
# to its :func:`~torchvision.transforms.v2.functional.hflip` functional. If you
# already defined and registered your own kernel as
def hflip_my_tv_tensor(my_dp): # noqa
print("Flipping!")
out = my_dp.flip(-1)
return tv_tensors.wrap(out, like=my_dp)
# %%
# then calling ``F.hflip(my_dp)`` will **fail**, because ``hflip`` will try to
# pass the new ``inplace`` parameter to your kernel, but your kernel doesn't
# accept it.
#
# For this reason, we recommend to always define your kernels with
# ``*args, **kwargs`` in their signature, as done above. This way, your kernel
# will be able to accept any new parameter that we may add in the future.
# (Technically, adding `**kwargs` only should be enough).
|
"""
====================================
How to write your own TVTensor class
====================================
.. note::
Try on `collab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_tv_tensors.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_transforms_plot_custom_tv_tensors.py>` to download the full example code.
This guide is intended for advanced users and downstream library maintainers. We explain how to
write your own TVTensor class, and how to make it compatible with the built-in
Torchvision v2 transforms. Before continuing, make sure you have read
:ref:`sphx_glr_auto_examples_transforms_plot_tv_tensors.py`.
"""
# %%
import torch
from torchvision import tv_tensors
from torchvision.transforms import v2
# %%
# We will create a very simple class that just inherits from the base
# :class:`~torchvision.tv_tensors.TVTensor` class. It will be enough to cover
# what you need to know to implement your more elaborate use-cases. If you need
# to create a class that carries meta-data, take a look at how the
# :class:`~torchvision.tv_tensors.BoundingBoxes` class is `implemented
# <https://github.com/pytorch/vision/blob/main/torchvision/tv_tensors/_bounding_box.py>`_.
class MyTVTensor(tv_tensors.TVTensor):
pass
my_dp = MyTVTensor([1, 2, 3])
my_dp
# %%
# Now that we have defined our custom TVTensor class, we want it to be
# compatible with the built-in torchvision transforms, and the functional API.
# For that, we need to implement a kernel which performs the core of the
# transformation, and then "hook" it to the functional that we want to support
# via :func:`~torchvision.transforms.v2.functional.register_kernel`.
#
# We illustrate this process below: we create a kernel for the "horizontal flip"
# operation of our MyTVTensor class, and register it to the functional API.
from torchvision.transforms.v2 import functional as F
@F.register_kernel(functional="hflip", tv_tensor_cls=MyTVTensor)
def hflip_my_tv_tensor(my_dp, *args, **kwargs):
print("Flipping!")
out = my_dp.flip(-1)
return tv_tensors.wrap(out, like=my_dp)
# %%
# To understand why :func:`~torchvision.tv_tensors.wrap` is used, see
# :ref:`tv_tensor_unwrapping_behaviour`. Ignore the ``*args, **kwargs`` for now,
# we will explain it below in :ref:`param_forwarding`.
#
# .. note::
#
# In our call to ``register_kernel`` above we used a string
# ``functional="hflip"`` to refer to the functional we want to hook into. We
# could also have used the functional *itself*, i.e.
# ``@register_kernel(functional=F.hflip, ...)``.
#
# Now that we have registered our kernel, we can call the functional API on a
# ``MyTVTensor`` instance:
my_dp = MyTVTensor(torch.rand(3, 256, 256))
_ = F.hflip(my_dp)
# %%
# And we can also use the
# :class:`~torchvision.transforms.v2.RandomHorizontalFlip` transform, since it relies on :func:`~torchvision.transforms.v2.functional.hflip` internally:
t = v2.RandomHorizontalFlip(p=1)
_ = t(my_dp)
# %%
# .. note::
#
# We cannot register a kernel for a transform class, we can only register a
# kernel for a **functional**. The reason we can't register a transform
# class is because one transform may internally rely on more than one
# functional, so in general we can't register a single kernel for a given
# class.
#
# .. _param_forwarding:
#
# Parameter forwarding, and ensuring future compatibility of your kernels
# -----------------------------------------------------------------------
#
# The functional API that you're hooking into is public and therefore
# **backward** compatible: we guarantee that the parameters of these functionals
# won't be removed or renamed without a proper deprecation cycle. However, we
# don't guarantee **forward** compatibility, and we may add new parameters in
# the future.
#
# Imagine that in a future version, Torchvision adds a new ``inplace`` parameter
# to its :func:`~torchvision.transforms.v2.functional.hflip` functional. If you
# already defined and registered your own kernel as
def hflip_my_tv_tensor(my_dp): # noqa
print("Flipping!")
out = my_dp.flip(-1)
return tv_tensors.wrap(out, like=my_dp)
# %%
# then calling ``F.hflip(my_dp)`` will **fail**, because ``hflip`` will try to
# pass the new ``inplace`` parameter to your kernel, but your kernel doesn't
# accept it.
#
# For this reason, we recommend to always define your kernels with
# ``*args, **kwargs`` in their signature, as done above. This way, your kernel
# will be able to accept any new parameter that we may add in the future.
# (Technically, adding `**kwargs` only should be enough).
|
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
img_scales = [(1333, 800), (666, 400), (2000, 1200)]
tta_pipeline = [
dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')),
dict(
type='TestTimeAug',
transforms=[[
dict(type='Resize', scale=s, keep_ratio=True) for s in img_scales
], [
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
], [dict(type='LoadAnnotations', with_bbox=True)],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape',
'img_shape', 'scale_factor', 'flip',
'flip_direction'))
]])
]
|
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
img_scales = [(1333, 800), (666, 400), (2000, 1200)]
tta_pipeline = [
dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')),
dict(
type='TestTimeAug',
transforms=[[
dict(type='Resize', scale=s, keep_ratio=True) for s in img_scales
], [
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape',
'img_shape', 'scale_factor', 'flip',
'flip_direction'))
]])
]
|
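Both configs above enumerate the Cartesian product of their `TestTimeAug` transform lists, so each image is evaluated under 3 resize scales x 2 flip settings = 6 augmented views, whose detections are then merged by the DetTTAModel NMS (iou_threshold=0.5).

# Quick count of the augmented views produced by the TTA pipelines above.
img_scales = [(1333, 800), (666, 400), (2000, 1200)]
flip_settings = [1.0, 0.0]  # RandomFlip prob=1. and prob=0.
print(len(img_scales) * len(flip_settings))  # 6 views per image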
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
import torch.nn.functional as F
from sentence_transformers.sparse_encoder import SparseEncoder
def normalized_mean_squared_error(reconstruction: torch.Tensor, original_input: torch.Tensor) -> torch.Tensor:
"""
:param reconstruction: output of Autoencoder.decode (shape: [batch, n_inputs])
:param original_input: input of Autoencoder.encode (shape: [batch, n_inputs])
:return: normalized mean squared error (shape: [1])
"""
return (((reconstruction - original_input) ** 2).mean(dim=1) / (original_input**2).mean(dim=1)).mean()
class CSRReconstructionLoss(nn.Module):
def __init__(self, model: SparseEncoder, beta: float = 1.0) -> None:
"""
CSRReconstructionLoss implements the reconstruction loss component for Contrastive Sparse Representation (CSR) models.
This loss ensures that the sparse encoding can accurately reconstruct the original model embeddings through
three components:
1. A primary reconstruction loss (L_k) that measures the error between the original embedding and its
reconstruction using the top-k sparse components.
2. A secondary reconstruction loss (L_4k) that measures the error using the top-4k sparse components.
3. An auxiliary loss (L_aux) that helps to learn residual information.
Args:
model: SparseEncoder model with autoencoder components
beta: Weight for the auxiliary loss component (L_aux)
References:
- For more details, see the paper "Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation"
https://arxiv.org/abs/2503.01776
Requirements:
1. The model must be configured to output the necessary reconstruction components
2. Used with SparseEncoder models that implement compositional sparse autoencoding
Relations:
- Used as a component within :class:`CSRLoss` combined with a contrastive loss
Example:
::
This loss is typically used within the :class:`CSRLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
self.beta = beta
def forward(self, sentence_features: Iterable[dict[str, torch.Tensor]]) -> dict[str, torch.Tensor]:
"""
Forward pass of the CSRReconstruction Loss module.
This method is used when the loss is computed as part of the model's forward pass.
Args:
sentence_features: Iterable of dictionaries containing sentence embeddings and their sparse representations
Returns:
Dictionary containing the total loss and individual loss components
"""
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(outputs)
def compute_loss_from_embeddings(self, outputs: list[dict[str, torch.Tensor]]) -> dict[str, torch.Tensor]:
"""
Compute the CSRReconstruction loss from embeddings.
Args:
outputs: List of dictionaries containing sentence embeddings and their sparse representations
Returns:
Dictionary containing the total loss and individual loss components
"""
# Initialize loss components
total_L_k = 0.0
total_L_4k = 0.0
total_L_aux = 0.0
# Process each sentence feature
for features in outputs:
x = features["sentence_embedding_backbone"]
recons_k = features["decoded_embedding_k"]
recons_4k = features["decoded_embedding_4k"]
recons_aux = features["decoded_embedding_aux"]
reconsk_pre_bias = features["decoded_embedding_k_pre_bias"]
            # L(k) = ||f(x) - f(dx)_k||₂²
L_k = F.mse_loss(x, recons_k)
            # L(4k) = ||f(x) - f(dx)_4k||₂²
L_4k = F.mse_loss(x, recons_4k)
            # L_aux = ||e - ê||₂²
L_aux = normalized_mean_squared_error(recons_aux, x - reconsk_pre_bias)
# Accumulate losses
total_L_k += L_k
total_L_4k += L_4k
total_L_aux += L_aux
# Average losses over batch
batch_size = len(outputs)
if batch_size > 0:
total_L_k /= batch_size
total_L_4k /= batch_size
total_L_aux /= batch_size
        # Total loss: L_recon = L(k) + L(4k)/8 + β*L_aux
total_loss = total_L_k + total_L_4k / 8 + self.beta * total_L_aux
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {"beta": self.beta}
|
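A toy sketch of the objective implemented above, L_recon = L(k) + L(4k)/8 + beta * L_aux, using random stand-ins for the model outputs; it only shows how the three terms combine, not real embeddings.

import torch
import torch.nn.functional as F

x = torch.randn(4, 16)                      # backbone embeddings
recons_k = x + 0.1 * torch.randn_like(x)    # stand-in for decoded_embedding_k
recons_4k = x + 0.05 * torch.randn_like(x)  # stand-in for decoded_embedding_4k
recons_aux = torch.randn(4, 16)             # stand-in for decoded_embedding_aux
residual = torch.randn(4, 16)               # stand-in for x - decoded_embedding_k_pre_bias
beta = 1.0

L_k = F.mse_loss(x, recons_k)
L_4k = F.mse_loss(x, recons_4k)
L_aux = (((recons_aux - residual) ** 2).mean(dim=1) / (residual**2).mean(dim=1)).mean()
print(L_k + L_4k / 8 + beta * L_aux)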
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
import torch.nn.functional as F
from sentence_transformers.sparse_encoder import SparseEncoder
def normalized_mean_squared_error(reconstruction: torch.Tensor, original_input: torch.Tensor) -> torch.Tensor:
"""
:param reconstruction: output of Autoencoder.decode (shape: [batch, n_inputs])
:param original_input: input of Autoencoder.encode (shape: [batch, n_inputs])
:return: normalized mean squared error (shape: [1])
"""
return (((reconstruction - original_input) ** 2).mean(dim=1) / (original_input**2).mean(dim=1)).mean()
class CSRReconstructionLoss(nn.Module):
"""
CSRReconstructionLoss implements the reconstruction loss component for Contrastive Sparse Representation (CSR) models.
This loss ensures that the sparse encoding can accurately reconstruct the original model embeddings through
three components:
1. A primary reconstruction loss (L_k) that measures the error between the original embedding and its
reconstruction using the top-k sparse components.
2. A secondary reconstruction loss (L_4k) that measures the error using the top-4k sparse components.
3. An auxiliary loss (L_aux) that helps to learn residual information.
Args:
model: SparseEncoder model with autoencoder components
beta: Weight for the auxiliary loss component (L_aux)
References:
- For more details, see the paper "Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation"
https://arxiv.org/abs/2503.01776
Requirements:
1. The model must be configured to output the necessary reconstruction components
2. Used with SparseEncoder models that implement compositional sparse autoencoding
Relations:
- Used as a component within :class:`CSRLoss` combined with a contrastive loss
Example:
::
This loss is typically used within the :class:`CSRLoss` class, which combines it with other loss components.
"""
def __init__(self, model: SparseEncoder, beta: float = 1.0) -> None:
super().__init__()
self.model = model
self.beta = beta
    def forward(self, sentence_features: Iterable[dict[str, torch.Tensor]]) -> torch.Tensor:
"""
Forward pass of the CSRReconstruction Loss module.
This method is used when the loss is computed as part of the model's forward pass.
Args:
sentence_features: Iterable of dictionaries containing sentence embeddings and their sparse representations
Returns:
            The total reconstruction loss over the batch
"""
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(outputs)
    def compute_loss_from_embeddings(self, outputs: list[dict[str, torch.Tensor]]) -> torch.Tensor:
"""
Compute the CSRReconstruction loss from embeddings.
Args:
outputs: List of dictionaries containing sentence embeddings and their sparse representations
Returns:
            The total reconstruction loss over the batch
"""
# Initialize loss components
total_L_k = 0.0
total_L_4k = 0.0
total_L_aux = 0.0
# Process each sentence feature
for features in outputs:
x = features["sentence_embedding_backbone"]
recons_k = features["decoded_embedding_k"]
recons_4k = features["decoded_embedding_4k"]
recons_aux = features["decoded_embedding_aux"]
reconsk_pre_bias = features["decoded_embedding_k_pre_bias"]
            # L(k) = ||f(x) - f(dx)_k||₂²
L_k = F.mse_loss(x, recons_k)
            # L(4k) = ||f(x) - f(dx)_4k||₂²
L_4k = F.mse_loss(x, recons_4k)
            # L_aux = ||e - ê||₂²
L_aux = normalized_mean_squared_error(recons_aux, x - reconsk_pre_bias)
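            # (recons_aux is trained to match the residual left over by the top-k reconstruction,
            # so the auxiliary components learn whatever information the top-k components missed)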
# Accumulate losses
total_L_k += L_k
total_L_4k += L_4k
total_L_aux += L_aux
# Average losses over batch
batch_size = len(outputs)
if batch_size > 0:
total_L_k /= batch_size
total_L_4k /= batch_size
total_L_aux /= batch_size
# Total loss: L_recon = L(k) + L(4k)/8 + Ξ²*L_aux
total_loss = total_L_k + total_L_4k / 8 + self.beta * total_L_aux
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {"beta": self.beta}
|
import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Training parameters
model_name = "distilbert-base-uncased"
train_batch_size = 128
num_epochs = 1
max_seq_length = 32
# Save path to store our model
model_save_path = "output/training_stsb_simcse-{}-{}-{}".format(
model_name, train_batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "data/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Here we define our SentenceTransformer model
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = "data/wiki1m_for_simcse.txt"
if not os.path.exists(wikipedia_dataset_path):
util.http_get(
"https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt",
wikipedia_dataset_path,
)
# train_samples is a list of InputExample objects where we pass the same sentence twice to texts, i.e. texts=[sent, sent]
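# (Because both texts are identical, the only difference between their two embeddings comes from dropout inside
#  the transformer; these two "views" act as the positive pair for MultipleNegativesRankingLoss, which is the SimCSE trick.)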
train_samples = []
with open(wikipedia_dataset_path, encoding="utf8") as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_samples.append(InputExample(texts=[line, line]))
# Read STSbenchmark dataset and use it as development set
logging.info("Read STSbenchmark dev dataset")
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
dev_samples, batch_size=train_batch_size, name="sts-dev"
)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
test_samples, batch_size=train_batch_size, name="sts-test"
)
# We train our model using the MultipleNegativesRankingLoss
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size, drop_last=True)
train_loss = losses.MultipleNegativesRankingLoss(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
evaluation_steps = int(len(train_dataloader) * 0.1) # Evaluate every 10% of the data
logging.info(f"Training sentences: {len(train_samples)}")
logging.info(f"Warmup-steps: {warmup_steps}")
logging.info("Performance before training")
dev_evaluator(model)
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=num_epochs,
evaluation_steps=evaluation_steps,
warmup_steps=warmup_steps,
output_path=model_save_path,
optimizer_params={"lr": 5e-5},
use_amp=True, # Set to True, if your GPU supports FP16 cores
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator(model, output_path=model_save_path)
|
import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Training parameters
model_name = "distilbert-base-uncased"
train_batch_size = 128
num_epochs = 1
max_seq_length = 32
# Save path to store our model
model_save_path = "output/training_stsb_simcse-{}-{}-{}".format(
model_name, train_batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "data/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Here we define our SentenceTransformer model
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = "data/wiki1m_for_simcse.txt"
if not os.path.exists(wikipedia_dataset_path):
util.http_get(
"https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt",
wikipedia_dataset_path,
)
# train_samples is a list of InputExample objects where we pass the same sentence twice to texts, i.e. texts=[sent, sent]
train_samples = []
with open(wikipedia_dataset_path, "r", encoding="utf8") as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_samples.append(InputExample(texts=[line, line]))
# Read STSbenchmark dataset and use it as development set
logging.info("Read STSbenchmark dev dataset")
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
dev_samples, batch_size=train_batch_size, name="sts-dev"
)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
test_samples, batch_size=train_batch_size, name="sts-test"
)
# We train our model using the MultipleNegativesRankingLoss
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size, drop_last=True)
train_loss = losses.MultipleNegativesRankingLoss(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
evaluation_steps = int(len(train_dataloader) * 0.1) # Evaluate every 10% of the data
logging.info("Training sentences: {}".format(len(train_samples)))
logging.info("Warmup-steps: {}".format(warmup_steps))
logging.info("Performance before training")
dev_evaluator(model)
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=num_epochs,
evaluation_steps=evaluation_steps,
warmup_steps=warmup_steps,
output_path=model_save_path,
optimizer_params={"lr": 5e-5},
use_amp=True, # Set to True, if your GPU supports FP16 cores
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator(model, output_path=model_save_path)
|
import csv
import os
from pathlib import Path
from torchaudio.datasets import ljspeech
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
_TRANSCRIPTS = [
"Test transcript 1",
"Test transcript 2",
"Test transcript 3",
"In 1465 Sweynheim and Pannartz began printing in the monastery of Subiaco near Rome,",
]
_NORMALIZED_TRANSCRIPT = [
"Test transcript one",
"Test transcript two",
"Test transcript three",
"In fourteen sixty-five Sweynheim and Pannartz began printing in the monastery of Subiaco near Rome,",
]
def get_mock_dataset(root_dir):
"""
root_dir: path to the mocked dataset
"""
mocked_data = []
base_dir = os.path.join(root_dir, "LJSpeech-1.1")
archive_dir = os.path.join(base_dir, "wavs")
os.makedirs(archive_dir, exist_ok=True)
metadata_path = os.path.join(base_dir, "metadata.csv")
sample_rate = 22050
with open(metadata_path, mode="w", newline="") as metadata_file:
metadata_writer = csv.writer(metadata_file, delimiter="|", quoting=csv.QUOTE_NONE)
for i, (transcript, normalized_transcript) in enumerate(zip(_TRANSCRIPTS, _NORMALIZED_TRANSCRIPT)):
fileid = f"LJ001-{i:04d}"
metadata_writer.writerow([fileid, transcript, normalized_transcript])
filename = fileid + ".wav"
path = os.path.join(archive_dir, filename)
data = get_whitenoise(sample_rate=sample_rate, duration=1, n_channels=1, dtype="int16", seed=i)
save_wav(path, data, sample_rate)
mocked_data.append(normalize_wav(data))
return mocked_data, _TRANSCRIPTS, _NORMALIZED_TRANSCRIPT
class TestLJSpeech(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
data, _transcripts, _normalized_transcript = [], [], []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.data, cls._transcripts, cls._normalized_transcript = get_mock_dataset(cls.root_dir)
def _test_ljspeech(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, transcript, normalized_transcript) in enumerate(dataset):
expected_transcript = self._transcripts[i]
expected_normalized_transcript = self._normalized_transcript[i]
expected_data = self.data[i]
self.assertEqual(expected_data, waveform, atol=5e-5, rtol=1e-8)
            assert sample_rate == 22050  # the mocked dataset is generated at 22050 Hz
assert transcript == expected_transcript
assert normalized_transcript == expected_normalized_transcript
n_ite += 1
assert n_ite == len(self.data)
def test_ljspeech_str(self):
dataset = ljspeech.LJSPEECH(self.root_dir)
self._test_ljspeech(dataset)
def test_ljspeech_path(self):
dataset = ljspeech.LJSPEECH(Path(self.root_dir))
self._test_ljspeech(dataset)
|
import csv
import os
from pathlib import Path
from torchaudio.datasets import ljspeech
from torchaudio_unittest.common_utils import (
get_whitenoise,
normalize_wav,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
_TRANSCRIPTS = [
"Test transcript 1",
"Test transcript 2",
"Test transcript 3",
"In 1465 Sweynheim and Pannartz began printing in the monastery of Subiaco near Rome,",
]
_NORMALIZED_TRANSCRIPT = [
"Test transcript one",
"Test transcript two",
"Test transcript three",
"In fourteen sixty-five Sweynheim and Pannartz began printing in the monastery of Subiaco near Rome,",
]
def get_mock_dataset(root_dir):
"""
root_dir: path to the mocked dataset
"""
mocked_data = []
base_dir = os.path.join(root_dir, "LJSpeech-1.1")
archive_dir = os.path.join(base_dir, "wavs")
os.makedirs(archive_dir, exist_ok=True)
metadata_path = os.path.join(base_dir, "metadata.csv")
sample_rate = 22050
with open(metadata_path, mode="w", newline="") as metadata_file:
metadata_writer = csv.writer(metadata_file, delimiter="|", quoting=csv.QUOTE_NONE)
for i, (transcript, normalized_transcript) in enumerate(zip(_TRANSCRIPTS, _NORMALIZED_TRANSCRIPT)):
fileid = f"LJ001-{i:04d}"
metadata_writer.writerow([fileid, transcript, normalized_transcript])
filename = fileid + ".wav"
path = os.path.join(archive_dir, filename)
data = get_whitenoise(sample_rate=sample_rate, duration=1, n_channels=1, dtype="int16", seed=i)
save_wav(path, data, sample_rate)
mocked_data.append(normalize_wav(data))
return mocked_data, _TRANSCRIPTS, _NORMALIZED_TRANSCRIPT
class TestLJSpeech(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
data, _transcripts, _normalized_transcript = [], [], []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.data, cls._transcripts, cls._normalized_transcript = get_mock_dataset(cls.root_dir)
def _test_ljspeech(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, transcript, normalized_transcript) in enumerate(dataset):
expected_transcript = self._transcripts[i]
expected_normalized_transcript = self._normalized_transcript[i]
expected_data = self.data[i]
self.assertEqual(expected_data, waveform, atol=5e-5, rtol=1e-8)
            assert sample_rate == 22050  # the mocked dataset is generated at 22050 Hz
assert transcript == expected_transcript
assert normalized_transcript == expected_normalized_transcript
n_ite += 1
assert n_ite == len(self.data)
def test_ljspeech_str(self):
dataset = ljspeech.LJSPEECH(self.root_dir)
self._test_ljspeech(dataset)
def test_ljspeech_path(self):
dataset = ljspeech.LJSPEECH(Path(self.root_dir))
self._test_ljspeech(dataset)
|
""" """
from torch.utils.data import IterableDataset
import numpy as np
from typing import List
from ..readers import InputExample
import logging
logger = logging.getLogger(__name__)
class SentenceLabelDataset(IterableDataset):
"""
This dataset can be used for some specific Triplet Losses like BATCH_HARD_TRIPLET_LOSS which requires
multiple examples with the same label in a batch.
It draws n consecutive, random and unique samples from one label at a time. This is repeated for each label.
Labels with fewer than n unique samples are ignored.
    This also applies to drawing without replacement: once fewer than n samples remain for a label, it is skipped.
    This *DOES NOT* check whether there are more labels than the batch size, or whether the batch size is divisible
    by the number of samples drawn per label.
"""
def __init__(self, examples: List[InputExample], samples_per_label: int = 2, with_replacement: bool = False):
"""
Creates a LabelSampler for a SentenceLabelDataset.
:param examples:
a list with InputExamples
:param samples_per_label:
the number of consecutive, random and unique samples drawn per label. Batch size should be a multiple of samples_per_label
:param with_replacement:
            if this is True, then one sample can be drawn in multiple draws, but still not multiple times in the same
            drawing.
            if this is False, then each sample is drawn at most once (depending on the total number of samples per label).
"""
super().__init__()
self.samples_per_label = samples_per_label
# Group examples by label
label2ex = {}
for example in examples:
if example.label not in label2ex:
label2ex[example.label] = []
label2ex[example.label].append(example)
        # Include only labels with at least samples_per_label examples
self.grouped_inputs = []
self.groups_right_border = []
num_labels = 0
for label, label_examples in label2ex.items():
if len(label_examples) >= self.samples_per_label:
self.grouped_inputs.extend(label_examples)
self.groups_right_border.append(
len(self.grouped_inputs)
) # At which position does this label group / bucket end?
num_labels += 1
self.label_range = np.arange(num_labels)
self.with_replacement = with_replacement
np.random.shuffle(self.label_range)
logger.info(
"SentenceLabelDataset: {} examples, from which {} examples could be used (those labels appeared at least {} times). {} different labels found.".format(
len(examples), len(self.grouped_inputs), self.samples_per_label, num_labels
)
)
def __iter__(self):
label_idx = 0
count = 0
already_seen = {}
while count < len(self.grouped_inputs):
label = self.label_range[label_idx]
if label not in already_seen:
already_seen[label] = set()
left_border = 0 if label == 0 else self.groups_right_border[label - 1]
right_border = self.groups_right_border[label]
if self.with_replacement:
selection = np.arange(left_border, right_border)
else:
selection = [i for i in np.arange(left_border, right_border) if i not in already_seen[label]]
if len(selection) >= self.samples_per_label:
for element_idx in np.random.choice(selection, self.samples_per_label, replace=False):
count += 1
already_seen[label].add(element_idx)
yield self.grouped_inputs[element_idx]
label_idx += 1
if label_idx >= len(self.label_range):
label_idx = 0
already_seen = {}
np.random.shuffle(self.label_range)
def __len__(self):
return len(self.grouped_inputs)
|
"""
"""
from torch.utils.data import IterableDataset
import numpy as np
from typing import List
from ..readers import InputExample
import logging
logger = logging.getLogger(__name__)
class SentenceLabelDataset(IterableDataset):
"""
This dataset can be used for some specific Triplet Losses like BATCH_HARD_TRIPLET_LOSS which requires
multiple examples with the same label in a batch.
It draws n consecutive, random and unique samples from one label at a time. This is repeated for each label.
Labels with fewer than n unique samples are ignored.
    This also applies to drawing without replacement: once fewer than n samples remain for a label, it is skipped.
    This *DOES NOT* check whether there are more labels than the batch size, or whether the batch size is divisible
    by the number of samples drawn per label.
"""
def __init__(self, examples: List[InputExample], samples_per_label: int = 2, with_replacement: bool = False):
"""
Creates a LabelSampler for a SentenceLabelDataset.
:param examples:
a list with InputExamples
:param samples_per_label:
the number of consecutive, random and unique samples drawn per label. Batch size should be a multiple of samples_per_label
:param with_replacement:
            if this is True, then one sample can be drawn in multiple draws, but still not multiple times in the same
            drawing.
            if this is False, then each sample is drawn at most once (depending on the total number of samples per label).
"""
super().__init__()
self.samples_per_label = samples_per_label
# Group examples by label
label2ex = {}
for example in examples:
if example.label not in label2ex:
label2ex[example.label] = []
label2ex[example.label].append(example)
        # Include only labels with at least samples_per_label examples
self.grouped_inputs = []
self.groups_right_border = []
num_labels = 0
for label, label_examples in label2ex.items():
if len(label_examples) >= self.samples_per_label:
self.grouped_inputs.extend(label_examples)
self.groups_right_border.append(
len(self.grouped_inputs)
) # At which position does this label group / bucket end?
num_labels += 1
self.label_range = np.arange(num_labels)
self.with_replacement = with_replacement
np.random.shuffle(self.label_range)
logger.info(
"SentenceLabelDataset: {} examples, from which {} examples could be used (those labels appeared at least {} times). {} different labels found.".format(
len(examples), len(self.grouped_inputs), self.samples_per_label, num_labels
)
)
def __iter__(self):
label_idx = 0
count = 0
already_seen = {}
while count < len(self.grouped_inputs):
label = self.label_range[label_idx]
if label not in already_seen:
already_seen[label] = set()
left_border = 0 if label == 0 else self.groups_right_border[label - 1]
right_border = self.groups_right_border[label]
if self.with_replacement:
selection = np.arange(left_border, right_border)
else:
selection = [i for i in np.arange(left_border, right_border) if i not in already_seen[label]]
if len(selection) >= self.samples_per_label:
for element_idx in np.random.choice(selection, self.samples_per_label, replace=False):
count += 1
already_seen[label].add(element_idx)
yield self.grouped_inputs[element_idx]
label_idx += 1
if label_idx >= len(self.label_range):
label_idx = 0
already_seen = {}
np.random.shuffle(self.label_range)
def __len__(self):
return len(self.grouped_inputs)
|
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.22.0"
SCIPY_MIN_VERSION = "1.8.0"
JOBLIB_MIN_VERSION = "1.2.0"
THREADPOOLCTL_MIN_VERSION = "3.1.0"
PYTEST_MIN_VERSION = "7.1.2"
CYTHON_MIN_VERSION = "3.0.10"
# 'build' and 'install' are included to have structured metadata for CI.
# They will NOT be included in setup's extras_require
# The values are (version_spec, comma separated tags)
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "build, install"),
"scipy": (SCIPY_MIN_VERSION, "build, install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"cython": (CYTHON_MIN_VERSION, "build"),
"meson-python": ("0.16.0", "build"),
"matplotlib": ("3.5.0", "benchmark, docs, examples, tests"),
"scikit-image": ("0.19.0", "docs, examples, tests"),
"pandas": ("1.4.0", "benchmark, docs, examples, tests"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "benchmark, docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"ruff": ("0.11.0", "tests"),
"black": ("24.3.0", "tests"),
"mypy": ("1.15", "tests"),
"pyamg": ("5.0.0", "tests"),
"polars": ("0.20.30", "docs, tests"),
"pyarrow": ("12.0.0", "tests"),
"sphinx": ("7.3.7", "docs"),
"sphinx-copybutton": ("0.5.2", "docs"),
"sphinx-gallery": ("0.17.1", "docs"),
"numpydoc": ("1.2.0", "docs, tests"),
"Pillow": ("8.4.0", "docs"),
"pooch": ("1.6.0", "docs, examples, tests"),
"sphinx-prompt": ("1.4.0", "docs"),
"sphinxext-opengraph": ("0.9.1", "docs"),
"plotly": ("5.14.0", "docs, examples"),
"sphinxcontrib-sass": ("0.3.4", "docs"),
"sphinx-remove-toctrees": ("1.0.0.post1", "docs"),
"sphinx-design": ("0.6.0", "docs"),
"pydata-sphinx-theme": ("0.15.3", "docs"),
"towncrier": ("24.8.0", "docs"),
# XXX: Pin conda-lock to the latest released version (needs manual update
# from time to time)
"conda-lock": ("2.5.7", "maintenance"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
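# For example, with the versions above, tag_to_packages["build"] becomes
# ["numpy>=1.22.0", "scipy>=1.8.0", "cython>=3.0.10", "meson-python>=0.16.0"]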
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
|
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.22.0"
SCIPY_MIN_VERSION = "1.8.0"
JOBLIB_MIN_VERSION = "1.2.0"
THREADPOOLCTL_MIN_VERSION = "3.1.0"
PYTEST_MIN_VERSION = "7.1.2"
CYTHON_MIN_VERSION = "3.0.10"
# 'build' and 'install' are included to have structured metadata for CI.
# They will NOT be included in setup's extras_require
# The values are (version_spec, comma separated tags)
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "build, install"),
"scipy": (SCIPY_MIN_VERSION, "build, install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"cython": (CYTHON_MIN_VERSION, "build"),
"meson-python": ("0.16.0", "build"),
"matplotlib": ("3.5.0", "benchmark, docs, examples, tests"),
"scikit-image": ("0.19.0", "docs, examples, tests"),
"pandas": ("1.4.0", "benchmark, docs, examples, tests"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "benchmark, docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"ruff": ("0.5.1", "tests"),
"black": ("24.3.0", "tests"),
"mypy": ("1.15", "tests"),
"pyamg": ("5.0.0", "tests"),
"polars": ("0.20.30", "docs, tests"),
"pyarrow": ("12.0.0", "tests"),
"sphinx": ("7.3.7", "docs"),
"sphinx-copybutton": ("0.5.2", "docs"),
"sphinx-gallery": ("0.17.1", "docs"),
"numpydoc": ("1.2.0", "docs, tests"),
"Pillow": ("8.4.0", "docs"),
"pooch": ("1.6.0", "docs, examples, tests"),
"sphinx-prompt": ("1.4.0", "docs"),
"sphinxext-opengraph": ("0.9.1", "docs"),
"plotly": ("5.14.0", "docs, examples"),
"sphinxcontrib-sass": ("0.3.4", "docs"),
"sphinx-remove-toctrees": ("1.0.0.post1", "docs"),
"sphinx-design": ("0.6.0", "docs"),
"pydata-sphinx-theme": ("0.15.3", "docs"),
"towncrier": ("24.8.0", "docs"),
# XXX: Pin conda-lock to the latest released version (needs manual update
# from time to time)
"conda-lock": ("2.5.7", "maintenance"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
|
import os
from pathlib import Path
from typing import Callable, Optional, Union
from .folder import ImageFolder
from .utils import download_and_extract_archive
class EuroSAT(ImageFolder):
"""RGB version of the `EuroSAT <https://github.com/phelber/eurosat>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where ``root/eurosat`` exists.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
"""
def __init__(
self,
root: Union[str, Path],
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
self.root = os.path.expanduser(root)
self._base_folder = os.path.join(self.root, "eurosat")
self._data_folder = os.path.join(self._base_folder, "2750")
if download:
self.download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
super().__init__(self._data_folder, transform=transform, target_transform=target_transform)
self.root = os.path.expanduser(root)
def __len__(self) -> int:
return len(self.samples)
def _check_exists(self) -> bool:
return os.path.exists(self._data_folder)
def download(self) -> None:
if self._check_exists():
return
os.makedirs(self._base_folder, exist_ok=True)
download_and_extract_archive(
"https://madm.dfki.de/files/sentinel/EuroSAT.zip",
download_root=self._base_folder,
md5="c8fa014336c82ac7804f0398fcb19387",
)
|
import os
from typing import Callable, Optional
from .folder import ImageFolder
from .utils import download_and_extract_archive
class EuroSAT(ImageFolder):
"""RGB version of the `EuroSAT <https://github.com/phelber/eurosat>`_ Dataset.
Args:
root (string): Root directory of dataset where ``root/eurosat`` exists.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
"""
def __init__(
self,
root: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
self.root = os.path.expanduser(root)
self._base_folder = os.path.join(self.root, "eurosat")
self._data_folder = os.path.join(self._base_folder, "2750")
if download:
self.download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
super().__init__(self._data_folder, transform=transform, target_transform=target_transform)
self.root = os.path.expanduser(root)
def __len__(self) -> int:
return len(self.samples)
def _check_exists(self) -> bool:
return os.path.exists(self._data_folder)
def download(self) -> None:
if self._check_exists():
return
os.makedirs(self._base_folder, exist_ok=True)
download_and_extract_archive(
"https://madm.dfki.de/files/sentinel/EuroSAT.zip",
download_root=self._base_folder,
md5="c8fa014336c82ac7804f0398fcb19387",
)
|
import numpy as np
import pytest
import keras
from keras.src import layers
from keras.src import losses
from keras.src import metrics
from keras.src import optimizers
from keras.src import testing
class MyModel(keras.Model):
def __init__(self, hidden_dim, output_dim, **kwargs):
super().__init__(**kwargs)
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.dense1 = layers.Dense(hidden_dim, activation="relu")
self.dense2 = layers.Dense(hidden_dim, activation="relu")
self.dense3 = layers.Dense(output_dim)
def call(self, x):
x = self.dense1(x)
x = self.dense2(x)
return self.dense3(x)
class BasicFlowTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basic_fit(self):
model = MyModel(hidden_dim=2, output_dim=1)
x = np.random.random((128, 4))
y = np.random.random((128, 4))
batch_size = 32
epochs = 3
model.compile(
optimizer=optimizers.SGD(learning_rate=0.001),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
output_before_fit = model(x)
model.fit(
x, y, batch_size=batch_size, epochs=epochs, validation_split=0.2
)
output_after_fit = model(x)
self.assertNotAllClose(output_before_fit, output_after_fit)
def test_basic_fit_no_training(self):
model = MyModel(hidden_dim=2, output_dim=1)
x = np.random.random((128, 4))
model.predict(x)
model(x)
|
import numpy as np
import pytest
import keras
from keras.src import layers
from keras.src import losses
from keras.src import metrics
from keras.src import optimizers
from keras.src import testing
class MyModel(keras.Model):
def __init__(self, hidden_dim, output_dim, **kwargs):
super().__init__(**kwargs)
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.dense1 = layers.Dense(hidden_dim, activation="relu")
self.dense2 = layers.Dense(hidden_dim, activation="relu")
self.dense3 = layers.Dense(output_dim)
def call(self, x):
x = self.dense1(x)
x = self.dense2(x)
return self.dense3(x)
@pytest.mark.requires_trainable_backend
class BasicFlowTest(testing.TestCase):
def test_basic_fit(self):
model = MyModel(hidden_dim=2, output_dim=1)
x = np.random.random((128, 4))
y = np.random.random((128, 4))
batch_size = 32
epochs = 3
model.compile(
optimizer=optimizers.SGD(learning_rate=0.001),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
output_before_fit = model(x)
model.fit(
x, y, batch_size=batch_size, epochs=epochs, validation_split=0.2
)
output_after_fit = model(x)
self.assertNotAllClose(output_before_fit, output_after_fit)
|
_base_ = [
'../_base_/models/faster-rcnn_r50-caffe-dc5.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
|
_base_ = [
'../_base_/models/faster-rcnn_r50-caffe-dc5.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
import unittest
import torchaudio
from torchaudio.prototype.pipelines import VGGISH
class VGGishPipelineTest(unittest.TestCase):
def test_vggish(self):
input_sr = VGGISH.sample_rate
input_proc = VGGISH.get_input_processor()
model = VGGISH.get_model()
path = torchaudio.utils.download_asset("test-assets/Chopin_Ballade_-1_In_G_Minor,_Op._23_excerpt.mp3")
waveform, sr = torchaudio.load(path, backend="ffmpeg")
waveform = waveform.mean(axis=0)
waveform = torchaudio.functional.resample(waveform, sr, input_sr)
batch = input_proc(waveform)
assert batch.shape == (62, 1, 96, 64)
output = model(batch)
assert output.shape == (62, 128)
|
import torchaudio
from torchaudio.prototype.pipelines import VGGISH
def test_vggish():
input_sr = VGGISH.sample_rate
input_proc = VGGISH.get_input_processor()
model = VGGISH.get_model()
path = torchaudio.utils.download_asset("test-assets/Chopin_Ballade_-1_In_G_Minor,_Op._23_excerpt.mp3")
waveform, sr = torchaudio.load(path, backend="ffmpeg")
waveform = waveform.mean(axis=0)
waveform = torchaudio.functional.resample(waveform, sr, input_sr)
batch = input_proc(waveform)
assert batch.shape == (62, 1, 96, 64)
output = model(batch)
assert output.shape == (62, 128)
|
# CoSENTLoss must be imported before AnglELoss
from __future__ import annotations
from .CoSENTLoss import CoSENTLoss # isort: skip
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .AnglELoss import AnglELoss
from .BatchAllTripletLoss import BatchAllTripletLoss
from .BatchHardSoftMarginTripletLoss import BatchHardSoftMarginTripletLoss
from .BatchHardTripletLoss import (
BatchHardTripletLoss,
BatchHardTripletLossDistanceFunction,
)
from .BatchSemiHardTripletLoss import BatchSemiHardTripletLoss
from .CachedGISTEmbedLoss import CachedGISTEmbedLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .CachedMultipleNegativesSymmetricRankingLoss import (
CachedMultipleNegativesSymmetricRankingLoss,
)
from .ContrastiveLoss import ContrastiveLoss, SiameseDistanceMetric
from .ContrastiveTensionLoss import (
ContrastiveTensionDataLoader,
ContrastiveTensionLoss,
ContrastiveTensionLossInBatchNegatives,
)
from .CosineSimilarityLoss import CosineSimilarityLoss
from .DenoisingAutoEncoderLoss import DenoisingAutoEncoderLoss
from .DistillKLDivLoss import DistillKLDivLoss
from .GISTEmbedLoss import GISTEmbedLoss
from .MarginMSELoss import MarginMSELoss
from .Matryoshka2dLoss import Matryoshka2dLoss
from .MatryoshkaLoss import MatryoshkaLoss
from .MegaBatchMarginLoss import MegaBatchMarginLoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss
from .OnlineContrastiveLoss import OnlineContrastiveLoss
from .SoftmaxLoss import SoftmaxLoss
from .TripletLoss import TripletDistanceMetric, TripletLoss
__all__ = [
"AdaptiveLayerLoss",
"CosineSimilarityLoss",
"SoftmaxLoss",
"MultipleNegativesRankingLoss",
"MultipleNegativesSymmetricRankingLoss",
"TripletLoss",
"TripletDistanceMetric",
"MarginMSELoss",
"MatryoshkaLoss",
"Matryoshka2dLoss",
"MSELoss",
"ContrastiveLoss",
"SiameseDistanceMetric",
"CachedGISTEmbedLoss",
"CachedMultipleNegativesRankingLoss",
"CachedMultipleNegativesSymmetricRankingLoss",
"ContrastiveTensionLoss",
"ContrastiveTensionLossInBatchNegatives",
"ContrastiveTensionDataLoader",
"CoSENTLoss",
"AnglELoss",
"OnlineContrastiveLoss",
"MegaBatchMarginLoss",
"DenoisingAutoEncoderLoss",
"GISTEmbedLoss",
"BatchHardTripletLoss",
"BatchHardTripletLossDistanceFunction",
"BatchHardSoftMarginTripletLoss",
"BatchSemiHardTripletLoss",
"BatchAllTripletLoss",
"DistillKLDivLoss",
]
|
# CoSENTLoss must be imported before AnglELoss
from __future__ import annotations
from .CoSENTLoss import CoSENTLoss # isort: skip
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .AnglELoss import AnglELoss
from .BatchAllTripletLoss import BatchAllTripletLoss
from .BatchHardSoftMarginTripletLoss import BatchHardSoftMarginTripletLoss
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from .BatchSemiHardTripletLoss import BatchSemiHardTripletLoss
from .CachedGISTEmbedLoss import CachedGISTEmbedLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .CachedMultipleNegativesSymmetricRankingLoss import CachedMultipleNegativesSymmetricRankingLoss
from .ContrastiveLoss import ContrastiveLoss, SiameseDistanceMetric
from .ContrastiveTensionLoss import (
ContrastiveTensionDataLoader,
ContrastiveTensionLoss,
ContrastiveTensionLossInBatchNegatives,
)
from .CosineSimilarityLoss import CosineSimilarityLoss
from .DenoisingAutoEncoderLoss import DenoisingAutoEncoderLoss
from .DistillKLDivLoss import DistillKLDivLoss
from .GISTEmbedLoss import GISTEmbedLoss
from .MarginMSELoss import MarginMSELoss
from .Matryoshka2dLoss import Matryoshka2dLoss
from .MatryoshkaLoss import MatryoshkaLoss
from .MegaBatchMarginLoss import MegaBatchMarginLoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss
from .OnlineContrastiveLoss import OnlineContrastiveLoss
from .SoftmaxLoss import SoftmaxLoss
from .TripletLoss import TripletDistanceMetric, TripletLoss
__all__ = [
"AdaptiveLayerLoss",
"CosineSimilarityLoss",
"SoftmaxLoss",
"MultipleNegativesRankingLoss",
"MultipleNegativesSymmetricRankingLoss",
"TripletLoss",
"TripletDistanceMetric",
"MarginMSELoss",
"MatryoshkaLoss",
"Matryoshka2dLoss",
"MSELoss",
"ContrastiveLoss",
"SiameseDistanceMetric",
"CachedGISTEmbedLoss",
"CachedMultipleNegativesRankingLoss",
"CachedMultipleNegativesSymmetricRankingLoss",
"ContrastiveTensionLoss",
"ContrastiveTensionLossInBatchNegatives",
"ContrastiveTensionDataLoader",
"CoSENTLoss",
"AnglELoss",
"OnlineContrastiveLoss",
"MegaBatchMarginLoss",
"DenoisingAutoEncoderLoss",
"GISTEmbedLoss",
"BatchHardTripletLoss",
"BatchHardTripletLossDistanceFunction",
"BatchHardSoftMarginTripletLoss",
"BatchSemiHardTripletLoss",
"BatchAllTripletLoss",
"DistillKLDivLoss",
]
|
import pytest
from llama_index.embeddings.nvidia import NVIDIAEmbedding as Interface
from pytest_httpx import HTTPXMock
from requests_mock import Mocker
from contextlib import contextmanager
import os
from typing import Generator, Any
@pytest.fixture()
def mock_local_models(httpx_mock: HTTPXMock, base_url: str):
mock_response = {
"data": [
{
"id": "model1",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
"root": "model1",
}
]
}
httpx_mock.add_response(
url=f"{base_url}/models",
method="GET",
json=mock_response,
status_code=200,
)
def test_create_without_base_url(public_class: type, monkeypatch) -> None:
monkeypatch.setenv("NVIDIA_API_KEY", "valid_api_key")
monkeypatch.delenv("NVIDIA_BASE_URL", raising=False)
x = public_class()
assert x.base_url == "https://integrate.api.nvidia.com/v1"
assert str(x._client.base_url) == "https://integrate.api.nvidia.com/v1/"
# https.Url
def test_base_url_priority(public_class: type, monkeypatch) -> None:
monkeypatch.setenv("NVIDIA_API_KEY", "valid_api_key")
ENV_URL = "https://ENV/v1"
NV_PARAM_URL = "https://NV_PARAM/v1"
PARAM_URL = "https://PARAM/v1"
def get_base_url(**kwargs: Any) -> str:
return public_class(model="NV-Embed-QA", **kwargs).base_url
with no_env_var("NVIDIA_BASE_URL"):
os.environ["NVIDIA_BASE_URL"] = ENV_URL
assert get_base_url() == ENV_URL
assert get_base_url(base_url=NV_PARAM_URL) == NV_PARAM_URL
assert get_base_url(base_url=PARAM_URL) == PARAM_URL
@pytest.mark.parametrize(
"base_url",
[
"bogus",
"http:/",
"http://",
"http:/oops",
],
)
def test_param_base_url_negative(
public_class: type, base_url: str, monkeypatch
) -> None:
monkeypatch.setenv("NVIDIA_API_KEY", "valid_api_key")
monkeypatch.delenv("NVIDIA_BASE_URL", raising=False)
with pytest.raises(ValueError) as e:
public_class(model="model1", base_url=base_url)
assert "Invalid base_url" in str(e.value)
@pytest.mark.parametrize(
"base_url",
[
"http://localhost:8888/embeddings",
"http://0.0.0.0:8888/rankings",
"http://localhost:8888/embeddings/",
"http://0.0.0.0:8888/rankings/",
"http://localhost:8888/chat/completions",
"http://localhost:8080/v1/embeddings",
"http://0.0.0.0:8888/v1/rankings",
],
)
def test_expect_warn(public_class: type, base_url: str) -> None:
with pytest.warns(UserWarning) as record:
public_class(model="model1", base_url=base_url)
assert len(record) == 1
assert "does not end in /v1" in str(record[0].message)
@pytest.mark.parametrize(
"base_url",
[
"http://localhost:8080/v1",
],
)
def test_base_url_valid_not_hosted(base_url: str, mock_local_models: None) -> None:
with pytest.warns(UserWarning):
cls = Interface(base_url=base_url)
assert cls._is_hosted is False
assert cls.model == "model1"
@contextmanager
def no_env_var(var: str) -> Generator[None, None, None]:
try:
if val := os.environ.get(var, None):
del os.environ[var]
yield
finally:
if val:
os.environ[var] = val
else:
if var in os.environ:
del os.environ[var]
@pytest.mark.parametrize(
"base_url",
[
"http://host/path0/path1/path2/v1",
"http://host:123/path0/path1/path2/v1/",
],
)
def test_proxy_base_url(
public_class: type, base_url: str, requests_mock: Mocker
) -> None:
with no_env_var("NVIDIA_BASE_URL"):
client = public_class(
api_key="NO_API_KEY_PROVIDED", model="NV-Embed-QA", base_url=base_url
)
assert base_url.startswith(client.base_url)
|
import pytest
from llama_index.embeddings.nvidia import NVIDIAEmbedding as Interface
from pytest_httpx import HTTPXMock
@pytest.fixture()
def mock_local_models(httpx_mock: HTTPXMock, base_url: str):
mock_response = {
"data": [
{
"id": "model1",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
"root": "model1",
}
]
}
httpx_mock.add_response(
url=f"{base_url}/models",
method="GET",
json=mock_response,
status_code=200,
)
# test case for base_url warning
@pytest.mark.parametrize(
"base_url",
[
"http://localhost:8888/embeddings",
],
)
def test_base_url_invalid_not_hosted(base_url: str, mock_local_models) -> None:
with pytest.warns(UserWarning) as msg:
cls = Interface(base_url=base_url)
assert cls._is_hosted is False
assert len(msg) == 2
assert "Expected format is " in str(msg[0].message)
@pytest.mark.parametrize(
"base_url",
[
"http://localhost:8080/v1",
],
)
def test_base_url_valid_not_hosted(base_url: str, mock_local_models: None) -> None:
with pytest.warns(UserWarning):
cls = Interface(base_url=base_url)
assert cls._is_hosted is False
assert cls.model == "model1"
# @pytest.mark.parametrize("base_url", ["https://integrate.api.nvidia.com/v1/"])
# def test_base_url_valid_hosted(base_url: str) -> None:
# Interface(base_url=base_url)
|
__version__ = "2.7.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
from .datasets import SentencesDataset, ParallelSentencesDataset
from .LoggingHandler import LoggingHandler
from .SentenceTransformer import SentenceTransformer
from .readers import InputExample
from .cross_encoder.CrossEncoder import CrossEncoder
from .quantization import quantize_embeddings
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"InputExample",
"CrossEncoder",
"quantize_embeddings",
]
|
__version__ = "2.6.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
from .datasets import SentencesDataset, ParallelSentencesDataset
from .LoggingHandler import LoggingHandler
from .SentenceTransformer import SentenceTransformer
from .readers import InputExample
from .cross_encoder.CrossEncoder import CrossEncoder
from .quantization import quantize_embeddings
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"InputExample",
"CrossEncoder",
"quantize_embeddings",
]
|
import unittest
import numpy as np
import torch
from transformers import AutoTokenizer, Gemma2Config, Gemma2Model
from diffusers import (
AutoencoderKL,
FlowMatchEulerDiscreteScheduler,
Lumina2Text2ImgPipeline,
Lumina2Transformer2DModel,
)
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import PipelineTesterMixin
class Lumina2Text2ImgPipelinePipelineFastTests(unittest.TestCase, PipelineTesterMixin):
pipeline_class = Lumina2Text2ImgPipeline
params = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
batch_params = frozenset(["prompt", "negative_prompt"])
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback_on_step_end",
"callback_on_step_end_tensor_inputs",
]
)
supports_dduf = False
test_xformers_attention = False
test_layerwise_casting = True
def get_dummy_components(self):
torch.manual_seed(0)
transformer = Lumina2Transformer2DModel(
sample_size=4,
patch_size=2,
in_channels=4,
hidden_size=8,
num_layers=2,
num_attention_heads=1,
num_kv_heads=1,
multiple_of=16,
ffn_dim_multiplier=None,
norm_eps=1e-5,
scaling_factor=1.0,
axes_dim_rope=[4, 2, 2],
cap_feat_dim=8,
)
torch.manual_seed(0)
vae = AutoencoderKL(
sample_size=32,
in_channels=3,
out_channels=3,
block_out_channels=(4,),
layers_per_block=1,
latent_channels=4,
norm_num_groups=1,
use_quant_conv=False,
use_post_quant_conv=False,
shift_factor=0.0609,
scaling_factor=1.5035,
)
scheduler = FlowMatchEulerDiscreteScheduler()
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/dummy-gemma")
torch.manual_seed(0)
config = Gemma2Config(
head_dim=4,
hidden_size=8,
intermediate_size=8,
num_attention_heads=2,
num_hidden_layers=2,
num_key_value_heads=2,
sliding_window=2,
)
text_encoder = Gemma2Model(config)
components = {
"transformer": transformer.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder.eval(),
"tokenizer": tokenizer,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"height": 32,
"width": 32,
"output_type": "np",
}
return inputs
def test_lumina_prompt_embeds(self):
pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
inputs = self.get_dummy_inputs(torch_device)
output_with_prompt = pipe(**inputs).images[0]
inputs = self.get_dummy_inputs(torch_device)
prompt = inputs.pop("prompt")
do_classifier_free_guidance = inputs["guidance_scale"] > 1
(
prompt_embeds,
prompt_attention_mask,
negative_prompt_embeds,
negative_prompt_attention_mask,
) = pipe.encode_prompt(
prompt,
do_classifier_free_guidance=do_classifier_free_guidance,
device=torch_device,
)
output_with_embeds = pipe(
prompt_embeds=prompt_embeds,
prompt_attention_mask=prompt_attention_mask,
**inputs,
).images[0]
max_diff = np.abs(output_with_prompt - output_with_embeds).max()
assert max_diff < 1e-4
|
import unittest
import numpy as np
import torch
from transformers import AutoTokenizer, GemmaConfig, GemmaForCausalLM
from diffusers import (
AutoencoderKL,
FlowMatchEulerDiscreteScheduler,
Lumina2Text2ImgPipeline,
Lumina2Transformer2DModel,
)
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import PipelineTesterMixin
class Lumina2Text2ImgPipelinePipelineFastTests(unittest.TestCase, PipelineTesterMixin):
pipeline_class = Lumina2Text2ImgPipeline
params = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
batch_params = frozenset(["prompt", "negative_prompt"])
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback_on_step_end",
"callback_on_step_end_tensor_inputs",
]
)
supports_dduf = False
test_xformers_attention = False
test_layerwise_casting = True
def get_dummy_components(self):
torch.manual_seed(0)
transformer = Lumina2Transformer2DModel(
sample_size=4,
patch_size=2,
in_channels=4,
hidden_size=8,
num_layers=2,
num_attention_heads=1,
num_kv_heads=1,
multiple_of=16,
ffn_dim_multiplier=None,
norm_eps=1e-5,
scaling_factor=1.0,
axes_dim_rope=[4, 2, 2],
cap_feat_dim=8,
)
torch.manual_seed(0)
vae = AutoencoderKL(
sample_size=32,
in_channels=3,
out_channels=3,
block_out_channels=(4,),
layers_per_block=1,
latent_channels=4,
norm_num_groups=1,
use_quant_conv=False,
use_post_quant_conv=False,
shift_factor=0.0609,
scaling_factor=1.5035,
)
scheduler = FlowMatchEulerDiscreteScheduler()
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/dummy-gemma")
torch.manual_seed(0)
config = GemmaConfig(
head_dim=2,
hidden_size=8,
intermediate_size=37,
num_attention_heads=4,
num_hidden_layers=2,
num_key_value_heads=4,
)
text_encoder = GemmaForCausalLM(config)
components = {
"transformer": transformer.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder.eval(),
"tokenizer": tokenizer,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"height": 32,
"width": 32,
"output_type": "np",
}
return inputs
def test_lumina_prompt_embeds(self):
pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
inputs = self.get_dummy_inputs(torch_device)
output_with_prompt = pipe(**inputs).images[0]
inputs = self.get_dummy_inputs(torch_device)
prompt = inputs.pop("prompt")
do_classifier_free_guidance = inputs["guidance_scale"] > 1
(
prompt_embeds,
prompt_attention_mask,
negative_prompt_embeds,
negative_prompt_attention_mask,
) = pipe.encode_prompt(
prompt,
do_classifier_free_guidance=do_classifier_free_guidance,
device=torch_device,
)
output_with_embeds = pipe(
prompt_embeds=prompt_embeds,
prompt_attention_mask=prompt_attention_mask,
**inputs,
).images[0]
max_diff = np.abs(output_with_prompt - output_with_embeds).max()
assert max_diff < 1e-4
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import pytest
from jina import Document, Flow
try:
from video_torch_encoder import VideoTorchEncoder
except:
from ...video_torch_encoder import VideoTorchEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def kinects_videos():
from torchvision.datasets import Kinetics400
dataset = Kinetics400(root=os.path.join(cur_dir, '../data/kinetics400'), frames_per_clip=20)
return [dataset[0][0], dataset[0][0]]
def test_video_torch_encoder(kinects_videos):
f = Flow().add(uses=VideoTorchEncoder)
with f:
resp = f.post(on='/test', inputs=[Document(blob=video.detach().numpy()) for video in kinects_videos], return_results=True)
assert resp[0].docs[0].embedding.shape == (512, )
assert resp[0].docs[1].embedding.shape == (512,)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import pytest
from jina import Document, Flow
try:
from video_torch_encoder import VideoTorchEncoder
except:
from jinahub.encoder.video_torch_encoder import VideoTorchEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def kinects_videos():
from torchvision.datasets import Kinetics400
dataset = Kinetics400(root=os.path.join(cur_dir, '../data/kinetics400'), frames_per_clip=20)
return [dataset[0][0], dataset[0][0]]
def test_video_torch_encoder(kinects_videos):
f = Flow().add(uses=VideoTorchEncoder)
with f:
resp = f.post(on='/test', inputs=[Document(blob=video.detach().numpy()) for video in kinects_videos], return_results=True)
assert resp[0].docs[0].embedding.shape == (512, )
assert resp[0].docs[1].embedding.shape == (512,)
|
import csv
import pathlib
from typing import Any, Callable, Optional, Tuple, Union
import PIL
from .folder import make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class GTSRB(VisionDataset):
"""`German Traffic Sign Recognition Benchmark (GTSRB) <https://benchmark.ini.rub.de/>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), or ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
def __init__(
self,
root: Union[str, pathlib.Path],
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "test"))
self._base_folder = pathlib.Path(root) / "gtsrb"
self._target_folder = (
self._base_folder / "GTSRB" / ("Training" if self._split == "train" else "Final_Test/Images")
)
if download:
self.download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
if self._split == "train":
samples = make_dataset(str(self._target_folder), extensions=(".ppm",))
else:
with open(self._base_folder / "GT-final_test.csv") as csv_file:
samples = [
(str(self._target_folder / row["Filename"]), int(row["ClassId"]))
for row in csv.DictReader(csv_file, delimiter=";", skipinitialspace=True)
]
self._samples = samples
self.transform = transform
self.target_transform = target_transform
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
path, target = self._samples[index]
sample = PIL.Image.open(path).convert("RGB")
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def _check_exists(self) -> bool:
return self._target_folder.is_dir()
def download(self) -> None:
if self._check_exists():
return
base_url = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/"
if self._split == "train":
download_and_extract_archive(
f"{base_url}GTSRB-Training_fixed.zip",
download_root=str(self._base_folder),
md5="513f3c79a4c5141765e10e952eaa2478",
)
else:
download_and_extract_archive(
f"{base_url}GTSRB_Final_Test_Images.zip",
download_root=str(self._base_folder),
md5="c7e4e6327067d32654124b0fe9e82185",
)
download_and_extract_archive(
f"{base_url}GTSRB_Final_Test_GT.zip",
download_root=str(self._base_folder),
md5="fe31e9c9270bbcd7b84b7f21a9d9d9e5",
)
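# --- Illustrative usage sketch (not part of the torchvision module above) ---
# A minimal, hedged example of how this dataset class is typically consumed through
# the public API; the transform pipeline and batch size below are assumptions made
# for illustration only.
#
#   from torch.utils.data import DataLoader
#   from torchvision import transforms
#   from torchvision.datasets import GTSRB
#
#   dataset = GTSRB(
#       root="data",
#       split="train",
#       download=True,
#       transform=transforms.Compose([transforms.Resize((32, 32)), transforms.ToTensor()]),
#   )
#   loader = DataLoader(dataset, batch_size=64, shuffle=True)
#   images, labels = next(iter(loader))  # images: (64, 3, 32, 32), labels: (64,)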
|
import csv
import pathlib
from typing import Any, Callable, Optional, Tuple
import PIL
from .folder import make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class GTSRB(VisionDataset):
"""`German Traffic Sign Recognition Benchmark (GTSRB) <https://benchmark.ini.rub.de/>`_ Dataset.
Args:
root (string): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), or ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "test"))
self._base_folder = pathlib.Path(root) / "gtsrb"
self._target_folder = (
self._base_folder / "GTSRB" / ("Training" if self._split == "train" else "Final_Test/Images")
)
if download:
self.download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
if self._split == "train":
samples = make_dataset(str(self._target_folder), extensions=(".ppm",))
else:
with open(self._base_folder / "GT-final_test.csv") as csv_file:
samples = [
(str(self._target_folder / row["Filename"]), int(row["ClassId"]))
for row in csv.DictReader(csv_file, delimiter=";", skipinitialspace=True)
]
self._samples = samples
self.transform = transform
self.target_transform = target_transform
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
path, target = self._samples[index]
sample = PIL.Image.open(path).convert("RGB")
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def _check_exists(self) -> bool:
return self._target_folder.is_dir()
def download(self) -> None:
if self._check_exists():
return
base_url = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/"
if self._split == "train":
download_and_extract_archive(
f"{base_url}GTSRB-Training_fixed.zip",
download_root=str(self._base_folder),
md5="513f3c79a4c5141765e10e952eaa2478",
)
else:
download_and_extract_archive(
f"{base_url}GTSRB_Final_Test_Images.zip",
download_root=str(self._base_folder),
md5="c7e4e6327067d32654124b0fe9e82185",
)
download_and_extract_archive(
f"{base_url}GTSRB_Final_Test_GT.zip",
download_root=str(self._base_folder),
md5="fe31e9c9270bbcd7b84b7f21a9d9d9e5",
)
|
from typing import Dict, Optional
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import Field, SecretStr
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.utils.openai import is_openai_v1
DEFAULT_API_BASE = "https://text.octoai.run/v1/"
DEFAULT_MODEL = "thenlper/gte-large"
class OctoAIEmbeddings(OpenAIEmbeddings):
"""OctoAI Compute Service embedding models.
See https://octo.ai/ for information about OctoAI.
To use, you should have the ``openai`` python package installed and the
environment variable ``OCTOAI_API_TOKEN`` set with your API token.
Alternatively, you can use the octoai_api_token keyword argument.
"""
octoai_api_token: Optional[SecretStr] = Field(default=None)
"""OctoAI Endpoints API keys."""
endpoint_url: str = Field(default=DEFAULT_API_BASE)
"""Base URL path for API requests."""
model: str = Field(default=DEFAULT_MODEL)
"""Model name to use."""
tiktoken_enabled: bool = False
"""Set this to False for non-OpenAI implementations of the embeddings API"""
@property
def _llm_type(self) -> str:
"""Return type of embeddings model."""
return "octoai-embeddings"
@property
def lc_secrets(self) -> Dict[str, str]:
return {"octoai_api_token": "OCTOAI_API_TOKEN"}
@pre_init
def validate_environment(cls, values: dict) -> dict:
"""Validate that api key and python package exists in environment."""
values["endpoint_url"] = get_from_dict_or_env(
values,
"endpoint_url",
"ENDPOINT_URL",
default=DEFAULT_API_BASE,
)
values["octoai_api_token"] = convert_to_secret_str(
get_from_dict_or_env(values, "octoai_api_token", "OCTOAI_API_TOKEN")
)
values["model"] = get_from_dict_or_env(
values,
"model",
"MODEL",
default=DEFAULT_MODEL,
)
try:
import openai
if is_openai_v1():
client_params = {
"api_key": values["octoai_api_token"].get_secret_value(),
"base_url": values["endpoint_url"],
}
if not values.get("client"):
values["client"] = openai.OpenAI(**client_params).embeddings
if not values.get("async_client"):
values["async_client"] = openai.AsyncOpenAI(
**client_params
).embeddings
else:
values["openai_api_base"] = values["endpoint_url"]
values["openai_api_key"] = values["octoai_api_token"].get_secret_value()
values["client"] = openai.Embedding
values["async_client"] = openai.Embedding
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
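# --- Illustrative usage sketch (assumption, not part of the module above) ---
# A minimal example of how this class is typically instantiated; the token value is a
# placeholder, and the class is usually imported from langchain_community.embeddings.
# Alternatively, set the OCTOAI_API_TOKEN environment variable instead of passing it.
#
#   from langchain_community.embeddings import OctoAIEmbeddings
#
#   embeddings = OctoAIEmbeddings(octoai_api_token="<OCTOAI_API_TOKEN>")
#   vector = embeddings.embed_query("What is semantic search?")
#   vectors = embeddings.embed_documents(["first document", "second document"])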
|
from typing import Dict, Optional
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import Field, SecretStr
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.utils.openai import is_openai_v1
DEFAULT_API_BASE = "https://text.octoai.run/v1/"
DEFAULT_MODEL = "thenlper/gte-large"
class OctoAIEmbeddings(OpenAIEmbeddings):
"""OctoAI Compute Service embedding models.
See https://octo.ai/ for information about OctoAI.
To use, you should have the ``openai`` python package installed and the
environment variable ``OCTOAI_API_TOKEN`` set with your API token.
Alternatively, you can use the octoai_api_token keyword argument.
"""
octoai_api_token: Optional[SecretStr] = Field(default=None)
"""OctoAI Endpoints API keys."""
endpoint_url: str = Field(default=DEFAULT_API_BASE)
"""Base URL path for API requests."""
model: str = Field(default=DEFAULT_MODEL)
"""Model name to use."""
tiktoken_enabled: bool = False
"""Set this to False for non-OpenAI implementations of the embeddings API"""
@property
def _llm_type(self) -> str:
"""Return type of embeddings model."""
return "octoai-embeddings"
@property
def lc_secrets(self) -> Dict[str, str]:
return {"octoai_api_token": "OCTOAI_API_TOKEN"}
@pre_init
def validate_environment(cls, values: dict) -> dict:
"""Validate that api key and python package exists in environment."""
values["endpoint_url"] = get_from_dict_or_env(
values,
"endpoint_url",
"ENDPOINT_URL",
default=DEFAULT_API_BASE,
)
values["octoai_api_token"] = convert_to_secret_str(
get_from_dict_or_env(values, "octoai_api_token", "OCTOAI_API_TOKEN")
)
values["model"] = get_from_dict_or_env(
values,
"model",
"MODEL",
default=DEFAULT_MODEL,
)
try:
import openai
if is_openai_v1():
client_params = {
"api_key": values["octoai_api_token"].get_secret_value(),
"base_url": values["endpoint_url"],
}
if not values.get("client"):
values["client"] = openai.OpenAI(**client_params).embeddings
if not values.get("async_client"):
values["async_client"] = openai.AsyncOpenAI(
**client_params
).embeddings
else:
values["openai_api_base"] = values["endpoint_url"]
values["openai_api_key"] = values["octoai_api_token"].get_secret_value()
values["client"] = openai.Embedding # type: ignore[attr-defined]
values["async_client"] = openai.Embedding # type: ignore[attr-defined]
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
|
from .autograd_utils import use_deterministic_algorithms
from .backend_utils import set_audio_backend
from .case_utils import (
disabledInCI,
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfCudaSmallMemory,
skipIfNoAudioDevice,
skipIfNoCtcDecoder,
skipIfNoCuCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoHWAccel,
skipIfNoMacOS,
skipIfNoModule,
skipIfNoQengine,
skipIfNoRIR,
skipIfNoSox,
skipIfNoSoxDecoder,
skipIfNoSoxEncoder,
skipIfPy310,
skipIfRocm,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
zip_equal,
)
from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
from .func_utils import torch_script
from .image_utils import get_image, rgb_to_gray, rgb_to_yuv_ccir, save_image
from .parameterized_utils import load_params, nested_params
from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"set_audio_backend",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"is_ffmpeg_available",
"skipIfNoAudioDevice",
"skipIfNoCtcDecoder",
"skipIfNoCuCtcDecoder",
"skipIfNoCuda",
"skipIfCudaSmallMemory",
"skipIfNoExec",
"skipIfNoMacOS",
"skipIfNoModule",
"skipIfNoRIR",
"skipIfNoSox",
"skipIfNoSoxDecoder",
"skipIfNoSoxEncoder",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfNoHWAccel",
"skipIfPy310",
"disabledInCI",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
"rgb_to_gray",
"rgb_to_yuv_ccir",
"use_deterministic_algorithms",
"zip_equal",
]
|
from .autograd_utils import use_deterministic_algorithms
from .backend_utils import set_audio_backend
from .case_utils import (
disabledInCI,
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfCudaSmallMemory,
skipIfNoAudioDevice,
skipIfNoCtcDecoder,
skipIfNoCuCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoHWAccel,
skipIfNoMacOS,
skipIfNoModule,
skipIfNoQengine,
skipIfNoRIR,
skipIfNoSox,
skipIfPy310,
skipIfRocm,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
zip_equal,
)
from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
from .func_utils import torch_script
from .image_utils import get_image, rgb_to_gray, rgb_to_yuv_ccir, save_image
from .parameterized_utils import load_params, nested_params
from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"set_audio_backend",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"is_ffmpeg_available",
"skipIfNoAudioDevice",
"skipIfNoCtcDecoder",
"skipIfNoCuCtcDecoder",
"skipIfNoCuda",
"skipIfCudaSmallMemory",
"skipIfNoExec",
"skipIfNoMacOS",
"skipIfNoModule",
"skipIfNoRIR",
"skipIfNoSox",
"skipIfNoSoxBackend",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfNoHWAccel",
"skipIfPy310",
"disabledInCI",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
"rgb_to_gray",
"rgb_to_yuv_ccir",
"use_deterministic_algorithms",
"zip_equal",
]
|
import pathlib
from typing import Any, BinaryIO, Dict, List, Tuple, Union
import numpy as np
from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher
from torchvision.datapoints import Image
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling, read_mat
from .._api import register_dataset, register_info
NAME = "svhn"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(c) for c in range(10)])
@register_dataset(NAME)
class SVHN(Dataset):
"""SVHN Dataset.
homepage="http://ufldl.stanford.edu/housenumbers/",
dependencies = scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test", "extra"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_CHECKSUMS = {
"train": "435e94d69a87fde4fd4d7f3dd208dfc32cb6ae8af2240d066de1df7508d083b8",
"test": "cdce80dfb2a2c4c6160906d0bd7c68ec5a99d7ca4831afa54f09182025b6a75b",
"extra": "a133a4beb38a00fcdda90c9489e0c04f900b660ce8a316a5e854838379a71eb3",
}
def _resources(self) -> List[OnlineResource]:
data = HttpResource(
f"http://ufldl.stanford.edu/housenumbers/{self._split}_32x32.mat",
sha256=self._CHECKSUMS[self._split],
)
return [data]
def _read_images_and_labels(self, data: Tuple[str, BinaryIO]) -> List[Tuple[np.ndarray, np.ndarray]]:
_, buffer = data
content = read_mat(buffer)
return list(
zip(
content["X"].transpose((3, 0, 1, 2)),
content["y"].squeeze(),
)
)
def _prepare_sample(self, data: Tuple[np.ndarray, np.ndarray]) -> Dict[str, Any]:
image_array, label_array = data
return dict(
image=Image(image_array.transpose((2, 0, 1))),
            label=Label(int(label_array) % 10, categories=self._categories),  # SVHN labels digit 0 as 10; map it back to 0
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Mapper(dp, self._read_images_and_labels)
dp = UnBatcher(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 73_257,
"test": 26_032,
"extra": 531_131,
}[self._split]
|
import pathlib
from typing import Any, BinaryIO, Dict, List, Tuple, Union
import numpy as np
from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher
from torchvision.prototype.datapoints import Image, Label
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling, read_mat
from .._api import register_dataset, register_info
NAME = "svhn"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(c) for c in range(10)])
@register_dataset(NAME)
class SVHN(Dataset):
"""SVHN Dataset.
homepage="http://ufldl.stanford.edu/housenumbers/",
dependencies = scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test", "extra"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_CHECKSUMS = {
"train": "435e94d69a87fde4fd4d7f3dd208dfc32cb6ae8af2240d066de1df7508d083b8",
"test": "cdce80dfb2a2c4c6160906d0bd7c68ec5a99d7ca4831afa54f09182025b6a75b",
"extra": "a133a4beb38a00fcdda90c9489e0c04f900b660ce8a316a5e854838379a71eb3",
}
def _resources(self) -> List[OnlineResource]:
data = HttpResource(
f"http://ufldl.stanford.edu/housenumbers/{self._split}_32x32.mat",
sha256=self._CHECKSUMS[self._split],
)
return [data]
def _read_images_and_labels(self, data: Tuple[str, BinaryIO]) -> List[Tuple[np.ndarray, np.ndarray]]:
_, buffer = data
content = read_mat(buffer)
return list(
zip(
content["X"].transpose((3, 0, 1, 2)),
content["y"].squeeze(),
)
)
def _prepare_sample(self, data: Tuple[np.ndarray, np.ndarray]) -> Dict[str, Any]:
image_array, label_array = data
return dict(
image=Image(image_array.transpose((2, 0, 1))),
            label=Label(int(label_array) % 10, categories=self._categories),  # SVHN labels digit 0 as 10; map it back to 0
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Mapper(dp, self._read_images_and_labels)
dp = UnBatcher(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 73_257,
"test": 26_032,
"extra": 531_131,
}[self._split]
|
import csv
import gzip
import logging
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, datasets, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Training parameters
model_name = "bert-base-uncased"
train_batch_size = 8
num_epochs = 1
max_seq_length = 75
# Save path to store our model
model_save_path = "output/training_stsb_tsdae-{}-{}-{}".format(
model_name, train_batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "data/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Defining our sentence transformer model
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), "cls")
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = "data/wiki1m_for_simcse.txt"
if not os.path.exists(wikipedia_dataset_path):
util.http_get(
"https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt",
wikipedia_dataset_path,
)
# train_sentences is a list of plain sentences; the DenoisingAutoEncoderDataset below pairs each sentence with a noisy (token-deleted) copy on the fly
train_sentences = []
with open(wikipedia_dataset_path, "r", encoding="utf8") as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
# Read STSbenchmark dataset and use it as development set
logging.info("Read STSbenchmark dev dataset")
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
dev_samples, batch_size=train_batch_size, name="sts-dev"
)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
test_samples, batch_size=train_batch_size, name="sts-test"
)
# We train our model with the TSDAE objective using the DenoisingAutoEncoderLoss
train_dataset = datasets.DenoisingAutoEncoderDataset(train_sentences)
train_dataloader = DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True, drop_last=True)
train_loss = losses.DenoisingAutoEncoderLoss(model, decoder_name_or_path=model_name, tie_encoder_decoder=True)
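# Sketch of what the denoising dataset yields (illustrative): each item is an
# InputExample whose texts are [noisy_sentence, original_sentence]; by default the
# noise function deletes roughly 60% of the tokens.
#
#   example = train_dataset[0]
#   noisy, original = example.texts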
evaluation_steps = 1000
logging.info("Training sentences: {}".format(len(train_sentences)))
logging.info("Performance before training")
dev_evaluator(model)
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=num_epochs,
evaluation_steps=evaluation_steps,
output_path=model_save_path,
weight_decay=0,
warmup_steps=100,
optimizer_params={"lr": 3e-5},
    use_amp=True,  # Set to True if your GPU supports FP16 operations
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator(model, output_path=model_save_path)
|
from torch.utils.data import DataLoader
from sentence_transformers import models, losses, datasets
from sentence_transformers import LoggingHandler, SentenceTransformer, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Training parameters
model_name = "bert-base-uncased"
train_batch_size = 8
num_epochs = 1
max_seq_length = 75
# Save path to store our model
model_save_path = "output/training_stsb_tsdae-{}-{}-{}".format(
model_name, train_batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "data/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Defining our sentence transformer model
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), "cls")
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = "data/wiki1m_for_simcse.txt"
if not os.path.exists(wikipedia_dataset_path):
util.http_get(
"https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt",
wikipedia_dataset_path,
)
# train_sentences is a list of plain sentences; the DenoisingAutoEncoderDataset below pairs each sentence with a noisy (token-deleted) copy on the fly
train_sentences = []
with open(wikipedia_dataset_path, "r", encoding="utf8") as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
# Read STSbenchmark dataset and use it as development set
logging.info("Read STSbenchmark dev dataset")
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
dev_samples, batch_size=train_batch_size, name="sts-dev"
)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
test_samples, batch_size=train_batch_size, name="sts-test"
)
# We train our model with the TSDAE objective using the DenoisingAutoEncoderLoss
train_dataset = datasets.DenoisingAutoEncoderDataset(train_sentences)
train_dataloader = DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True, drop_last=True)
train_loss = losses.DenoisingAutoEncoderLoss(model, decoder_name_or_path=model_name, tie_encoder_decoder=True)
evaluation_steps = 1000
logging.info("Training sentences: {}".format(len(train_sentences)))
logging.info("Performance before training")
dev_evaluator(model)
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=num_epochs,
evaluation_steps=evaluation_steps,
output_path=model_save_path,
weight_decay=0,
warmup_steps=100,
optimizer_params={"lr": 3e-5},
    use_amp=True,  # Set to True if your GPU supports FP16 operations
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator(model, output_path=model_save_path)
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
init_cfg=dict(
type='Pretrained', checkpoint='./swav_800ep_pretrain.pth.tar')))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
init_cfg=dict(
type='Pretrained', checkpoint='./swav_800ep_pretrain.pth.tar')))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
import itertools
import numpy as np
from absl.testing import parameterized
from keras.src import ops
from keras.src import testing
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
convert_format,
)
class ConvertersTest(testing.TestCase):
def setUp(self):
xyxy_box = np.array(
[[[10, 20, 110, 120], [20, 30, 120, 130]]], dtype="float32"
)
yxyx_box = np.array(
[[[20, 10, 120, 110], [30, 20, 130, 120]]], dtype="float32"
)
rel_xyxy_box = np.array(
[[[0.01, 0.02, 0.11, 0.12], [0.02, 0.03, 0.12, 0.13]]],
dtype="float32",
)
rel_yxyx_box = np.array(
[[[0.02, 0.01, 0.12, 0.11], [0.03, 0.02, 0.13, 0.12]]],
dtype="float32",
)
center_xywh_box = np.array(
[[[60, 70, 100, 100], [70, 80, 100, 100]]], dtype="float32"
)
center_yxhw_box = np.array(
[[[70, 60, 100, 100], [80, 70, 100, 100]]], dtype="float32"
)
xywh_box = np.array(
[[[10, 20, 100, 100], [20, 30, 100, 100]]], dtype="float32"
)
rel_xywh_box = np.array(
[[[0.01, 0.02, 0.1, 0.1], [0.02, 0.03, 0.1, 0.1]]], dtype="float32"
)
self.images = np.ones([2, 1000, 1000, 3], dtype="float32")
self.height = 1000
self.width = 1000
self.boxes = {
"xyxy": xyxy_box,
"center_xywh": center_xywh_box,
"rel_xywh": rel_xywh_box,
"xywh": xywh_box,
"rel_xyxy": rel_xyxy_box,
"yxyx": yxyx_box,
"rel_yxyx": rel_yxyx_box,
"center_yxhw": center_yxhw_box,
}
@parameterized.named_parameters(
*[
(f"{source}_{target}", source, target)
for (source, target) in itertools.permutations(
[
"xyxy",
"yxyx",
"xywh",
"rel_xyxy",
"rel_yxyx",
"center_xywh",
"center_yxhw",
],
2,
)
]
+ [("xyxy_xyxy", "xyxy", "xyxy")]
)
def test_convert_all_formats(self, source, target):
source_box = self.boxes[source]
target_box = self.boxes[target]
self.assertAllClose(
convert_format(
source_box,
source=source,
target=target,
height=self.height,
width=self.width,
),
target_box,
)
def test_convert_format_invalid_source(self):
boxes = self.boxes["xywh"]
with self.assertRaises(ValueError):
convert_format(boxes, source="invalid", target="xywh")
def test_convert_format_invalid_target(self):
boxes = self.boxes["xyxy"]
with self.assertRaises(ValueError):
convert_format(boxes, source="xyxy", target="invalid")
def test_convert_format_missing_dimensions(self):
boxes = self.boxes["xyxy"]
with self.assertRaisesRegex(
ValueError, r"must receive `height` and `width`"
):
convert_format(boxes, source="xyxy", target="rel_xyxy")
def test_clip_to_image_size(self):
boxes = {
"boxes": np.array([[0.0, 0.0, 1.5, 1.6], [0.5, 0.4, 0.7, 0.8]]),
"labels": np.array([0, 1]),
}
expected_clipped = {
"boxes": np.array([[0.0, 0.0, 1.0, 1.0], [0.5, 0.4, 0.7, 0.8]]),
"labels": np.array([0, 1]),
}
clipped_boxes = clip_to_image_size(
boxes, bounding_box_format="rel_xyxy"
)
self.assertAllEqual(clipped_boxes, expected_clipped)
def test_affine_identity(self):
# Test identity transform (no change)
batch_size = self.boxes["xyxy"].shape[0]
transformed_boxes = affine_transform(
boxes=self.boxes["xyxy"],
angle=np.zeros([batch_size], dtype="float32"),
translate_x=np.zeros([batch_size], dtype="float32"),
translate_y=np.zeros([batch_size], dtype="float32"),
scale=np.ones([batch_size], dtype="float32"),
shear_x=np.zeros([batch_size], dtype="float32"),
shear_y=np.zeros([batch_size], dtype="float32"),
height=self.height,
width=self.width,
)
transformed_boxes = ops.convert_to_numpy(transformed_boxes)
self.assertAllClose(self.boxes["xyxy"], transformed_boxes)
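# --- Illustrative sketch (assumption): what convert_format does on one box ---
# Uses the same fixture values as the test above; xyxy [10, 20, 110, 120] becomes
# xywh [10, 20, 100, 100] (x, y, width, height).
#
#   import numpy as np
#   from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import convert_format
#
#   box_xyxy = np.array([[[10, 20, 110, 120]]], dtype="float32")
#   box_xywh = convert_format(box_xyxy, source="xyxy", target="xywh", height=1000, width=1000)
#   # -> [[[10, 20, 100, 100]]]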
|
import itertools
import numpy as np
from absl.testing import parameterized
from keras.src import ops
from keras.src import testing
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
convert_format,
)
class ConvertersTest(testing.TestCase):
def setUp(self):
xyxy_box = np.array(
[[[10, 20, 110, 120], [20, 30, 120, 130]]], dtype="float32"
)
yxyx_box = np.array(
[[[20, 10, 120, 110], [30, 20, 130, 120]]], dtype="float32"
)
rel_xyxy_box = np.array(
[[[0.01, 0.02, 0.11, 0.12], [0.02, 0.03, 0.12, 0.13]]],
dtype="float32",
)
rel_yxyx_box = np.array(
[[[0.02, 0.01, 0.12, 0.11], [0.03, 0.02, 0.13, 0.12]]],
dtype="float32",
)
center_xywh_box = np.array(
[[[60, 70, 100, 100], [70, 80, 100, 100]]], dtype="float32"
)
center_yxhw_box = np.array(
[[[70, 60, 100, 100], [80, 70, 100, 100]]], dtype="float32"
)
xywh_box = np.array(
[[[10, 20, 100, 100], [20, 30, 100, 100]]], dtype="float32"
)
rel_xywh_box = np.array(
[[[0.01, 0.02, 0.1, 0.1], [0.02, 0.03, 0.1, 0.1]]], dtype="float32"
)
self.images = np.ones([2, 1000, 1000, 3])
self.height = 1000
self.width = 1000
self.boxes = {
"xyxy": xyxy_box,
"center_xywh": center_xywh_box,
"rel_xywh": rel_xywh_box,
"xywh": xywh_box,
"rel_xyxy": rel_xyxy_box,
"yxyx": yxyx_box,
"rel_yxyx": rel_yxyx_box,
"center_yxhw": center_yxhw_box,
}
@parameterized.named_parameters(
*[
(f"{source}_{target}", source, target)
for (source, target) in itertools.permutations(
[
"xyxy",
"yxyx",
"xywh",
"rel_xyxy",
"rel_yxyx",
"center_xywh",
"center_yxhw",
],
2,
)
]
+ [("xyxy_xyxy", "xyxy", "xyxy")]
)
def test_convert_all_formats(self, source, target):
source_box = self.boxes[source]
target_box = self.boxes[target]
self.assertAllClose(
convert_format(
source_box,
source=source,
target=target,
height=self.height,
width=self.width,
),
target_box,
)
def test_convert_format_invalid_source(self):
boxes = self.boxes["xywh"]
with self.assertRaises(ValueError):
convert_format(boxes, source="invalid", target="xywh")
def test_convert_format_invalid_target(self):
boxes = self.boxes["xyxy"]
with self.assertRaises(ValueError):
convert_format(boxes, source="xyxy", target="invalid")
def test_convert_format_missing_dimensions(self):
boxes = self.boxes["xyxy"]
with self.assertRaisesRegex(
ValueError, r"must receive `height` and `width`"
):
convert_format(boxes, source="xyxy", target="rel_xyxy")
def test_clip_to_image_size(self):
boxes = {
"boxes": np.array([[0.0, 0.0, 1.5, 1.6], [0.5, 0.4, 0.7, 0.8]]),
"labels": np.array([0, 1]),
}
expected_clipped = {
"boxes": np.array([[0.0, 0.0, 1.0, 1.0], [0.5, 0.4, 0.7, 0.8]]),
"labels": np.array([0, 1]),
}
clipped_boxes = clip_to_image_size(
boxes, bounding_box_format="rel_xyxy"
)
self.assertAllEqual(clipped_boxes, expected_clipped)
def test_affine_identity(self):
# Test identity transform (no change)
batch_size = self.boxes["xyxy"].shape[0]
transformed_boxes = affine_transform(
boxes=self.boxes["xyxy"],
angle=np.zeros([batch_size]),
translate_x=np.zeros([batch_size]),
translate_y=np.zeros([batch_size]),
scale=np.ones([batch_size]),
shear_x=np.zeros([batch_size]),
shear_y=np.zeros([batch_size]),
height=self.height,
width=self.width,
)
transformed_boxes = ops.convert_to_numpy(transformed_boxes)
self.assertAllClose(self.boxes["xyxy"], transformed_boxes)
|
"""
This script contains an example of how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level/, e.g.:
```
pip install opensearch-py
```
This script was created for `opensearch` v2.15.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.models import Router
from sentence_transformers.sparse_encoder.models import MLMTransformer, SparseStaticEmbedding, SpladePooling
from sentence_transformers.sparse_encoder.search_engines import semantic_search_opensearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
print(f"Finish loading data. Corpus size: {len(corpus)}")
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
model_id = "opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill"
doc_encoder = MLMTransformer(model_id)
router = Router.for_query_document(
query_modules=[
SparseStaticEmbedding.from_json(
model_id,
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
document_modules=[
doc_encoder,
SpladePooling("max", activation_function="log1p_relu"),
],
)
sparse_model = SparseEncoder(modules=[router], similarity_fn_name="dot")
print("Start encoding corpus...")
start_time = time.time()
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode_document(
corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True
)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
# 5. Encode the queries using inference-free mode
start_time = time.time()
query_embeddings = sparse_model.encode_query(queries, convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Query encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using OpenSearch
results, search_time, corpus_index = semantic_search_opensearch(
query_embeddings_decoded,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
corpus_index=corpus_index,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example of how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level/, e.g.:
```
pip install opensearch-py
```
This script was created for `opensearch` v2.15.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.models import Router
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.search_engines import semantic_search_opensearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
print(f"Finish loading data. Corpus size: {len(corpus)}")
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
model_id = "opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill"
doc_encoder = MLMTransformer(model_id)
router = Router.for_query_document(
query_modules=[
IDF.from_json(
model_id,
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
document_modules=[
doc_encoder,
SpladePooling("max", activation_function="log1p_relu"),
],
)
sparse_model = SparseEncoder(modules=[router], similarity_fn_name="dot")
print("Start encoding corpus...")
start_time = time.time()
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode_document(
corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True
)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
# 5. Encode the queries using inference-free mode
start_time = time.time()
query_embeddings = sparse_model.encode_query(queries, convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Query encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using OpenSearch
results, search_time, corpus_index = semantic_search_opensearch(
query_embeddings_decoded,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
corpus_index=corpus_index,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
import json
import re
from typing import TypeVar
import yaml
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from pydantic import BaseModel, ValidationError
from langchain.output_parsers.format_instructions import YAML_FORMAT_INSTRUCTIONS
T = TypeVar("T", bound=BaseModel)
class YamlOutputParser(BaseOutputParser[T]):
"""Parse YAML output using a pydantic model."""
pydantic_object: type[T]
"""The pydantic model to parse."""
pattern: re.Pattern = re.compile(
r"^```(?:ya?ml)?(?P<yaml>[^`]*)", re.MULTILINE | re.DOTALL
)
"""Regex pattern to match yaml code blocks
within triple backticks with optional yaml or yml prefix."""
def parse(self, text: str) -> T:
try:
# Greedy search for 1st yaml candidate.
match = re.search(self.pattern, text.strip())
yaml_str = ""
if match:
yaml_str = match.group("yaml")
else:
# If no backticks were present, try to parse the entire output as yaml.
yaml_str = text
json_object = yaml.safe_load(yaml_str)
if hasattr(self.pydantic_object, "model_validate"):
return self.pydantic_object.model_validate(json_object)
else:
return self.pydantic_object.parse_obj(json_object)
except (yaml.YAMLError, ValidationError) as e:
name = self.pydantic_object.__name__
msg = f"Failed to parse {name} from completion {text}. Got: {e}"
raise OutputParserException(msg, llm_output=text) from e
def get_format_instructions(self) -> str:
# Copy schema to avoid altering original Pydantic schema.
schema = {k: v for k, v in self.pydantic_object.schema().items()}
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure yaml in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema)
return YAML_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "yaml"
@property
def OutputType(self) -> type[T]:
return self.pydantic_object
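# --- Illustrative usage sketch (assumption, not part of the module above) ---
# A minimal example of parsing a fenced ```yaml block from an LLM completion into a
# pydantic model; the Joke model and the completion text are made up for illustration.
#
#   from pydantic import BaseModel
#
#   class Joke(BaseModel):
#       setup: str
#       punchline: str
#
#   parser = YamlOutputParser(pydantic_object=Joke)
#   completion = "```yaml\nsetup: Why did the chicken cross the road?\npunchline: To get to the other side.\n```"
#   joke = parser.parse(completion)  # -> Joke(setup=..., punchline=...)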
|
import json
import re
from typing import Type, TypeVar
import yaml
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from pydantic import BaseModel, ValidationError
from langchain.output_parsers.format_instructions import YAML_FORMAT_INSTRUCTIONS
T = TypeVar("T", bound=BaseModel)
class YamlOutputParser(BaseOutputParser[T]):
"""Parse YAML output using a pydantic model."""
pydantic_object: Type[T]
"""The pydantic model to parse."""
pattern: re.Pattern = re.compile(
r"^```(?:ya?ml)?(?P<yaml>[^`]*)", re.MULTILINE | re.DOTALL
)
"""Regex pattern to match yaml code blocks
within triple backticks with optional yaml or yml prefix."""
def parse(self, text: str) -> T:
try:
# Greedy search for 1st yaml candidate.
match = re.search(self.pattern, text.strip())
yaml_str = ""
if match:
yaml_str = match.group("yaml")
else:
# If no backticks were present, try to parse the entire output as yaml.
yaml_str = text
json_object = yaml.safe_load(yaml_str)
if hasattr(self.pydantic_object, "model_validate"):
return self.pydantic_object.model_validate(json_object)
else:
return self.pydantic_object.parse_obj(json_object)
except (yaml.YAMLError, ValidationError) as e:
name = self.pydantic_object.__name__
msg = f"Failed to parse {name} from completion {text}. Got: {e}"
raise OutputParserException(msg, llm_output=text) from e
def get_format_instructions(self) -> str:
# Copy schema to avoid altering original Pydantic schema.
schema = {k: v for k, v in self.pydantic_object.schema().items()}
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure yaml in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema)
return YAML_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "yaml"
@property
def OutputType(self) -> Type[T]:
return self.pydantic_object
|
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TorchEmbedding, TorchTensor
def test_proto_tensor():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
tensor._to_node_protobuf()
def test_json_schema():
schema_json_of(TorchTensor)
def test_dump_json():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
orjson_dumps(tensor)
def test_unwrap():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
ndarray = tensor.unwrap()
assert not isinstance(ndarray, TorchTensor)
assert isinstance(tensor, TorchTensor)
assert isinstance(ndarray, torch.Tensor)
assert tensor.data_ptr() == ndarray.data_ptr()
assert (ndarray == torch.zeros(3, 224, 224)).all()
def test_parametrized():
# correct shape, single axis
tensor = parse_obj_as(TorchTensor[128], torch.zeros(128))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (128,)
# correct shape, multiple axis
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(3, 224, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
# wrong but reshapable shape
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 3, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
# wrong and not reshapable shape
with pytest.raises(ValueError):
parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 224))
@pytest.mark.parametrize('shape', [(3, 224, 224), (224, 224, 3)])
def test_parameterized_tensor_class_name(shape):
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(shape))
assert tensor.__class__.__name__ == 'TorchTensor[3, 224, 224]'
assert tensor.__class__.__qualname__ == 'TorchTensor[3, 224, 224]'
assert f'{tensor[0][0][0]}' == 'TorchTensor[3, 224, 224](0.)'
def test_torch_embedding():
# correct shape
tensor = parse_obj_as(TorchEmbedding[128], torch.zeros(128))
assert isinstance(tensor, TorchEmbedding)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (128,)
# wrong shape at data setting time
with pytest.raises(ValueError):
parse_obj_as(TorchEmbedding[128], torch.zeros(256))
# illegal shape at class creation time
with pytest.raises(ValueError):
parse_obj_as(TorchEmbedding[128, 128], torch.zeros(128, 128))
|
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import TorchEmbedding, TorchTensor
def test_proto_tensor():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
tensor._to_node_protobuf()
def test_json_schema():
schema_json_of(TorchTensor)
def test_dump_json():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
orjson_dumps(tensor)
def test_unwrap():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
ndarray = tensor.unwrap()
assert not isinstance(ndarray, TorchTensor)
assert isinstance(tensor, TorchTensor)
assert isinstance(ndarray, torch.Tensor)
assert tensor.data_ptr() == ndarray.data_ptr()
assert (ndarray == torch.zeros(3, 224, 224)).all()
def test_parametrized():
# correct shape, single axis
tensor = parse_obj_as(TorchTensor[128], torch.zeros(128))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (128,)
# correct shape, multiple axis
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(3, 224, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
# wrong but reshapable shape
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 3, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
# wrong and not reshapable shape
with pytest.raises(ValueError):
parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 224))
@pytest.mark.parametrize('shape', [(3, 224, 224), (224, 224, 3)])
def test_parameterized_tensor_class_name(shape):
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(shape))
assert tensor.__class__.__name__ == 'TorchTensor[3, 224, 224]'
assert tensor.__class__.__qualname__ == 'TorchTensor[3, 224, 224]'
assert f'{tensor[0][0][0]}' == 'TorchTensor[3, 224, 224](0.)'
def test_torch_embedding():
# correct shape
tensor = parse_obj_as(TorchEmbedding[128], torch.zeros(128))
assert isinstance(tensor, TorchEmbedding)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (128,)
# wrong shape at data setting time
with pytest.raises(ValueError):
parse_obj_as(TorchEmbedding[128], torch.zeros(256))
# illegal shape at class creation time
with pytest.raises(ValueError):
parse_obj_as(TorchEmbedding[128, 128], torch.zeros(128, 128))
|
import collections
import json
import logging
import os
import string
from typing import Iterable, List
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
logger = logging.getLogger(__name__)
class PhraseTokenizer(WordTokenizer):
"""Tokenizes the text with respect to existent phrases in the vocab.
This tokenizers respects phrases that are in the vocab. Phrases are separated with 'ngram_separator', for example,
in Google News word2vec file, ngrams are separated with a _ like New_York. These phrases are detected in text and merged as one special token. (New York is the ... => [New_York, is, the])
"""
def __init__(
self,
vocab: Iterable[str] = [],
stop_words: Iterable[str] = ENGLISH_STOP_WORDS,
do_lower_case: bool = False,
ngram_separator: str = "_",
max_ngram_length: int = 5,
):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.ngram_separator = ngram_separator
self.max_ngram_length = max_ngram_length
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
# Check for ngram in vocab
self.ngram_lookup = set()
self.ngram_lengths = set()
for word in vocab:
if self.ngram_separator is not None and self.ngram_separator in word:
                # Some words might be malformed, e.g. in the Google News word2vec file, containing two or more _ in a row
ngram_count = word.count(self.ngram_separator) + 1
if self.ngram_separator + self.ngram_separator not in word and ngram_count <= self.max_ngram_length:
self.ngram_lookup.add(word)
self.ngram_lengths.add(ngram_count)
if len(vocab) > 0:
logger.info("PhraseTokenizer - Phrase ngram lengths: {}".format(self.ngram_lengths))
logger.info("PhraseTokenizer - Num phrases: {}".format(len(self.ngram_lookup)))
def tokenize(self, text: str, **kwargs) -> List[int]:
from nltk import word_tokenize
tokens = word_tokenize(text, preserve_line=True)
# phrase detection
for ngram_len in sorted(self.ngram_lengths, reverse=True):
idx = 0
while idx <= len(tokens) - ngram_len:
ngram = self.ngram_separator.join(tokens[idx : idx + ngram_len])
if ngram in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram]
elif ngram.lower() in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram.lower()]
idx += 1
# Map tokens to idx, filter stop words
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "phrasetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
"ngram_separator": self.ngram_separator,
"max_ngram_length": self.max_ngram_length,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "phrasetokenizer_config.json"), "r") as fIn:
config = json.load(fIn)
return PhraseTokenizer(**config)
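# --- Illustrative usage sketch (assumption, not part of the module above) ---
# A minimal example of the phrase merging described in the class docstring; the tiny
# vocab below is made up, and nltk must be installed for word_tokenize.
#
#   vocab = ["new_york", "is", "the", "capital"]
#   tokenizer = PhraseTokenizer(vocab=vocab, stop_words=[])
#   tokenizer.tokenize("New York is the capital")
#   # -> [0, 1, 2, 3]  (i.e. [new_york, is, the, capital] mapped to vocab indices)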
|
from typing import List, Iterable
import collections
import string
import os
import json
import logging
from .WordTokenizer import WordTokenizer, ENGLISH_STOP_WORDS
from transformers.utils.import_utils import is_nltk_available, NLTK_IMPORT_ERROR
logger = logging.getLogger(__name__)
class PhraseTokenizer(WordTokenizer):
"""Tokenizes the text with respect to existent phrases in the vocab.
This tokenizers respects phrases that are in the vocab. Phrases are separated with 'ngram_separator', for example,
in Google News word2vec file, ngrams are separated with a _ like New_York. These phrases are detected in text and merged as one special token. (New York is the ... => [New_York, is, the])
"""
def __init__(
self,
vocab: Iterable[str] = [],
stop_words: Iterable[str] = ENGLISH_STOP_WORDS,
do_lower_case: bool = False,
ngram_separator: str = "_",
max_ngram_length: int = 5,
):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.ngram_separator = ngram_separator
self.max_ngram_length = max_ngram_length
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
# Check for ngram in vocab
self.ngram_lookup = set()
self.ngram_lengths = set()
for word in vocab:
if self.ngram_separator is not None and self.ngram_separator in word:
                # Some words might be malformed, e.g. in the Google News word2vec file, containing two or more _ in a row
ngram_count = word.count(self.ngram_separator) + 1
if self.ngram_separator + self.ngram_separator not in word and ngram_count <= self.max_ngram_length:
self.ngram_lookup.add(word)
self.ngram_lengths.add(ngram_count)
if len(vocab) > 0:
logger.info("PhraseTokenizer - Phrase ngram lengths: {}".format(self.ngram_lengths))
logger.info("PhraseTokenizer - Num phrases: {}".format(len(self.ngram_lookup)))
def tokenize(self, text: str, **kwargs) -> List[int]:
from nltk import word_tokenize
tokens = word_tokenize(text, preserve_line=True)
# phrase detection
for ngram_len in sorted(self.ngram_lengths, reverse=True):
idx = 0
while idx <= len(tokens) - ngram_len:
ngram = self.ngram_separator.join(tokens[idx : idx + ngram_len])
if ngram in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram]
elif ngram.lower() in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram.lower()]
idx += 1
# Map tokens to idx, filter stop words
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "phrasetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
"ngram_separator": self.ngram_separator,
"max_ngram_length": self.max_ngram_length,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "phrasetokenizer_config.json"), "r") as fIn:
config = json.load(fIn)
return PhraseTokenizer(**config)
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/schedules/schedule_1x.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
image_size = (896, 896)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='BN', requires_grad=True)
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth' # noqa
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
_delete_=True,
type='EfficientNet',
arch='b3',
drop_path_rate=0.2,
out_indices=(3, 4, 5),
frozen_stages=0,
norm_cfg=dict(
type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01),
norm_eval=False,
init_cfg=dict(
type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
neck=dict(
in_channels=[48, 136, 384],
start_level=0,
out_channels=256,
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=image_size),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=image_size, keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=4, num_workers=4, dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.04),
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
# learning policy
max_epochs = 12
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# cudnn_benchmark=True can accelerate fixed-size training
env_cfg = dict(cudnn_benchmark=True)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (4 samples per GPU)
auto_scale_lr = dict(base_batch_size=32)
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='EfficientNet',
arch='b3',
drop_path_rate=0.2,
out_indices=(3, 4, 5),
frozen_stages=0,
norm_cfg=dict(
type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01),
norm_eval=False,
init_cfg=dict(
type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
neck=dict(
in_channels=[48, 136, 384],
start_level=0,
out_channels=256,
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_size = (896, 896)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=img_size,
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=img_size),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=img_size),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_size,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=img_size),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer_config = dict(grad_clip=None)
optimizer = dict(
type='SGD',
lr=0.04,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[8, 11])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=12)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (4 samples per GPU)
auto_scale_lr = dict(base_batch_size=32)
|
from typing import List, Optional
import pandas as pd
import pytest
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
from docarray.typing import NdArray, TorchTensor
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
lst: List[str]
return MyDocNested
@pytest.mark.parametrize('doc_vec', [False, True])
def test_to_from_pandas_df(nested_doc_cls, doc_vec):
da = DocList[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
lst=["hello", "world"],
),
nested_doc_cls(
text='hello world', image=ImageDoc(), lst=["hello", "world"]
),
]
)
if doc_vec:
da = da.to_doc_vec()
df = da.to_dataframe()
assert isinstance(df, pd.DataFrame)
assert len(df) == 2
assert (
df.columns
== [
'id',
'count',
'text',
'image__id',
'image__url',
'image__tensor',
'image__embedding',
'image__bytes_',
'lst',
]
).all()
if doc_vec:
da_from_df = DocVec[nested_doc_cls].from_dataframe(df)
assert isinstance(da_from_df, DocVec)
else:
da_from_df = DocList[nested_doc_cls].from_dataframe(df)
assert isinstance(da_from_df, DocList)
for doc1, doc2 in zip(da, da_from_df):
assert doc1 == doc2
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
@pytest.mark.parametrize('array_cls', [DocList, DocVec])
def test_from_pandas_without_schema_raise_exception(array_cls):
with pytest.raises(TypeError, match='no document schema defined'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
array_cls.from_dataframe(df=df)
@pytest.mark.parametrize('array_cls', [DocList, DocVec])
def test_from_pandas_with_wrong_schema_raise_exception(nested_doc, array_cls):
with pytest.raises(ValueError, match='Column names do not match the schema'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
array_cls[nested_doc.__class__].from_dataframe(df=df)
def test_doc_list_error():
class Book(BaseDoc):
title: str
# not testing DocVec bc it already fails here (as it should!)
docs = DocList([Book(title='hello'), Book(title='world')])
with pytest.raises(TypeError):
docs.to_dataframe()
@pytest.mark.proto
def test_union_type_error():
from typing import Union
from docarray.documents import TextDoc
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
with pytest.raises(ValueError):
DocList[CustomDoc].from_dataframe(docs.to_dataframe())
class BasisUnion(BaseDoc):
ud: Union[int, str]
docs_basic = DocList[BasisUnion]([BasisUnion(ud="hello")])
docs_copy = DocList[BasisUnion].from_dataframe(docs_basic.to_dataframe())
assert docs_copy == docs_basic
@pytest.mark.parametrize('tensor_type', [NdArray, TorchTensor])
def test_from_to_pandas_tensor_type(tensor_type):
class MyDoc(BaseDoc):
embedding: tensor_type
text: str
image: ImageDoc
da = DocVec[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
],
tensor_type=tensor_type,
)
df_da = da.to_dataframe()
da2 = DocVec[MyDoc].from_dataframe(df_da, tensor_type=tensor_type)
assert da2.tensor_type == tensor_type
assert isinstance(da2.embedding, tensor_type)
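# Note on the column layout asserted in the tests above: nested documents are
# flattened into dataframe columns with a double-underscore separator (the
# ImageDoc fields become image__id, image__url, image__tensor, ...), which is
# what allows from_dataframe to rebuild the original nested schema.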
|
from typing import List, Optional
import pandas as pd
import pytest
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
lst: List[str]
return MyDocNested
@pytest.mark.parametrize('doc_vec', [False, True])
def test_to_from_pandas_df(nested_doc_cls, doc_vec):
da = DocList[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
lst=["hello", "world"],
),
nested_doc_cls(
text='hello world', image=ImageDoc(), lst=["hello", "world"]
),
]
)
if doc_vec:
da = da.to_doc_vec()
df = da.to_dataframe()
assert isinstance(df, pd.DataFrame)
assert len(df) == 2
assert (
df.columns
== [
'id',
'count',
'text',
'image__id',
'image__url',
'image__tensor',
'image__embedding',
'image__bytes_',
'lst',
]
).all()
if doc_vec:
da_from_df = DocVec[nested_doc_cls].from_dataframe(df)
assert isinstance(da_from_df, DocVec)
else:
da_from_df = DocList[nested_doc_cls].from_dataframe(df)
assert isinstance(da_from_df, DocList)
for doc1, doc2 in zip(da, da_from_df):
assert doc1 == doc2
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
@pytest.mark.parametrize('array_cls', [DocList, DocVec])
def test_from_pandas_without_schema_raise_exception(array_cls):
with pytest.raises(TypeError, match='no document schema defined'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
array_cls.from_dataframe(df=df)
@pytest.mark.parametrize('array_cls', [DocList, DocVec])
def test_from_pandas_with_wrong_schema_raise_exception(nested_doc, array_cls):
with pytest.raises(ValueError, match='Column names do not match the schema'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
array_cls[nested_doc.__class__].from_dataframe(df=df)
def test_doc_list_error():
class Book(BaseDoc):
title: str
# not testing DocVec bc it already fails here (as it should!)
docs = DocList([Book(title='hello'), Book(title='world')])
with pytest.raises(TypeError):
docs.to_dataframe()
@pytest.mark.proto
def test_union_type_error():
from typing import Union
from docarray.documents import TextDoc
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
with pytest.raises(ValueError):
DocList[CustomDoc].from_dataframe(docs.to_dataframe())
class BasisUnion(BaseDoc):
ud: Union[int, str]
docs_basic = DocList[BasisUnion]([BasisUnion(ud="hello")])
docs_copy = DocList[BasisUnion].from_dataframe(docs_basic.to_dataframe())
assert docs_copy == docs_basic
|
"""Init file of LlamaIndex."""
__version__ = "0.12.30"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Init file of LlamaIndex."""
__version__ = "0.12.29"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
input_size = 300
model = dict(
bbox_head=dict(
type='SSDHead',
anchor_generator=dict(
type='LegacySSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
bbox_coder=dict(
type='LegacyDeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2])))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=3,
train=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(_delete_=True)
dist_params = dict(backend='nccl', port=29555)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
input_size = 300
model = dict(
bbox_head=dict(
type='SSDHead',
anchor_generator=dict(
type='LegacySSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
bbox_coder=dict(
type='LegacyDeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2])))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=3,
train=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(_delete_=True)
dist_params = dict(backend='nccl', port=29555)
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Optional, Sequence, Union
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Record time flag before start a epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
self.t = time.time()
def _before_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
mode: str = 'train') -> None:
"""Logging time for loading data and update the time flag.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_scalar(f'{mode}/data_time',
time.time() - self.t)
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Logging time for a iteration and update the time flag.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_scalar(f'{mode}/time', time.time() - self.t)
self.t = time.time()
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Any, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataElement]]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Record time flag before start a epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
self.t = time.time()
def _before_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
mode: str = 'train') -> None:
"""Logging time for loading data and update the time flag.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_scalar(f'{mode}/data_time',
time.time() - self.t)
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Logging time for a iteration and update the time flag.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_scalar(f'{mode}/time', time.time() - self.t)
self.t = time.time()
|
from __future__ import annotations
import operator
from collections.abc import Sequence
from typing import Optional
from langchain_core.callbacks import Callbacks
from langchain_core.documents import BaseDocumentCompressor, Document
from pydantic import ConfigDict
from langchain.retrievers.document_compressors.cross_encoder import BaseCrossEncoder
class CrossEncoderReranker(BaseDocumentCompressor):
"""Document compressor that uses CrossEncoder for reranking."""
model: BaseCrossEncoder
"""CrossEncoder model to use for scoring similarity
between the query and documents."""
top_n: int = 3
"""Number of documents to return."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""
Rerank documents using CrossEncoder.
Args:
documents: A sequence of documents to compress.
query: The query to use for compressing the documents.
callbacks: Callbacks to run during the compression process.
Returns:
A sequence of compressed documents.
"""
scores = self.model.score([(query, doc.page_content) for doc in documents])
docs_with_scores = list(zip(documents, scores))
result = sorted(docs_with_scores, key=operator.itemgetter(1), reverse=True)
return [doc for doc, _ in result[: self.top_n]]
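# Minimal usage sketch (illustrative only; `my_cross_encoder` stands for any
# concrete BaseCrossEncoder implementation and is not defined in this file):
#
#     reranker = CrossEncoderReranker(model=my_cross_encoder, top_n=2)
#     top_docs = reranker.compress_documents(docs, "what is the capital of France?")
#
# The model scores every (query, document) pair and the top_n highest-scoring
# documents are returned in descending score order.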
|
from __future__ import annotations
import operator
from typing import Optional, Sequence
from langchain_core.callbacks import Callbacks
from langchain_core.documents import BaseDocumentCompressor, Document
from pydantic import ConfigDict
from langchain.retrievers.document_compressors.cross_encoder import BaseCrossEncoder
class CrossEncoderReranker(BaseDocumentCompressor):
"""Document compressor that uses CrossEncoder for reranking."""
model: BaseCrossEncoder
"""CrossEncoder model to use for scoring similarity
between the query and documents."""
top_n: int = 3
"""Number of documents to return."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""
Rerank documents using CrossEncoder.
Args:
documents: A sequence of documents to compress.
query: The query to use for compressing the documents.
callbacks: Callbacks to run during the compression process.
Returns:
A sequence of compressed documents.
"""
scores = self.model.score([(query, doc.page_content) for doc in documents])
docs_with_scores = list(zip(documents, scores))
result = sorted(docs_with_scores, key=operator.itemgetter(1), reverse=True)
return [doc for doc, _ in result[: self.top_n]]
|
# Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import SSDHead
class TestSSDHead(TestCase):
def test_ssd_head_loss(self):
"""Tests ssd head loss when truth is empty and non-empty."""
s = 300
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}]
cfg = Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
sampler=dict(type='PseudoSampler'),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False))
ssd_head = SSDHead(
num_classes=4,
in_channels=(1, 1, 1, 1, 1, 1),
stacked_convs=1,
feat_channels=1,
use_depthwise=True,
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=s,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
train_cfg=cfg)
# SSD head expects multiple levels of features per image
feats = (
torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))
for stride in ssd_head.prior_generator.strides)
cls_scores, bbox_preds = ssd_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = ssd_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
# When there is no truth, cls_loss and box_loss should all be zero.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
self.assertEqual(
empty_cls_loss.item(), 0,
'there should be no cls loss when there are no true boxes')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = ssd_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import SSDHead
class TestSSDHead(TestCase):
def test_ssd_head_loss(self):
"""Tests ssd head loss when truth is empty and non-empty."""
s = 300
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}]
cfg = Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
sampler=dict(type='PseudoSampler'),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False))
ssd_head = SSDHead(
num_classes=4,
in_channels=(1, 1, 1, 1, 1, 1),
stacked_convs=1,
feat_channels=1,
use_depthwise=True,
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=s,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
train_cfg=cfg)
# SSD head expects multiple levels of features per image
feats = (
torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))
for stride in ssd_head.prior_generator.strides)
cls_scores, bbox_preds = ssd_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = ssd_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
# When there is no truth, cls_loss and box_loss should all be zero.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
self.assertEqual(
empty_cls_loss.item(), 0,
'there should be no cls loss when there are no true boxes')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = ssd_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
|
import multiprocessing
import os
import signal
import time
import pytest
from jina import Document, DocumentArray, Executor, requests
from jina.clients.request import request_generator
from jina.parsers import set_gateway_parser
from jina.serve.networking.utils import send_request_sync
from jina_cli.api import executor_native, gateway
from tests.helper import _generate_pod_args
class DummyExecutor(Executor):
def __init__(self, dir=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dir = dir
self.request_count = 0
@requests
def slow_count(self, **kwargs):
time.sleep(0.5)
self.request_count += 1
def close(self):
super().close()
with open(f'{self.dir}/test.txt', 'w') as fp:
fp.write(f'proper close;{self.request_count}')
def _create_test_data_message():
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
return req
@pytest.mark.parametrize('signal', [signal.SIGTERM, signal.SIGINT])
def test_executor_runtimes(signal, tmpdir):
import time
args = _generate_pod_args()
def run(args):
args.uses = {
'jtype': 'DummyExecutor',
'with': {'dir': str(tmpdir)},
'metas': {'workspace': str(tmpdir)},
}
executor_native(args)
process = multiprocessing.Process(target=run, args=(args,))
process.start()
time.sleep(0.5)
send_request_sync(_create_test_data_message(), target=f'{args.host}:{args.port[0]}')
time.sleep(0.1)
os.kill(process.pid, signal)
process.join()
with open(f'{tmpdir}/test.txt', 'r') as fp:
output = fp.read()
split = output.split(';')
assert split[0] == 'proper close'
assert split[1] == '1'
@pytest.mark.parametrize('signal', [signal.SIGTERM, signal.SIGINT])
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_gateway(signal, protocol):
import time
def run():
args = set_gateway_parser().parse_args(
[
'--protocol',
protocol,
'--graph-description',
'{}',
'--deployments-addresses',
'{}',
]
)
gateway(args)
process = multiprocessing.Process(target=run)
process.start()
time.sleep(0.5)
os.kill(process.pid, signal)
process.join()
|
import multiprocessing
import os
import signal
import time
import pytest
from jina import Document, DocumentArray, Executor, requests
from jina.clients.request import request_generator
from jina.parsers import set_gateway_parser
from jina.serve.networking.utils import send_request_sync
from jina_cli.api import executor_native, gateway
from tests.helper import _generate_pod_args
class DummyExecutor(Executor):
def __init__(self, dir=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dir = dir
self.request_count = 0
@requests
def slow_count(self, **kwargs):
time.sleep(0.5)
self.request_count += 1
def close(self):
super().close()
with open(f'{self.dir}/test.txt', 'w') as fp:
fp.write(f'proper close;{self.request_count}')
def _create_test_data_message():
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
return req
@pytest.mark.parametrize('signal', [signal.SIGTERM, signal.SIGINT])
def test_executor_runtimes(signal, tmpdir):
import time
args = _generate_pod_args()
def run(args):
args.uses = {
'jtype': 'DummyExecutor',
'with': {'dir': str(tmpdir)},
'metas': {'workspace': str(tmpdir)},
}
executor_native(args)
process = multiprocessing.Process(target=run, args=(args,))
process.start()
time.sleep(0.5)
send_request_sync(_create_test_data_message(), target=f'{args.host}:{args.port}')
time.sleep(0.1)
os.kill(process.pid, signal)
process.join()
with open(f'{tmpdir}/test.txt', 'r') as fp:
output = fp.read()
split = output.split(';')
assert split[0] == 'proper close'
assert split[1] == '1'
@pytest.mark.parametrize('signal', [signal.SIGTERM, signal.SIGINT])
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_gateway(signal, protocol):
import time
def run():
args = set_gateway_parser().parse_args(
[
'--protocol',
protocol,
'--graph-description',
'{}',
'--deployments-addresses',
'{}',
]
)
gateway(args)
process = multiprocessing.Process(target=run)
process.start()
time.sleep(0.5)
os.kill(process.pid, signal)
process.join()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .build_functions import (build_from_cfg, build_model_from_cfg,
build_runner_from_cfg, build_scheduler_from_cfg)
from .default_scope import DefaultScope
from .registry import Registry
from .root import (DATA_SAMPLERS, DATASETS, EVALUATOR, HOOKS, LOG_PROCESSORS,
LOOPS, METRICS, MODEL_WRAPPERS, MODELS,
OPTIM_WRAPPER_CONSTRUCTORS, OPTIM_WRAPPERS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, VISBACKENDS, VISUALIZERS, WEIGHT_INITIALIZERS)
from .utils import count_registered_modules, traverse_registry_tree
__all__ = [
'Registry', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS', 'DATASETS',
'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIM_WRAPPER_CONSTRUCTORS', 'TASK_UTILS',
'PARAM_SCHEDULERS', 'METRICS', 'MODEL_WRAPPERS', 'OPTIM_WRAPPERS', 'LOOPS',
'VISBACKENDS', 'VISUALIZERS', 'LOG_PROCESSORS', 'EVALUATOR',
'DefaultScope', 'traverse_registry_tree', 'count_registered_modules',
'build_model_from_cfg', 'build_runner_from_cfg', 'build_from_cfg',
'build_scheduler_from_cfg'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .build_functions import (build_from_cfg, build_model_from_cfg,
build_runner_from_cfg)
from .default_scope import DefaultScope
from .registry import Registry
from .root import (DATA_SAMPLERS, DATASETS, EVALUATOR, HOOKS, LOG_PROCESSORS,
LOOPS, METRICS, MODEL_WRAPPERS, MODELS,
OPTIM_WRAPPER_CONSTRUCTORS, OPTIM_WRAPPERS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, VISBACKENDS, VISUALIZERS, WEIGHT_INITIALIZERS)
from .utils import count_registered_modules, traverse_registry_tree
__all__ = [
'Registry', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS', 'DATASETS',
'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIM_WRAPPER_CONSTRUCTORS', 'TASK_UTILS',
'PARAM_SCHEDULERS', 'METRICS', 'MODEL_WRAPPERS', 'OPTIM_WRAPPERS', 'LOOPS',
'VISBACKENDS', 'VISUALIZERS', 'LOG_PROCESSORS', 'EVALUATOR',
'DefaultScope', 'traverse_registry_tree', 'count_registered_modules',
'build_model_from_cfg', 'build_runner_from_cfg', 'build_from_cfg'
]
|
import operator
import pytest
from langchain_core.utils.usage import _dict_int_op
def test_dict_int_op_add() -> None:
left = {"a": 1, "b": 2}
right = {"b": 3, "c": 4}
result = _dict_int_op(left, right, operator.add)
assert result == {"a": 1, "b": 5, "c": 4}
def test_dict_int_op_subtract() -> None:
left = {"a": 5, "b": 10}
right = {"a": 2, "b": 3, "c": 1}
result = _dict_int_op(left, right, lambda x, y: max(x - y, 0))
assert result == {"a": 3, "b": 7, "c": 0}
def test_dict_int_op_nested() -> None:
left = {"a": 1, "b": {"c": 2, "d": 3}}
right = {"a": 2, "b": {"c": 1, "e": 4}}
result = _dict_int_op(left, right, operator.add)
assert result == {"a": 3, "b": {"c": 3, "d": 3, "e": 4}}
def test_dict_int_op_max_depth_exceeded() -> None:
left = {"a": {"b": {"c": 1}}}
right = {"a": {"b": {"c": 2}}}
with pytest.raises(
ValueError, match="max_depth=2 exceeded, unable to combine dicts."
):
_dict_int_op(left, right, operator.add, max_depth=2)
def test_dict_int_op_invalid_types() -> None:
left = {"a": 1, "b": "string"}
right = {"a": 2, "b": 3}
with pytest.raises(
ValueError,
match="Only dict and int values are supported.",
):
_dict_int_op(left, right, operator.add)
|
import operator
import pytest
from langchain_core.utils.usage import _dict_int_op
def test_dict_int_op_add() -> None:
left = {"a": 1, "b": 2}
right = {"b": 3, "c": 4}
result = _dict_int_op(left, right, operator.add)
assert result == {"a": 1, "b": 5, "c": 4}
def test_dict_int_op_subtract() -> None:
left = {"a": 5, "b": 10}
right = {"a": 2, "b": 3, "c": 1}
result = _dict_int_op(left, right, lambda x, y: max(x - y, 0))
assert result == {"a": 3, "b": 7, "c": 0}
def test_dict_int_op_nested() -> None:
left = {"a": 1, "b": {"c": 2, "d": 3}}
right = {"a": 2, "b": {"c": 1, "e": 4}}
result = _dict_int_op(left, right, operator.add)
assert result == {"a": 3, "b": {"c": 3, "d": 3, "e": 4}}
def test_dict_int_op_max_depth_exceeded() -> None:
left = {"a": {"b": {"c": 1}}}
right = {"a": {"b": {"c": 2}}}
with pytest.raises(ValueError):
_dict_int_op(left, right, operator.add, max_depth=2)
def test_dict_int_op_invalid_types() -> None:
left = {"a": 1, "b": "string"}
right = {"a": 2, "b": 3}
with pytest.raises(ValueError):
_dict_int_op(left, right, operator.add)
|
# π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# This file was automatically generated from examples/modular-transformers/modular_add_function.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_add_function.py file directly. One of our CI enforces this.
# π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# Note that zamba does not have the `apply_rotary_pos_emb` function!
from typing import Optional, Tuple
import torch
from torch import nn
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
class TestAttention(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
and "Generating Long Sequences with Sparse Transformers".
Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer
(see fig. 2 in https://huggingface.co/papers/2405.16712).
Additionally, replaced
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
"""
def __init__(self):
pass
def forward(self) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
_ = apply_rotary_pos_emb(1, 1, 1, 1)
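# Shape sketch for the docstring above (illustrative only): with
#     q, k of shape [batch_size, heads, seq_len, head_dim]
#     cos, sin of shape [batch_size, seq_len, head_dim]
# unsqueeze_dim=1 expands cos/sin to [batch_size, 1, seq_len, head_dim] so they
# broadcast over the heads dimension during the element-wise rotation.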
|
# π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# This file was automatically generated from examples/modular-transformers/modular_add_function.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_add_function.py file directly. One of our CI enforces this.
# π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# Note that zamba does not have the `apply_rotary_pos_emb` function!
from typing import Optional, Tuple
import torch
from torch import nn
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
class TestAttention(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
and "Generating Long Sequences with Sparse Transformers".
Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer
(see fig. 2 in https://arxiv.org/pdf/2405.16712).
Additionally, replaced
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
"""
def __init__(self):
pass
def forward(self) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
_ = apply_rotary_pos_emb(1, 1, 1, 1)
|
from pathlib import Path
from typing import List
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from ...audioclip_text import AudioCLIPTextEncoder
_EMBEDDING_DIM = 1024
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
def test_encoding_cpu():
enc = AudioCLIPTextEncoder(device='cpu')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
def test_encoding_gpu():
enc = AudioCLIPTextEncoder(device='cuda')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_path'],
[
(pytest.lazy_fixture('docs_with_text'), [['r', 10], ['c', 0], ['cc', 0]], 'r'),
(
pytest.lazy_fixture('docs_with_chunk_text'),
[['r', 0], ['c', 10], ['cc', 0]],
'c',
),
(
pytest.lazy_fixture('docs_with_chunk_chunk_text'),
[['r', 0], ['c', 0], ['cc', 100]],
'cc',
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_path: str
):
encoder = AudioCLIPTextEncoder(default_traversal_paths=[traversal_path])
encoder.encode(docs, {'traversal_paths': [traversal_path]})
for path, count in docs_per_path:
embeddings = (
DocumentArray(docs).traverse_flat([path]).get_attributes('embedding')
)
assert len(list(filter(lambda x: x is not None, embeddings))) == count
def test_encodes_semantic_meaning():
sentences = dict()
sentences["A"] = "Hello, my name is Michael."
sentences["B"] = "Today we are going to Disney World."
sentences["C"] = "There are animals on the road"
sentences["D"] = "A dog is running down the road"
encoder = AudioCLIPTextEncoder()
embeddings = {}
for id_, sentence in sentences.items():
docs = DocumentArray([Document(text=sentence)])
encoder.encode(docs, parameters={})
embeddings[id_] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist("C", "D")
assert small_distance < dist("C", "B")
assert small_distance < dist("C", "A")
assert small_distance < dist("B", "A")
def test_multiple_traversal_paths():
sentences = list()
sentences.append('Hello, my name is Michael.')
sentences.append('Today we are going to Disney World.')
sentences.append('There are animals on the road')
sentences.append('A dog is running down the road')
docs = DocumentArray([Document(text=sentence) for sentence in sentences])
for index, sent in enumerate(sentences):
docs[index].chunks.append(Document(text=sent))
docs[index].chunks[0].chunks.append(Document(text=sentences[3 - index]))
encoder = AudioCLIPTextEncoder(default_traversal_paths=['r', 'c', 'cc'])
encoder.encode(docs, {})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
assert doc.chunks[0].embedding.shape == (_EMBEDDING_DIM,)
assert doc.chunks[0].chunks[0].embedding.shape == (_EMBEDDING_DIM,)
def test_no_docs():
encoder = AudioCLIPTextEncoder()
encoder.encode(None, {})
encoder.encode(DocumentArray(), {})
|
from pathlib import Path
from typing import List
import numpy as np
import pytest
import torch
from jina import Document, DocumentArray, Executor
from ...audioclip_text import AudioCLIPTextEncoder
_EMBEDDING_DIM = 1024
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
def test_encoding_cpu():
enc = AudioCLIPTextEncoder(device='cpu')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='GPU is needed for this test')
def test_encoding_gpu():
enc = AudioCLIPTextEncoder(device='cuda')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_path'],
[
(pytest.lazy_fixture('docs_with_text'), [['r', 10], ['c', 0], ['cc', 0]], 'r'),
(
pytest.lazy_fixture('docs_with_chunk_text'),
[['r', 0], ['c', 10], ['cc', 0]],
'c',
),
(
pytest.lazy_fixture('docs_with_chunk_chunk_text'),
[['r', 0], ['c', 0], ['cc', 10]],
'cc',
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_path: str
):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
for path, count in expected_docs_per_path:
embeddings = (
DocumentArray(res).traverse_flat([path]).get_attributes('embedding')
)
for emb in embeddings:
if emb is None:
return False
return len(embeddings) == count
return validate
encoder = AudioCLIPTextEncoder(default_traversal_paths=[traversal_path])
encoder.encode(docs, {'traversal_paths': [traversal_path]})
assert validate_traversal(docs_per_path)(docs)
def test_encodes_semantic_meaning():
sentences = dict()
sentences["A"] = "Hello, my name is Michael."
sentences["B"] = "Today we are going to Disney World."
sentences["C"] = "There are animals on the road"
sentences["D"] = "A dog is running down the road"
encoder = AudioCLIPTextEncoder()
embeddings = {}
for id_, sentence in sentences.items():
docs = DocumentArray([Document(text=sentence)])
encoder.encode(docs, parameters={})
embeddings[id_] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist("C", "D")
assert small_distance < dist("C", "B")
assert small_distance < dist("C", "A")
assert small_distance < dist("B", "A")
def test_multiple_traversal_paths():
sentences = list()
sentences.append('Hello, my name is Michael.')
sentences.append('Today we are going to Disney World.')
sentences.append('There are animals on the road')
sentences.append('A dog is running down the road')
docs = DocumentArray([Document(text=sentence) for sentence in sentences])
for index, sent in enumerate(sentences):
docs[index].chunks.append(Document(text=sent))
docs[index].chunks[0].chunks.append(Document(text=sentences[3 - index]))
encoder = AudioCLIPTextEncoder(default_traversal_paths=['r', 'c', 'cc'])
encoder.encode(docs, {})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
assert doc.chunks[0].embedding.shape == (_EMBEDDING_DIM,)
assert doc.chunks[0].chunks[0].embedding.shape == (_EMBEDDING_DIM,)
def test_no_docs():
encoder = AudioCLIPTextEncoder()
encoder.encode(None, {})
encoder.encode(DocumentArray(), {})
|