input | output |
---|---|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import Config
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import YOLOV3Head
class TestYOLOV3Head(TestCase):
def test_yolo_head_loss(self):
"""Tests YOLO head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
head = YOLOV3Head(
num_classes=4,
in_channels=[1, 1, 1],
out_channels=[1, 1, 1],
train_cfg=Config(
dict(
assigner=dict(
type='GridAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0))))
head.init_weights()
# The YOLO head expects multiple levels of features per image
feats = [
torch.rand(1, 1, s // stride[1], s // stride[0])
for stride in head.prior_generator.strides
]
predmaps, = head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = head.loss_by_feat(predmaps, [gt_instances],
img_metas)
# When there is no truth, the conf loss should be nonzero but
# cls loss and xy&wh loss should be zero
empty_cls_loss = sum(empty_gt_losses['loss_cls']).item()
empty_conf_loss = sum(empty_gt_losses['loss_conf']).item()
empty_xy_loss = sum(empty_gt_losses['loss_xy']).item()
empty_wh_loss = sum(empty_gt_losses['loss_wh']).item()
self.assertGreater(empty_conf_loss, 0, 'conf loss should be non-zero')
self.assertEqual(
empty_cls_loss, 0,
'there should be no cls loss when there are no true boxes')
self.assertEqual(
empty_xy_loss, 0,
'there should be no xy loss when there are no true boxes')
self.assertEqual(
empty_wh_loss, 0,
'there should be no wh loss when there are no true boxes')
# When the ground truth is non-empty, the conf, cls and xy&wh losses
# should all be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = head.loss_by_feat(predmaps, [gt_instances], img_metas)
one_gt_cls_loss = sum(one_gt_losses['loss_cls']).item()
one_gt_conf_loss = sum(one_gt_losses['loss_conf']).item()
one_gt_xy_loss = sum(one_gt_losses['loss_xy']).item()
one_gt_wh_loss = sum(one_gt_losses['loss_wh']).item()
self.assertGreater(one_gt_conf_loss, 0, 'conf loss should be non-zero')
self.assertGreater(one_gt_cls_loss, 0, 'cls loss should be non-zero')
self.assertGreater(one_gt_xy_loss, 0, 'xy loss should be non-zero')
self.assertGreater(one_gt_wh_loss, 0, 'wh loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import Config
from mmengine.data import InstanceData
from mmdet.models.dense_heads import YOLOV3Head
class TestYOLOV3Head(TestCase):
def test_yolo_head_loss(self):
"""Tests YOLO head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
head = YOLOV3Head(
num_classes=4,
in_channels=[1, 1, 1],
out_channels=[1, 1, 1],
train_cfg=Config(
dict(
assigner=dict(
type='GridAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0))))
head.init_weights()
# The YOLO head expects multiple levels of features per image
feats = [
torch.rand(1, 1, s // stride[1], s // stride[0])
for stride in head.prior_generator.strides
]
predmaps, = head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = head.loss_by_feat(predmaps, [gt_instances],
img_metas)
# When there is no truth, the conf loss should be nonzero but
# cls loss and xy&wh loss should be zero
empty_cls_loss = sum(empty_gt_losses['loss_cls']).item()
empty_conf_loss = sum(empty_gt_losses['loss_conf']).item()
empty_xy_loss = sum(empty_gt_losses['loss_xy']).item()
empty_wh_loss = sum(empty_gt_losses['loss_wh']).item()
self.assertGreater(empty_conf_loss, 0, 'conf loss should be non-zero')
self.assertEqual(
empty_cls_loss, 0,
'there should be no cls loss when there are no true boxes')
self.assertEqual(
empty_xy_loss, 0,
'there should be no xy loss when there are no true boxes')
self.assertEqual(
empty_wh_loss, 0,
'there should be no wh loss when there are no true boxes')
# When the ground truth is non-empty, the conf, cls and xy&wh losses
# should all be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = head.loss_by_feat(predmaps, [gt_instances], img_metas)
one_gt_cls_loss = sum(one_gt_losses['loss_cls']).item()
one_gt_conf_loss = sum(one_gt_losses['loss_conf']).item()
one_gt_xy_loss = sum(one_gt_losses['loss_xy']).item()
one_gt_wh_loss = sum(one_gt_losses['loss_wh']).item()
self.assertGreater(one_gt_conf_loss, 0, 'conf loss should be non-zero')
self.assertGreater(one_gt_cls_loss, 0, 'cls loss should be non-zero')
self.assertGreater(one_gt_xy_loss, 0, 'xy loss should be non-zero')
self.assertGreater(one_gt_wh_loss, 0, 'wh loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class CornerNet(SingleStageDetector):
"""CornerNet.
This detector is the implementation of the paper `CornerNet: Detecting
Objects as Paired Keypoints <https://arxiv.org/abs/1808.01244>`_ .
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class CornerNet(SingleStageDetector):
"""CornerNet.
This detector is the implementation of the paper `CornerNet: Detecting
Objects as Paired Keypoints <https://arxiv.org/abs/1808.01244>`_ .
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
"""Hypothetical Document Embeddings.
https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations
import logging
from typing import Any, Optional
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.runnables import Runnable
from pydantic import ConfigDict
from langchain.chains.base import Chain
from langchain.chains.hyde.prompts import PROMPT_MAP
from langchain.chains.llm import LLMChain
logger = logging.getLogger(__name__)
class HypotheticalDocumentEmbedder(Chain, Embeddings):
"""Generate hypothetical document for query, and then embed that.
Based on https://arxiv.org/abs/2212.10496
"""
base_embeddings: Embeddings
llm_chain: Runnable
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> list[str]:
"""Input keys for Hyde's LLM chain."""
return self.llm_chain.input_schema.model_json_schema()["required"]
@property
def output_keys(self) -> list[str]:
"""Output keys for Hyde's LLM chain."""
if isinstance(self.llm_chain, LLMChain):
return self.llm_chain.output_keys
else:
return ["text"]
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Call the base embeddings."""
return self.base_embeddings.embed_documents(texts)
def combine_embeddings(self, embeddings: list[list[float]]) -> list[float]:
"""Combine embeddings into final embeddings."""
try:
import numpy as np
return list(np.array(embeddings).mean(axis=0))
except ImportError:
logger.warning(
"NumPy not found in the current Python environment. "
"HypotheticalDocumentEmbedder will use a pure Python implementation "
"for internal calculations, which may significantly impact "
"performance, especially for large datasets. For optimal speed and "
"efficiency, consider installing NumPy: pip install numpy"
)
if not embeddings:
return []
num_vectors = len(embeddings)
return [sum(dim_values) / num_vectors for dim_values in zip(*embeddings)]
def embed_query(self, text: str) -> list[float]:
"""Generate a hypothetical document and embedded it."""
var_name = self.input_keys[0]
result = self.llm_chain.invoke({var_name: text})
if isinstance(self.llm_chain, LLMChain):
documents = [result[self.output_keys[0]]]
else:
documents = [result]
embeddings = self.embed_documents(documents)
return self.combine_embeddings(embeddings)
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, str]:
"""Call the internal llm chain."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
return self.llm_chain.invoke(
inputs, config={"callbacks": _run_manager.get_child()}
)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
base_embeddings: Embeddings,
prompt_key: Optional[str] = None,
custom_prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> HypotheticalDocumentEmbedder:
"""Load and use LLMChain with either a specific prompt key or custom prompt."""
if custom_prompt is not None:
prompt = custom_prompt
elif prompt_key is not None and prompt_key in PROMPT_MAP:
prompt = PROMPT_MAP[prompt_key]
else:
raise ValueError(
f"Must specify prompt_key if custom_prompt not provided. Should be one "
f"of {list(PROMPT_MAP.keys())}."
)
llm_chain = prompt | llm | StrOutputParser()
return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs)
@property
def _chain_type(self) -> str:
return "hyde_chain"
|
"""Hypothetical Document Embeddings.
https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations
import logging
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.runnables import Runnable
from pydantic import ConfigDict
from langchain.chains.base import Chain
from langchain.chains.hyde.prompts import PROMPT_MAP
from langchain.chains.llm import LLMChain
logger = logging.getLogger(__name__)
class HypotheticalDocumentEmbedder(Chain, Embeddings):
"""Generate hypothetical document for query, and then embed that.
Based on https://arxiv.org/abs/2212.10496
"""
base_embeddings: Embeddings
llm_chain: Runnable
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> List[str]:
"""Input keys for Hyde's LLM chain."""
return self.llm_chain.input_schema.model_json_schema()["required"]
@property
def output_keys(self) -> List[str]:
"""Output keys for Hyde's LLM chain."""
if isinstance(self.llm_chain, LLMChain):
return self.llm_chain.output_keys
else:
return ["text"]
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call the base embeddings."""
return self.base_embeddings.embed_documents(texts)
def combine_embeddings(self, embeddings: List[List[float]]) -> List[float]:
"""Combine embeddings into final embeddings."""
try:
import numpy as np
return list(np.array(embeddings).mean(axis=0))
except ImportError:
logger.warning(
"NumPy not found in the current Python environment. "
"HypotheticalDocumentEmbedder will use a pure Python implementation "
"for internal calculations, which may significantly impact "
"performance, especially for large datasets. For optimal speed and "
"efficiency, consider installing NumPy: pip install numpy"
)
if not embeddings:
return []
num_vectors = len(embeddings)
return [sum(dim_values) / num_vectors for dim_values in zip(*embeddings)]
def embed_query(self, text: str) -> List[float]:
"""Generate a hypothetical document and embedded it."""
var_name = self.input_keys[0]
result = self.llm_chain.invoke({var_name: text})
if isinstance(self.llm_chain, LLMChain):
documents = [result[self.output_keys[0]]]
else:
documents = [result]
embeddings = self.embed_documents(documents)
return self.combine_embeddings(embeddings)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""Call the internal llm chain."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
return self.llm_chain.invoke(
inputs, config={"callbacks": _run_manager.get_child()}
)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
base_embeddings: Embeddings,
prompt_key: Optional[str] = None,
custom_prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> HypotheticalDocumentEmbedder:
"""Load and use LLMChain with either a specific prompt key or custom prompt."""
if custom_prompt is not None:
prompt = custom_prompt
elif prompt_key is not None and prompt_key in PROMPT_MAP:
prompt = PROMPT_MAP[prompt_key]
else:
raise ValueError(
f"Must specify prompt_key if custom_prompt not provided. Should be one "
f"of {list(PROMPT_MAP.keys())}."
)
llm_chain = prompt | llm | StrOutputParser()
return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs)
@property
def _chain_type(self) -> str:
return "hyde_chain"
|
from langchain_core._api.deprecation import (
LangChainDeprecationWarning,
LangChainPendingDeprecationWarning,
deprecated,
suppress_langchain_deprecation_warning,
surface_langchain_deprecation_warnings,
warn_deprecated,
)
AGENT_DEPRECATION_WARNING = (
"LangChain agents will continue to be supported, but it is recommended for new "
"use cases to be built with LangGraph. LangGraph offers a more flexible and "
"full-featured framework for building agents, including support for "
"tool-calling, persistence of state, and human-in-the-loop workflows. For "
"details, refer to the "
"`LangGraph documentation <https://langchain-ai.github.io/langgraph/>`_"
" as well as guides for "
"`Migrating from AgentExecutor <https://python.langchain.com/docs/how_to/migrate_agent/>`_"
" and LangGraph's "
"`Pre-built ReAct agent <https://langchain-ai.github.io/langgraph/how-tos/create-react-agent/>`_."
)
__all__ = [
"AGENT_DEPRECATION_WARNING",
"LangChainDeprecationWarning",
"LangChainPendingDeprecationWarning",
"deprecated",
"suppress_langchain_deprecation_warning",
"surface_langchain_deprecation_warnings",
"warn_deprecated",
]
|
from langchain_core._api.deprecation import (
LangChainDeprecationWarning,
LangChainPendingDeprecationWarning,
deprecated,
suppress_langchain_deprecation_warning,
surface_langchain_deprecation_warnings,
warn_deprecated,
)
AGENT_DEPRECATION_WARNING = (
"LangChain agents will continue to be supported, but it is recommended for new "
"use cases to be built with LangGraph. LangGraph offers a more flexible and "
"full-featured framework for building agents, including support for "
"tool-calling, persistence of state, and human-in-the-loop workflows. For "
"details, refer to the "
"`LangGraph documentation <https://langchain-ai.github.io/langgraph/>`_"
" as well as guides for "
"`Migrating from AgentExecutor <https://python.langchain.com/docs/how_to/migrate_agent/>`_" # noqa: E501
" and LangGraph's "
"`Pre-built ReAct agent <https://langchain-ai.github.io/langgraph/how-tos/create-react-agent/>`_." # noqa: E501
)
__all__ = [
"AGENT_DEPRECATION_WARNING",
"LangChainDeprecationWarning",
"LangChainPendingDeprecationWarning",
"deprecated",
"suppress_langchain_deprecation_warning",
"warn_deprecated",
"surface_langchain_deprecation_warnings",
]
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import InformationRetrievalEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseInformationRetrievalEvaluator(InformationRetrievalEvaluator):
def __init__(
self,
queries: dict[str, str], # qid => query
corpus: dict[str, str], # cid => doc
relevant_docs: dict[str, set[str]], # qid => Set[cid]
corpus_chunk_size: int = 50000,
mrr_at_k: list[int] = [10],
ndcg_at_k: list[int] = [10],
accuracy_at_k: list[int] = [1, 3, 5, 10],
precision_recall_at_k: list[int] = [1, 3, 5, 10],
map_at_k: list[int] = [100],
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: int | None = None,
score_functions: dict[str, Callable[[Tensor, Tensor], Tensor]] | None = None,
main_score_function: str | SimilarityFunction | None = None,
query_prompt: str | None = None,
query_prompt_name: str | None = None,
corpus_prompt: str | None = None,
corpus_prompt_name: str | None = None,
) -> None:
return super().__init__(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
corpus_chunk_size=corpus_chunk_size,
mrr_at_k=mrr_at_k,
ndcg_at_k=ndcg_at_k,
accuracy_at_k=accuracy_at_k,
precision_recall_at_k=precision_recall_at_k,
map_at_k=map_at_k,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
write_csv=write_csv,
truncate_dim=truncate_dim,
score_functions=score_functions,
main_score_function=main_score_function,
query_prompt=query_prompt,
query_prompt_name=query_prompt_name,
corpus_prompt=corpus_prompt,
corpus_prompt_name=corpus_prompt_name,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1, *args, **kwargs
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps, *args, **kwargs)
def compute_metrices(
self, model: SparseEncoder, corpus_model=None, corpus_embeddings: Tensor | None = None
) -> dict[str, float]:
return super().compute_metrices(model=model, corpus_model=corpus_model, corpus_embeddings=corpus_embeddings)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
prompt_name: str | None = None,
prompt: str | None = None,
**kwargs,
) -> Tensor:
kwargs["truncate_dim"] = self.truncate_dim
embeddings = model.encode(
sentences,
prompt_name=prompt_name,
prompt=prompt,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
save_on_cpu=True,
**kwargs,
)
sparsity_infos = model.get_sparsity_stats(embeddings)
if (
sparsity_infos["num_rows"] == self.queries_info["length_of_queries"]
and "sparsity_infos" not in self.queries_info.keys()
):
self.queries_info["sparsity_infos"] = model.get_sparsity_stats(embeddings)
else:
self.corpus_info["sparsity_infos"] = model.get_sparsity_stats(embeddings)
return embeddings
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
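# Example usage (a rough sketch; the checkpoint name and toy data below are
# illustrative placeholders, not part of this module):
#
#     from sentence_transformers import SparseEncoder
#
#     model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
#     evaluator = SparseInformationRetrievalEvaluator(
#         queries={"q1": "What is Python?"},
#         corpus={"d1": "Python is a programming language.", "d2": "Cats purr."},
#         relevant_docs={"q1": {"d1"}},
#         name="toy-ir",
#     )
#     results = evaluator(model)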
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import InformationRetrievalEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseInformationRetrievalEvaluator(InformationRetrievalEvaluator):
def __init__(
self,
queries: dict[str, str], # qid => query
corpus: dict[str, str], # cid => doc
relevant_docs: dict[str, set[str]], # qid => Set[cid]
corpus_chunk_size: int = 50000,
mrr_at_k: list[int] = [10],
ndcg_at_k: list[int] = [10],
accuracy_at_k: list[int] = [1, 3, 5, 10],
precision_recall_at_k: list[int] = [1, 3, 5, 10],
map_at_k: list[int] = [100],
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: int | None = None,
score_functions: dict[str, Callable[[Tensor, Tensor], Tensor]] | None = None,
main_score_function: str | SimilarityFunction | None = None,
query_prompt: str | None = None,
query_prompt_name: str | None = None,
corpus_prompt: str | None = None,
corpus_prompt_name: str | None = None,
) -> None:
return super().__init__(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
corpus_chunk_size=corpus_chunk_size,
mrr_at_k=mrr_at_k,
ndcg_at_k=ndcg_at_k,
accuracy_at_k=accuracy_at_k,
precision_recall_at_k=precision_recall_at_k,
map_at_k=map_at_k,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
write_csv=write_csv,
truncate_dim=truncate_dim,
score_functions=score_functions,
main_score_function=main_score_function,
query_prompt=query_prompt,
query_prompt_name=query_prompt_name,
corpus_prompt=corpus_prompt,
corpus_prompt_name=corpus_prompt_name,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1, *args, **kwargs
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps, *args, **kwargs)
def compute_metrices(
self, model: SparseEncoder, corpus_model=None, corpus_embeddings: Tensor | None = None
) -> dict[str, float]:
return super().compute_metrices(model=model, corpus_model=corpus_model, corpus_embeddings=corpus_embeddings)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
prompt_name: str | None = None,
prompt: str | None = None,
**kwargs,
) -> Tensor:
kwargs["truncate_dim"] = self.truncate_dim
embeddings = model.encode(
sentences,
prompt_name=prompt_name,
prompt=prompt,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
save_on_cpu=True,
**kwargs,
)
sparsity_infos = model.get_sparsity_stats(embeddings)
if sparsity_infos["num_rows"] == self.queries_info["lenght_of_queries"]:
self.queries_info["sparsity_infos"] = model.get_sparsity_stats(embeddings)
else:
self.corpus_info["sparsity_infos"] = model.get_sparsity_stats(embeddings)
return embeddings
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
"""
This script showcases a recommended approach to perform semantic search using quantized embeddings with FAISS and usearch.
In particular, it uses binary search with int8 rescoring. The binary search is highly efficient, and its index can be kept
in memory even for massive datasets: it takes (num_dimensions * num_documents / 8) bytes, e.g. about 1.19 GB for 10 million 1024-dimensional embeddings.
"""
import json
import os
import time
import faiss
import numpy as np
from usearch.index import Index
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings
# We use usearch as it can efficiently load int8 vectors from disk.
# Load the model
# NOTE: Because we are only comparing questions here, we will use the "query" prompt for everything.
# Normally you don't use this prompt for documents, but only for the queries
model = SentenceTransformer(
"mixedbread-ai/mxbai-embed-large-v1",
prompts={"query": "Represent this sentence for searching relevant passages: "},
default_prompt_name="query",
)
# Load a corpus with texts
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
# Start with a default query
query = "How do I become a good programmer?"
# Try to load the precomputed binary and int8 indices
if os.path.exists("quora_faiss_ubinary.index"):
binary_index: faiss.IndexBinaryFlat = faiss.read_index_binary("quora_faiss_ubinary.index")
int8_view = Index.restore("quora_usearch_int8.index", view=True)
else:
# Encode the corpus using the full precision
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
# Convert the embeddings to "ubinary" for efficient FAISS search
ubinary_embeddings = quantize_embeddings(full_corpus_embeddings, "ubinary")
binary_index = faiss.IndexBinaryFlat(1024)
binary_index.add(ubinary_embeddings)
faiss.write_index_binary(binary_index, "quora_faiss_ubinary.index")
# Convert the embeddings to "int8" for efficiently loading int8 indices with usearch
int8_embeddings = quantize_embeddings(full_corpus_embeddings, "int8")
index = Index(ndim=1024, metric="ip", dtype="i8")
index.add(np.arange(len(int8_embeddings)), int8_embeddings)
index.save("quora_usearch_int8.index")
del index
# Load the int8 index as a view, which does not cost any memory
int8_view = Index.restore("quora_usearch_int8.index", view=True)
def search(query, top_k: int = 10, rescore_multiplier: int = 4):
# 1. Embed the query as float32
start_time = time.time()
query_embedding = model.encode(query)
embed_time = time.time() - start_time
# 2. Quantize the query to ubinary
start_time = time.time()
query_embedding_ubinary = quantize_embeddings(query_embedding.reshape(1, -1), "ubinary")
quantize_time = time.time() - start_time
# 3. Search the binary index
start_time = time.time()
_scores, binary_ids = binary_index.search(query_embedding_ubinary, top_k * rescore_multiplier)
binary_ids = binary_ids[0]
search_time = time.time() - start_time
# 4. Load the corresponding int8 embeddings
start_time = time.time()
int8_embeddings = int8_view[binary_ids].astype(int)
load_time = time.time() - start_time
# 5. Rescore the top_k * rescore_multiplier using the float32 query embedding and the int8 document embeddings
start_time = time.time()
scores = query_embedding @ int8_embeddings.T
rescore_time = time.time() - start_time
# 6. Sort the scores and return the top_k
start_time = time.time()
indices = (-scores).argsort()[:top_k]
top_k_indices = binary_ids[indices]
top_k_scores = scores[indices]
sort_time = time.time() - start_time
return (
top_k_scores.tolist(),
top_k_indices.tolist(),
{
"Embed Time": f"{embed_time:.4f} s",
"Quantize Time": f"{quantize_time:.4f} s",
"Search Time": f"{search_time:.4f} s",
"Load Time": f"{load_time:.4f} s",
"Rescore Time": f"{rescore_time:.4f} s",
"Sort Time": f"{sort_time:.4f} s",
"Total Retrieval Time": f"{quantize_time + search_time + load_time + rescore_time + sort_time:.4f} s",
},
)
while True:
scores, indices, timings = search(query)
# Output the results
print(f"Timings:\n{json.dumps(timings, indent=2)}")
print(f"Query: {query}")
for score, index in zip(scores, indices):
print(f"(Score: {score:.4f}) {corpus[index]}")
print("")
# 10. Prompt for more queries
query = input("Please enter a question: ")
|
"""
This script showcases a recommended approach to perform semantic search using quantized embeddings with FAISS and usearch.
In particular, it uses binary search with int8 rescoring. The binary search is highly efficient, and its index can be kept
in memory even for massive datasets: it takes (num_dimensions * num_documents / 8) bytes, e.g. about 1.19 GB for 10 million 1024-dimensional embeddings.
"""
import json
import os
import time
import numpy as np
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings
from datasets import load_dataset
import faiss
from usearch.index import Index
# We use usearch as it can efficiently load int8 vectors from disk.
# Load the model
# NOTE: Because we are only comparing questions here, we will use the "query" prompt for everything.
# Normally you don't use this prompt for documents, but only for the queries
model = SentenceTransformer(
"mixedbread-ai/mxbai-embed-large-v1",
prompts={"query": "Represent this sentence for searching relevant passages: "},
default_prompt_name="query",
)
# Load a corpus with texts
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
# Start with a default query
query = "How do I become a good programmer?"
# Try to load the precomputed binary and int8 indices
if os.path.exists("quora_faiss_ubinary.index"):
binary_index: faiss.IndexBinaryFlat = faiss.read_index_binary("quora_faiss_ubinary.index")
int8_view = Index.restore("quora_usearch_int8.index", view=True)
else:
# Encode the corpus using the full precision
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
# Convert the embeddings to "ubinary" for efficient FAISS search
ubinary_embeddings = quantize_embeddings(full_corpus_embeddings, "ubinary")
binary_index = faiss.IndexBinaryFlat(1024)
binary_index.add(ubinary_embeddings)
faiss.write_index_binary(binary_index, "quora_faiss_ubinary.index")
# Convert the embeddings to "int8" for efficiently loading int8 indices with usearch
int8_embeddings = quantize_embeddings(full_corpus_embeddings, "int8")
index = Index(ndim=1024, metric="ip", dtype="i8")
index.add(np.arange(len(int8_embeddings)), int8_embeddings)
index.save("quora_usearch_int8.index")
del index
# Load the int8 index as a view, which does not cost any memory
int8_view = Index.restore("quora_usearch_int8.index", view=True)
def search(query, top_k: int = 10, rescore_multiplier: int = 4):
# 1. Embed the query as float32
start_time = time.time()
query_embedding = model.encode(query)
embed_time = time.time() - start_time
# 2. Quantize the query to ubinary
start_time = time.time()
query_embedding_ubinary = quantize_embeddings(query_embedding.reshape(1, -1), "ubinary")
quantize_time = time.time() - start_time
# 3. Search the binary index
start_time = time.time()
_scores, binary_ids = binary_index.search(query_embedding_ubinary, top_k * rescore_multiplier)
binary_ids = binary_ids[0]
search_time = time.time() - start_time
# 4. Load the corresponding int8 embeddings
start_time = time.time()
int8_embeddings = int8_view[binary_ids].astype(int)
load_time = time.time() - start_time
# 5. Rescore the top_k * rescore_multiplier using the float32 query embedding and the int8 document embeddings
start_time = time.time()
scores = query_embedding @ int8_embeddings.T
rescore_time = time.time() - start_time
# 6. Sort the scores and return the top_k
start_time = time.time()
indices = (-scores).argsort()[:top_k]
top_k_indices = binary_ids[indices]
top_k_scores = scores[indices]
sort_time = time.time() - start_time
return (
top_k_scores.tolist(),
top_k_indices.tolist(),
{
"Embed Time": f"{embed_time:.4f} s",
"Quantize Time": f"{quantize_time:.4f} s",
"Search Time": f"{search_time:.4f} s",
"Load Time": f"{load_time:.4f} s",
"Rescore Time": f"{rescore_time:.4f} s",
"Sort Time": f"{sort_time:.4f} s",
"Total Retrieval Time": f"{quantize_time + search_time + load_time + rescore_time + sort_time:.4f} s",
},
)
while True:
scores, indices, timings = search(query)
# Output the results
print(f"Timings:\n{json.dumps(timings, indent=2)}")
print(f"Query: {query}")
for score, index in zip(scores, indices):
print(f"(Score: {score:.4f}) {corpus[index]}")
print("")
# 10. Prompt for more queries
query = input("Please enter a question: ")
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .sparse_rcnn import SparseRCNN
@MODELS.register_module()
class QueryInst(SparseRCNN):
r"""Implementation of
`Instances as Queries <http://arxiv.org/abs/2105.01928>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(QueryInst, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .sparse_rcnn import SparseRCNN
@DETECTORS.register_module()
class QueryInst(SparseRCNN):
r"""Implementation of
`Instances as Queries <http://arxiv.org/abs/2105.01928>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(QueryInst, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
|
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from subprocess import check_output
from typing import List
import pytest
from datasets.packaged_modules import _PACKAGED_DATASETS_MODULES
from datasets.utils.logging import get_logger
from datasets.utils.metadata import DatasetMetadata
from datasets.utils.readme import ReadMe
from .utils import slow
repo_path = Path.cwd()
logger = get_logger(__name__)
def get_changed_datasets(repo_path: Path) -> List[Path]:
diff_output = check_output(["git", "diff", "--name-only", "origin/main...HEAD"], cwd=repo_path)
changed_files = [Path(repo_path, f) for f in diff_output.decode().splitlines()]
datasets_dir_path = repo_path / "datasets"
changed_datasets = {
f.resolve().relative_to(datasets_dir_path).parts[0]
for f in changed_files
if f.exists() and str(f.resolve()).startswith(str(datasets_dir_path)) and not f.name == "dummy_data.zip"
}
return sorted(dataset_name for dataset_name in changed_datasets if dataset_name not in _PACKAGED_DATASETS_MODULES)
def get_all_datasets(repo_path: Path) -> List[Path]:
dataset_names = [
path.parts[-1]
for path in (repo_path / "datasets").iterdir()
if path.is_dir() and (path / path.name).with_suffix(".py").is_file()
]
return [dataset_name for dataset_name in dataset_names if dataset_name not in _PACKAGED_DATASETS_MODULES]
@pytest.mark.parametrize("dataset_name", get_changed_datasets(repo_path))
def test_changed_dataset_card(dataset_name):
"""Validate the content of the dataset cards that were changed"""
card_path = repo_path / "datasets" / dataset_name / "README.md"
assert card_path.exists()
error_messages = []
try:
ReadMe.from_readme(card_path)
except Exception as readme_parsing_error:
error_messages.append(
f"The following issues have been found in the dataset cards:\nREADME Parsing:\n{readme_parsing_error}"
)
try:
ReadMe.from_readme(card_path, suppress_parsing_errors=True)
except Exception as readme_validation_error:
error_messages.append(
f"The following issues have been found in the dataset cards:\nREADME Validation:\n{readme_validation_error}"
)
try:
metadata = DatasetMetadata.from_readme(card_path)
assert metadata, "empty metadata"
except Exception as metadata_error:
error_messages.append(
f"The following issues have been found in the dataset cards:\nYAML tags:\n{metadata_error}"
)
if error_messages:
raise ValueError("\n".join(error_messages))
@pytest.mark.parametrize("dataset_name", get_all_datasets(repo_path))
def test_dataset_card_yaml_structure(dataset_name):
"""
Just check that the dataset cards have valid YAML.
It doesn't validate the content.
"""
card_path = repo_path / "datasets" / dataset_name / "README.md"
assert card_path.exists()
metadata_dict = DatasetMetadata.from_readme(card_path)
assert len(metadata_dict) > 0
@slow
@pytest.mark.parametrize("dataset_name", get_all_datasets(repo_path))
def test_dataset_card(dataset_name):
"""Validate the content of the dataset cards"""
card_path = repo_path / "datasets" / dataset_name / "README.md"
assert card_path.exists()
error_messages = []
try:
readme = ReadMe.from_readme(card_path)
except Exception as readme_parsing_error:
error_messages.append(
f"The following issues have been found in the dataset cards:\nREADME Parsing:\n{readme_parsing_error}"
)
try:
readme = ReadMe.from_readme(card_path, suppress_parsing_errors=True)
readme.validate()
except Exception as readme_validation_error:
error_messages.append(
f"The following issues have been found in the dataset cards:\nREADME Validation:\n{readme_validation_error}"
)
try:
metadata = DatasetMetadata.from_readme(card_path)
assert metadata
except Exception as metadata_error:
error_messages.append(
f"The following issues have been found in the dataset cards:\nYAML tags:\n{metadata_error}"
)
if error_messages:
raise ValueError("\n".join(error_messages))
|
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from subprocess import check_output
from typing import List
import pytest
from datasets.packaged_modules import _PACKAGED_DATASETS_MODULES
from datasets.utils.logging import get_logger
from datasets.utils.metadata import DatasetMetadata, validate_metadata_type, yaml_block_from_readme
from datasets.utils.readme import ReadMe
from .utils import slow
repo_path = Path.cwd()
logger = get_logger(__name__)
def get_changed_datasets(repo_path: Path) -> List[Path]:
diff_output = check_output(["git", "diff", "--name-only", "origin/main...HEAD"], cwd=repo_path)
changed_files = [Path(repo_path, f) for f in diff_output.decode().splitlines()]
datasets_dir_path = repo_path / "datasets"
changed_datasets = {
f.resolve().relative_to(datasets_dir_path).parts[0]
for f in changed_files
if f.exists() and str(f.resolve()).startswith(str(datasets_dir_path)) and not f.name == "dummy_data.zip"
}
return sorted(dataset_name for dataset_name in changed_datasets if dataset_name not in _PACKAGED_DATASETS_MODULES)
def get_all_datasets(repo_path: Path) -> List[Path]:
dataset_names = [path.parts[-1] for path in (repo_path / "datasets").iterdir() if path.is_dir()]
return [dataset_name for dataset_name in dataset_names if dataset_name not in _PACKAGED_DATASETS_MODULES]
@pytest.mark.parametrize("dataset_name", get_changed_datasets(repo_path))
def test_changed_dataset_card(dataset_name):
"""Validate the content of the dataset cards that were changed"""
card_path = repo_path / "datasets" / dataset_name / "README.md"
assert card_path.exists()
error_messages = []
try:
readme = ReadMe.from_readme(card_path)
except Exception as readme_parsing_error:
error_messages.append(
f"The following issues have been found in the dataset cards:\nREADME Parsing:\n{readme_parsing_error}"
)
try:
readme = ReadMe.from_readme(card_path, suppress_parsing_errors=True)
readme.validate()
except Exception as readme_validation_error:
error_messages.append(
f"The following issues have been found in the dataset cards:\nREADME Validation:\n{readme_validation_error}"
)
try:
metadata = DatasetMetadata.from_readme(card_path)
metadata.validate()
except Exception as metadata_error:
error_messages.append(
f"The following issues have been found in the dataset cards:\nYAML tags:\n{metadata_error}"
)
if error_messages:
raise ValueError("\n".join(error_messages))
@pytest.mark.parametrize("dataset_name", get_all_datasets(repo_path))
def test_dataset_card_yaml_structure(dataset_name):
"""
Just check that the dataset cards have valid YAML.
It doesn't validate the content.
"""
card_path = repo_path / "datasets" / dataset_name / "README.md"
assert card_path.exists()
yaml_string = yaml_block_from_readme(card_path)
metadata_dict = DatasetMetadata._metadata_dict_from_yaml_string(yaml_string)
assert len(metadata_dict) > 0
validate_metadata_type(metadata_dict)
@slow
@pytest.mark.parametrize("dataset_name", get_all_datasets(repo_path))
def test_dataset_card(dataset_name):
"""Validate the content of the dataset cards"""
card_path = repo_path / "datasets" / dataset_name / "README.md"
assert card_path.exists()
error_messages = []
try:
readme = ReadMe.from_readme(card_path)
except Exception as readme_parsing_error:
error_messages.append(
f"The following issues have been found in the dataset cards:\nREADME Parsing:\n{readme_parsing_error}"
)
try:
readme = ReadMe.from_readme(card_path, suppress_parsing_errors=True)
readme.validate()
except Exception as readme_validation_error:
error_messages.append(
f"The following issues have been found in the dataset cards:\nREADME Validation:\n{readme_validation_error}"
)
try:
metadata = DatasetMetadata.from_readme(card_path)
metadata.validate()
except Exception as metadata_error:
error_messages.append(
f"The following issues have been found in the dataset cards:\nYAML tags:\n{metadata_error}"
)
if error_messages:
raise ValueError("\n".join(error_messages))
|
_base_ = 'ssj_270k_coco-instance.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (LMDB and Memcache are not supported yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
# Standard Scale Jittering (SSJ) resizes and crops an image
# with a resize range of 0.8 to 1.25 of the original image size.
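# For the 1024x1024 image_size used here, that means RandomResize samples a target
# scale between roughly 819x819 (0.8x) and 1280x1280 (1.25x) before the 1024x1024
# RandomCrop below (a worked example of ratio_range, for illustration only).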
load_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.25),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=image_size),
]
train_pipeline = [
dict(type='CopyPaste', max_num_pasted=100),
dict(type='PackDetInputs')
]
train_dataloader = dict(
dataset=dict(
_delete_=True,
type='MultiImageMixDataset',
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=load_pipeline,
backend_args=backend_args),
pipeline=train_pipeline))
|
_base_ = 'ssj_270k_coco-instance.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
file_client_args = dict(backend='disk')
# uncomment the code below to use a different file client
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
# Standard Scale Jittering (SSJ) resizes and crops an image
# with a resize range of 0.8 to 1.25 of the original image size.
load_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.25),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=image_size),
]
train_pipeline = [
dict(type='CopyPaste', max_num_pasted=100),
dict(type='PackDetInputs')
]
train_dataloader = dict(
dataset=dict(
_delete_=True,
type='MultiImageMixDataset',
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=load_pipeline),
pipeline=train_pipeline))
|
import json
from json import JSONDecodeError
from typing import Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
)
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import AgentOutputParser
class OpenAIFunctionsAgentOutputParser(AgentOutputParser):
"""Parses a message into agent action/finish.
Is meant to be used with OpenAI models, as it relies on the specific
function_call parameter from OpenAI to convey what tools to use.
If a function_call parameter is passed, then that is used to get
the tool and tool input.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "openai-functions-agent"
@staticmethod
def _parse_ai_message(message: BaseMessage) -> Union[AgentAction, AgentFinish]:
"""Parse an AI message."""
if not isinstance(message, AIMessage):
msg = f"Expected an AI message got {type(message)}"
raise TypeError(msg)
function_call = message.additional_kwargs.get("function_call", {})
if function_call:
function_name = function_call["name"]
try:
if len(function_call["arguments"].strip()) == 0:
# OpenAI returns an empty string for functions containing no args
_tool_input = {}
else:
# otherwise it returns a json object
_tool_input = json.loads(function_call["arguments"], strict=False)
except JSONDecodeError:
msg = (
f"Could not parse tool input: {function_call} because "
f"the `arguments` is not valid JSON."
)
raise OutputParserException(msg)
# HACK HACK HACK:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
if "__arg1" in _tool_input:
tool_input = _tool_input["__arg1"]
else:
tool_input = _tool_input
content_msg = f"responded: {message.content}\n" if message.content else "\n"
log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
return AgentActionMessageLog(
tool=function_name,
tool_input=tool_input,
log=log,
message_log=[message],
)
return AgentFinish(
return_values={"output": message.content}, log=str(message.content)
)
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Union[AgentAction, AgentFinish]:
if not isinstance(result[0], ChatGeneration):
msg = "This output parser only works on ChatGeneration output"
raise ValueError(msg)
message = result[0].message
return self._parse_ai_message(message)
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
msg = "Can only parse messages"
raise ValueError(msg)
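# Example (a minimal, illustrative sketch of the two branches handled above; the
# tool name and arguments are made up):
#
#     from langchain_core.messages import AIMessage
#
#     parser = OpenAIFunctionsAgentOutputParser()
#     action = parser._parse_ai_message(
#         AIMessage(
#             content="",
#             additional_kwargs={
#                 "function_call": {"name": "search", "arguments": '{"query": "weather"}'}
#             },
#         )
#     )
#     # -> AgentActionMessageLog(tool="search", tool_input={"query": "weather"}, ...)
#     finish = parser._parse_ai_message(AIMessage(content="All done."))
#     # -> AgentFinish(return_values={"output": "All done."}, ...)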
|
import json
from json import JSONDecodeError
from typing import Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
)
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import AgentOutputParser
class OpenAIFunctionsAgentOutputParser(AgentOutputParser):
"""Parses a message into agent action/finish.
Is meant to be used with OpenAI models, as it relies on the specific
function_call parameter from OpenAI to convey what tools to use.
If a function_call parameter is passed, then that is used to get
the tool and tool input.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "openai-functions-agent"
@staticmethod
def _parse_ai_message(message: BaseMessage) -> Union[AgentAction, AgentFinish]:
"""Parse an AI message."""
if not isinstance(message, AIMessage):
raise TypeError(f"Expected an AI message got {type(message)}")
function_call = message.additional_kwargs.get("function_call", {})
if function_call:
function_name = function_call["name"]
try:
if len(function_call["arguments"].strip()) == 0:
# OpenAI returns an empty string for functions containing no args
_tool_input = {}
else:
# otherwise it returns a json object
_tool_input = json.loads(function_call["arguments"], strict=False)
except JSONDecodeError:
raise OutputParserException(
f"Could not parse tool input: {function_call} because "
f"the `arguments` is not valid JSON."
)
# HACK HACK HACK:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
if "__arg1" in _tool_input:
tool_input = _tool_input["__arg1"]
else:
tool_input = _tool_input
content_msg = f"responded: {message.content}\n" if message.content else "\n"
log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
return AgentActionMessageLog(
tool=function_name,
tool_input=tool_input,
log=log,
message_log=[message],
)
return AgentFinish(
return_values={"output": message.content}, log=str(message.content)
)
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Union[AgentAction, AgentFinish]:
if not isinstance(result[0], ChatGeneration):
raise ValueError("This output parser only works on ChatGeneration output")
message = result[0].message
return self._parse_ai_message(message)
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
raise ValueError("Can only parse messages")
|
from keras.src.api_export import keras_export
from keras.src.optimizers.adadelta import Adadelta
from keras.src.optimizers.adafactor import Adafactor
from keras.src.optimizers.adagrad import Adagrad
from keras.src.optimizers.adam import Adam
from keras.src.optimizers.adamax import Adamax
from keras.src.optimizers.adamw import AdamW
from keras.src.optimizers.ftrl import Ftrl
from keras.src.optimizers.lion import Lion
from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer
from keras.src.optimizers.muon import Muon
from keras.src.optimizers.nadam import Nadam
from keras.src.optimizers.optimizer import Optimizer
from keras.src.optimizers.rmsprop import RMSprop
from keras.src.optimizers.sgd import SGD
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
Optimizer,
Adam,
SGD,
RMSprop,
Adadelta,
AdamW,
Adagrad,
Adamax,
Adafactor,
Nadam,
Ftrl,
Lion,
LossScaleOptimizer,
}
ALL_OBJECTS_DICT = {cls.__name__.lower(): cls for cls in ALL_OBJECTS}
@keras_export("keras.optimizers.serialize")
def serialize(optimizer):
"""Returns the optimizer configuration as a Python dict.
Args:
optimizer: An `Optimizer` instance to serialize.
Returns:
Python dict which contains the configuration of the optimizer.
"""
return serialization_lib.serialize_keras_object(optimizer)
@keras_export("keras.optimizers.deserialize")
def deserialize(config, custom_objects=None):
"""Returns a Keras optimizer object via its configuration.
Args:
config: Optimizer configuration dictionary.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during
deserialization.
Returns:
A Keras Optimizer instance.
"""
# Make deserialization case-insensitive for built-in optimizers.
if config["class_name"].lower() in ALL_OBJECTS_DICT:
config["class_name"] = config["class_name"].lower()
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.optimizers.get")
def get(identifier):
"""Retrieves a Keras Optimizer instance.
Args:
identifier: Optimizer identifier, one of:
- String: name of an optimizer
- Dictionary: configuration dictionary.
- Keras Optimizer instance (it will be returned unchanged).
Returns:
A Keras Optimizer instance.
"""
if identifier is None:
return None
elif isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
config = {"class_name": identifier, "config": {}}
obj = deserialize(config)
else:
obj = identifier
if isinstance(obj, Optimizer):
return obj
raise ValueError(f"Could not interpret optimizer identifier: {identifier}")
# We will add this temporarily so that tensorflow packages that depend on
# estimators will continue to import (there are a large number). Note that
# Keras 3 will not work with the estimators API.
@keras_export(
[
"keras.optimizers.legacy.Adagrad",
"keras.optimizers.legacy.Adam",
"keras.optimizers.legacy.Ftrl",
"keras.optimizers.legacy.RMSprop",
"keras.optimizers.legacy.SGD",
"keras.optimizers.legacy.Optimizer",
]
)
class LegacyOptimizerWarning:
def __init__(self, *args, **kwargs):
raise ImportError(
"`keras.optimizers.legacy` is not supported in Keras 3. When using "
"`tf.keras`, to continue using a `tf.keras.optimizers.legacy` "
"optimizer, you can install the `tf_keras` package (Keras 2) and "
"set the environment variable `TF_USE_LEGACY_KERAS=True` to "
"configure TensorFlow to use `tf_keras` when accessing `tf.keras`."
)
|
from keras.src.api_export import keras_export
from keras.src.optimizers.adadelta import Adadelta
from keras.src.optimizers.adafactor import Adafactor
from keras.src.optimizers.adagrad import Adagrad
from keras.src.optimizers.adam import Adam
from keras.src.optimizers.adamax import Adamax
from keras.src.optimizers.adamw import AdamW
from keras.src.optimizers.ftrl import Ftrl
from keras.src.optimizers.lion import Lion
from keras.src.optimizers.loss_scale_optimizer import LossScaleOptimizer
from keras.src.optimizers.nadam import Nadam
from keras.src.optimizers.optimizer import Optimizer
from keras.src.optimizers.rmsprop import RMSprop
from keras.src.optimizers.sgd import SGD
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
Optimizer,
Adam,
SGD,
RMSprop,
Adadelta,
AdamW,
Adagrad,
Adamax,
Adafactor,
Nadam,
Ftrl,
Lion,
LossScaleOptimizer,
}
ALL_OBJECTS_DICT = {cls.__name__.lower(): cls for cls in ALL_OBJECTS}
@keras_export("keras.optimizers.serialize")
def serialize(optimizer):
"""Returns the optimizer configuration as a Python dict.
Args:
optimizer: An `Optimizer` instance to serialize.
Returns:
Python dict which contains the configuration of the optimizer.
"""
return serialization_lib.serialize_keras_object(optimizer)
@keras_export("keras.optimizers.deserialize")
def deserialize(config, custom_objects=None):
"""Returns a Keras optimizer object via its configuration.
Args:
config: Optimizer configuration dictionary.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during
deserialization.
Returns:
A Keras Optimizer instance.
"""
# Make deserialization case-insensitive for built-in optimizers.
if config["class_name"].lower() in ALL_OBJECTS_DICT:
config["class_name"] = config["class_name"].lower()
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.optimizers.get")
def get(identifier):
"""Retrieves a Keras Optimizer instance.
Args:
identifier: Optimizer identifier, one of:
- String: name of an optimizer
- Dictionary: configuration dictionary.
- Keras Optimizer instance (it will be returned unchanged).
Returns:
A Keras Optimizer instance.
"""
if identifier is None:
return None
elif isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
config = {"class_name": identifier, "config": {}}
obj = deserialize(config)
else:
obj = identifier
if isinstance(obj, Optimizer):
return obj
raise ValueError(f"Could not interpret optimizer identifier: {identifier}")
# We will add this temporarily so that tensorflow packages that depend on
# estimators will continue to import (there are a large number). Note that
# Keras 3 will not work with the estimators API.
@keras_export(
[
"keras.optimizers.legacy.Adagrad",
"keras.optimizers.legacy.Adam",
"keras.optimizers.legacy.Ftrl",
"keras.optimizers.legacy.RMSprop",
"keras.optimizers.legacy.SGD",
"keras.optimizers.legacy.Optimizer",
]
)
class LegacyOptimizerWarning:
def __init__(self, *args, **kwargs):
raise ImportError(
"`keras.optimizers.legacy` is not supported in Keras 3. When using "
"`tf.keras`, to continue using a `tf.keras.optimizers.legacy` "
"optimizer, you can install the `tf_keras` package (Keras 2) and "
"set the environment variable `TF_USE_LEGACY_KERAS=True` to "
"configure TensorFlow to use `tf_keras` when accessing `tf.keras`."
)
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import tqdm as _tqdm # _tqdm is the module
from .experimental import experimental
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .tqdm import (
are_progress_bars_disabled,
disable_progress_bars,
enable_progress_bars,
tqdm,
)
from .version import Version
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ruff: noqa
from . import tqdm as _tqdm # _tqdm is the module
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
from .tqdm import (
disable_progress_bars,
enable_progress_bars,
are_progress_bars_disabled,
tqdm,
)
|
"""DashScope api utils."""
from http import HTTPStatus
from typing import Any, Dict, List, Sequence
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
CompletionResponse,
)
def dashscope_response_to_completion_response(
response: Any, stream: bool = False
) -> CompletionResponse:
if response["status_code"] == HTTPStatus.OK:
content = response["output"]["choices"][0]["message"]["content"]
if not content:
content = ""
return CompletionResponse(text=content, raw=response)
else:
return CompletionResponse(text="", raw=response)
def dashscope_response_to_chat_response(
response: Any,
) -> ChatResponse:
if response["status_code"] == HTTPStatus.OK:
content = response["output"]["choices"][0]["message"]["content"]
if not content:
content = ""
role = response["output"]["choices"][0]["message"]["role"]
additional_kwargs = response["output"]["choices"][0]["message"]
return ChatResponse(
message=ChatMessage(
role=role, content=content, additional_kwargs=additional_kwargs
),
raw=response,
)
else:
return ChatResponse(message=ChatMessage(), raw=response)
def chat_message_to_dashscope_messages(
chat_messages: Sequence[ChatMessage],
) -> List[Dict]:
messages = []
for msg in chat_messages:
additional_kwargs = msg.additional_kwargs
if msg.role == "assistant":
messages.append(
{
"role": msg.role.value,
"content": msg.content,
"tool_calls": additional_kwargs.get("tool_calls", []),
}
)
elif msg.role == "tool":
messages.append(
{
"role": msg.role.value,
"content": msg.content,
"tool_call_id": additional_kwargs.get("tool_call_id", ""),
"name": additional_kwargs.get("name", ""),
}
)
else:
messages.append({"role": msg.role.value, "content": msg.content})
return messages
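# Usage sketch (hypothetical messages, for illustration only): the converter maps
# llama-index ChatMessage objects to the plain dicts expected by the DashScope SDK,
# preserving tool-call metadata for assistant and tool roles.
#
#   >>> from llama_index.core.base.llms.types import MessageRole
#   >>> msgs = [
#   ...     ChatMessage(role=MessageRole.USER, content="What is the weather in Beijing?"),
#   ...     ChatMessage(role=MessageRole.ASSISTANT, content="Let me check that for you."),
#   ... ]
#   >>> chat_message_to_dashscope_messages(msgs)
#   [{'role': 'user', 'content': 'What is the weather in Beijing?'},
#    {'role': 'assistant', 'content': 'Let me check that for you.', 'tool_calls': []}]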
|
"""DashScope api utils."""
from http import HTTPStatus
from typing import Any, Dict, List, Sequence
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
CompletionResponse,
)
def dashscope_response_to_completion_response(
response: Any, stream: bool = False
) -> CompletionResponse:
if response["status_code"] == HTTPStatus.OK:
content = response["output"]["choices"][0]["message"]["content"]
if not content:
content = ""
return CompletionResponse(text=content, raw=response)
else:
return CompletionResponse(text="", raw=response)
def dashscope_response_to_chat_response(
response: Any,
) -> ChatResponse:
if response["status_code"] == HTTPStatus.OK:
content = response["output"]["choices"][0]["message"]["content"]
if not content:
content = ""
role = response["output"]["choices"][0]["message"]["role"]
additional_kwargs = response["output"]["choices"][0]["message"]
return ChatResponse(
message=ChatMessage(
role=role, content=content, additional_kwargs=additional_kwargs
),
raw=response,
)
else:
return ChatResponse(message=ChatMessage(), raw=response)
def chat_message_to_dashscope_messages(
chat_messages: Sequence[ChatMessage],
) -> List[Dict]:
messages = []
for msg in chat_messages:
messages.append({"role": msg.role.value, "content": msg.content})
return messages
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_instructblipvideo import *
from .image_processing_instructblipvideo import *
from .modeling_instructblipvideo import *
from .processing_instructblipvideo import *
from .video_processing_instructblipvideo import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_instructblipvideo import *
from .image_processing_instructblipvideo import *
from .modeling_instructblipvideo import *
from .processing_instructblipvideo import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
class BaseAssigner(metaclass=ABCMeta):
"""Base assigner that assigns boxes to ground truth boxes."""
@abstractmethod
def assign(self,
pred_instances,
gt_instances,
gt_instances_ignore=None,
**kwargs):
"""Assign boxes to either a ground truth boxes or a negative boxes."""
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
class BaseAssigner(metaclass=ABCMeta):
"""Base assigner that assigns boxes to ground truth boxes."""
@abstractmethod
def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
"""Assign boxes to either a ground truth boxes or a negative boxes."""
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.image.image_tensor import ImageTensor
from docarray.utils.misc import is_torch_available
T = TypeVar('T', bound='Image')
torch_available = is_torch_available()
if torch_available:
import torch
class Image(BaseDocument):
"""
Document for handling images.
It can contain an ImageUrl (`Image.url`), an AnyTensor (`Image.tensor`),
and an AnyEmbedding (`Image.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Image
# use it directly
image = Image(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import Image
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyImage(Image):
second_embedding: Optional[AnyEmbedding]
image = MyImage(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
image.second_embedding = model(image.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Image, Text
# compose it
class MultiModalDoc(BaseDocument):
image: Image
text: Text
mmdoc = MultiModalDoc(
image=Image(url="http://www.jina.ai/image.jpg"),
text=Text(text="hello world, how are you doing?"),
)
mmdoc.image.tensor = mmdoc.image.url.load()
# or
mmdoc.image.bytes = mmdoc.image.url.load_bytes()
mmdoc.image.tensor = mmdoc.image.bytes.load()
"""
url: Optional[ImageUrl]
tensor: Optional[ImageTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[ImageBytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available and isinstance(value, torch.Tensor)
):
value = cls(tensor=value)
elif isinstance(value, bytes):
            value = cls(bytes=value)
return super().validate(value)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.image.image_tensor import ImageTensor
T = TypeVar('T', bound='Image')
try:
import torch
torch_available = True
except ImportError:
torch_available = False
class Image(BaseDocument):
"""
Document for handling images.
It can contain an ImageUrl (`Image.url`), an AnyTensor (`Image.tensor`),
and an AnyEmbedding (`Image.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Image
# use it directly
image = Image(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import Image
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyImage(Image):
second_embedding: Optional[AnyEmbedding]
image = MyImage(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
image.second_embedding = model(image.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Image, Text
# compose it
class MultiModalDoc(BaseDocument):
image: Image
text: Text
mmdoc = MultiModalDoc(
image=Image(url="http://www.jina.ai/image.jpg"),
text=Text(text="hello world, how are you doing?"),
)
mmdoc.image.tensor = mmdoc.image.url.load()
# or
mmdoc.image.bytes = mmdoc.image.url.load_bytes()
mmdoc.image.tensor = mmdoc.image.bytes.load()
"""
url: Optional[ImageUrl]
tensor: Optional[ImageTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[ImageBytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available and isinstance(value, torch.Tensor)
):
value = cls(tensor=value)
elif isinstance(value, bytes):
            value = cls(bytes=value)
return super().validate(value)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS,
MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, VISBACKENDS, VISUALIZERS, WEIGHT_INITIALIZERS)
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'METRICS', 'MODEL_WRAPPERS', 'LOOPS', 'VISBACKENDS', 'VISUALIZERS',
'DefaultScope'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS,
MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, VISUALIZERS, WEIGHT_INITIALIZERS, WRITERS)
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'METRICS', 'MODEL_WRAPPERS', 'LOOPS', 'WRITERS', 'VISUALIZERS',
'DefaultScope'
]
|
from langchain_core.messages import __all__
EXPECTED_ALL = [
"MessageLikeRepresentation",
"_message_from_dict",
"AIMessage",
"AIMessageChunk",
"AnyMessage",
"BaseMessage",
"BaseMessageChunk",
"ChatMessage",
"ChatMessageChunk",
"FunctionMessage",
"FunctionMessageChunk",
"HumanMessage",
"HumanMessageChunk",
"InvalidToolCall",
"SystemMessage",
"SystemMessageChunk",
"ToolCall",
"ToolCallChunk",
"ToolMessage",
"ToolMessageChunk",
"RemoveMessage",
"convert_to_messages",
"get_buffer_string",
"is_data_content_block",
"merge_content",
"message_chunk_to_message",
"message_to_dict",
"messages_from_dict",
"messages_to_dict",
"filter_messages",
"merge_message_runs",
"trim_messages",
"convert_to_openai_image_block",
"convert_to_openai_messages",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
from langchain_core.messages import __all__
EXPECTED_ALL = [
"MessageLikeRepresentation",
"_message_from_dict",
"AIMessage",
"AIMessageChunk",
"AnyMessage",
"BaseMessage",
"BaseMessageChunk",
"ChatMessage",
"ChatMessageChunk",
"FunctionMessage",
"FunctionMessageChunk",
"HumanMessage",
"HumanMessageChunk",
"InvalidToolCall",
"SystemMessage",
"SystemMessageChunk",
"ToolCall",
"ToolCallChunk",
"ToolMessage",
"ToolMessageChunk",
"RemoveMessage",
"convert_to_messages",
"get_buffer_string",
"merge_content",
"message_chunk_to_message",
"message_to_dict",
"messages_from_dict",
"messages_to_dict",
"filter_messages",
"merge_message_runs",
"trim_messages",
"convert_to_openai_messages",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
_base_ = './ms_rcnn_x101_64x4d_fpn_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './ms_rcnn_x101_64x4d_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
import numpy as np
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import MLMTransformer, SpladePooling
def main():
# Initialize the SPLADE model
model_name = "opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill" # "naver/efficient-splade-V-large-doc" # "prithivida/Splade_PP_en_v1" # "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Sample texts
texts = [
"The weather is lovely today.",
"It's so sunny outside!",
"He drove to the stadium.",
]
# Generate embeddings
embeddings = model.encode(texts, convert_to_sparse_tensor=True)
print(type(embeddings))
    # Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
print(f"Embedding sparsity: {model.get_sparsity_stats(embeddings)}")
# Compute similarity matrix
similarity_matrix = model.similarity(embeddings, embeddings)
# Print similarity matrix
print("\nSimilarity Matrix:")
for i, text in enumerate(texts):
print(f"{i}: {text[:50]}...")
print("\n" + " " * 10 + " ".join([f"{i:5d}" for i in range(len(texts))]))
for i, row in enumerate(similarity_matrix):
print(f"{i:5d} " + " ".join([f"{val:.3f}" for val in row]))
vocab_size = embeddings.shape[1]
print(f"Vocabulary size: {vocab_size}")
# Visualize top tokens for each text
top_k = 20
print(f"\nTop tokens {top_k} for each text:")
for i, text in enumerate(texts):
# Get top k indices in sparse tensor
# Get top k indices in sparse tensor (sorted from highest to lowest)
top_indices = np.argsort(-embeddings[i].to_dense().cpu().numpy())[:top_k]
top_values = embeddings[i].to_dense().cpu().numpy()[top_indices]
top_tokens = [model.tokenizer.decode([idx]) for idx in top_indices]
print(f"{i}: {text}")
print(f"Top tokens: {top_tokens}")
print(f"Top values: {top_values}")
print()
# Save the model
model.push_to_hub(
"splade_example",
private=True,
)
# Load the model
loaded_model = SparseEncoder("arthurbresnu/splade_example")
print(f"Loaded model: {loaded_model}")
# Sample texts
texts = [
"The weather is lovely today.",
"It's so sunny outside!",
"He drove to the stadium.",
]
# Generate embeddings
embeddings = loaded_model.encode(texts, convert_to_sparse_tensor=True)
print(type(embeddings))
# Print embedding shape and sparsity
print(f"Embedding shape: {embeddings.shape}")
print(f"Embedding sparsity: {loaded_model.get_sparsity_stats(embeddings)}%")
# Compute similarity matrix
similarity_matrix = loaded_model.similarity(embeddings, embeddings)
# Print similarity matrix
print("\nSimilarity Matrix:")
for i, text in enumerate(texts):
print(f"{i}: {text[:50]}...")
print("\n" + " " * 10 + " ".join([f"{i:5d}" for i in range(len(texts))]))
for i, row in enumerate(similarity_matrix):
print(f"{i:5d} " + " ".join([f"{val:.3f}" for val in row]))
vocab_size = embeddings.shape[1]
print(f"Vocabulary size: {vocab_size}")
# Visualize top tokens for each text
top_k = 20
print(f"\nTop tokens {top_k} for each text:")
for i, text in enumerate(texts):
# Get top k indices in sparse tensor
# Get top k indices in sparse tensor (sorted from highest to lowest)
top_indices = np.argsort(-embeddings[i].to_dense().cpu().numpy())[:top_k]
top_values = embeddings[i].to_dense().cpu().numpy()[top_indices]
        top_tokens = [loaded_model.tokenizer.decode([idx]) for idx in top_indices]
print(f"{i}: {text}")
print(f"Top tokens: {top_tokens}")
print(f"Top values: {top_values}")
print()
if __name__ == "__main__":
main()
|
import numpy as np
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import MLMTransformer, SpladePooling
def main():
# Initialize the SPLADE model
model_name = "opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill" # "naver/efficient-splade-V-large-doc" # "prithivida/Splade_PP_en_v1" # "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Sample texts
texts = [
"The weather is lovely today.",
"It's so sunny outside!",
"He drove to the stadium.",
]
# Generate embeddings
embeddings = model.encode(texts, convert_to_sparse_tensor=True)
print(type(embeddings))
# Print embedding shape and sparsity
print(f"Embedding shape: {embeddings.shape}")
print(f"Embedding sparsity: {model.get_sparsity_stats(embeddings)}%")
# Compute similarity matrix
similarity_matrix = model.similarity(embeddings, embeddings)
# Print similarity matrix
print("\nSimilarity Matrix:")
for i, text in enumerate(texts):
print(f"{i}: {text[:50]}...")
print("\n" + " " * 10 + " ".join([f"{i:5d}" for i in range(len(texts))]))
for i, row in enumerate(similarity_matrix):
print(f"{i:5d} " + " ".join([f"{val:.3f}" for val in row]))
vocab_size = embeddings.shape[1]
print(f"Vocabulary size: {vocab_size}")
# Visualize top tokens for each text
top_k = 20
print(f"\nTop tokens {top_k} for each text:")
for i, text in enumerate(texts):
# Get top k indices in sparse tensor
# Get top k indices in sparse tensor (sorted from highest to lowest)
top_indices = np.argsort(-embeddings[i].to_dense().cpu().numpy())[:top_k]
top_values = embeddings[i].to_dense().cpu().numpy()[top_indices]
top_tokens = [model.tokenizer.decode([idx]) for idx in top_indices]
print(f"{i}: {text}")
print(f"Top tokens: {top_tokens}")
print(f"Top values: {top_values}")
print()
if __name__ == "__main__":
main()
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import export_dump_streaming
from ...faiss_searcher import FaissSearcher
def _get_docs_from_vecs(queries):
docs = DocumentArray()
for q in queries:
doc = Document(embedding=q)
docs.append(doc)
return docs
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
metas['name'] = 'faiss_idx'
yield metas
del os.environ['TEST_WORKSPACE']
def test_train_and_index(metas, tmpdir):
vec_idx = np.random.randint(0, high=512, size=[512]).astype(str)
vec = np.array(np.random.random([512, 10]), dtype=np.float32)
query = np.array(np.random.random([10, 10]), dtype=np.float32)
query_docs = _get_docs_from_vecs(query)
train_data_file = os.path.join(os.environ['TEST_WORKSPACE'], 'train.npy')
train_data = np.array(np.random.random([1024, 10]), dtype=np.float32)
np.save(train_data_file, train_data)
trained_index_file = os.path.join(os.environ['TEST_WORKSPACE'], 'faiss.index')
export_dump_streaming(
os.path.join(tmpdir, 'dump'),
1,
len(vec_idx),
zip(vec_idx, vec, [b'' for _ in range(len(vec))]),
)
dump_path = os.path.join(tmpdir, 'dump')
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'IVF10_HNSW32,PQ2',
'trained_index_file': trained_index_file,
},
)
with f:
# the trained index will be dumped to "faiss.index"
f.post(on='/train', parameters={'train_data_file': train_data_file})
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'IVF10_HNSW32,PQ2',
'trained_index_file': trained_index_file,
'dump_path': dump_path,
},
)
with f:
result = f.post(
on='/search', data=query_docs, return_results=True, parameters={'top_k': 4}
)[0].docs
assert len(result[0].matches) == 4
for d in result:
assert (
d.matches[0].scores['euclidean'].value
<= d.matches[1].scores['euclidean'].value
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import export_dump_streaming
from ...faiss_searcher import FaissSearcher
def _get_docs_from_vecs(queries):
docs = DocumentArray()
for q in queries:
doc = Document(embedding=q)
docs.append(doc)
return docs
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
metas['name'] = 'faiss_idx'
yield metas
del os.environ['TEST_WORKSPACE']
def test_train_and_index(metas, tmpdir):
vec_idx = np.random.randint(0, high=512, size=[512]).astype(str)
vec = np.array(np.random.random([512, 10]), dtype=np.float32)
query = np.array(np.random.random([10, 10]), dtype=np.float32)
query_docs = _get_docs_from_vecs(query)
train_data_file = os.path.join(os.environ['TEST_WORKSPACE'], 'train.npy')
train_data = np.array(np.random.random([1024, 10]), dtype=np.float32)
np.save(train_data_file, train_data)
trained_index_file = os.path.join(os.environ['TEST_WORKSPACE'], 'faiss.index')
export_dump_streaming(
os.path.join(tmpdir, 'dump'),
1,
len(vec_idx),
zip(vec_idx, vec, [b'' for _ in range(len(vec))]),
)
dump_path = os.path.join(tmpdir, 'dump')
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'IVF10_HNSW32,PQ2',
'trained_index_file': trained_index_file,
},
)
with f:
# the trained index will be dumped to "faiss.index"
f.post(on='/train', parameters={'train_data_file': train_data_file})
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'IVF10_HNSW32,PQ2',
'trained_index_file': trained_index_file,
'dump_path': dump_path,
},
)
with f:
result = f.post(
on='/search', data=query_docs, return_results=True, parameters={'top_k': 4}
)[0].docs
assert len(result[0].matches) == 4
for d in result:
assert d.matches[0].scores['l2'].value >= d.matches[1].scores['l2'].value
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
],
timeout=30,
check=True,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .dataset import *
from .data import *
from .fileio import *
from .registry import *
from .utils import *
|
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .dataset import *
from .fileio import *
from .registry import *
from .utils import *
|
"""
This script contains an example how to perform semantic search with Elasticsearch.
You need Elasticsearch up and running locally:
https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.readthedocs.io/
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_elasticsearch
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train", trust_remote_code=True).map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
# 2. Come up with some queries
queries = [
"How do I become a good programmer?",
"How do I become a good data scientist?",
]
# 3. Load the model
sparse_model = SparseEncoder("sparse-embedding/splade_example")
# 5. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True)
corpus_index = None
while True:
# 6. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
    # 7. Perform semantic search using Elasticsearch
results, search_time, corpus_index = semantic_search_elasticsearch(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=10,
output_index=True,
)
# 8. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 10. Prompt for more queries
queries = [input("Please enter a question: ")]
|
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_elasticsearch
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train", trust_remote_code=True).map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
# 2. Come up with some queries
queries = [
"How do I become a good programmer?",
"How do I become a good data scientist?",
]
# 3. Load the model
sparse_model = SparseEncoder("sparse-embedding/splade_example")
# 5. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True)
corpus_index = None
while True:
# 6. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
    # 7. Perform semantic search using Elasticsearch
results, search_time, corpus_index = semantic_search_elasticsearch(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=10,
output_index=True,
)
# 8. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 10. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""Transformers for missing value imputation."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import typing
from ._base import MissingIndicator, SimpleImputer
from ._knn import KNNImputer
if typing.TYPE_CHECKING:
# Avoid errors in type checkers (e.g. mypy) for experimental estimators.
# TODO: remove this check once the estimator is no longer experimental.
from ._iterative import IterativeImputer # noqa: F401
__all__ = ["KNNImputer", "MissingIndicator", "SimpleImputer"]
# TODO: remove this check once the estimator is no longer experimental.
def __getattr__(name):
if name == "IterativeImputer":
raise ImportError(
f"{name} is experimental and the API might change without any "
"deprecation cycle. To use it, you need to explicitly import "
"enable_iterative_imputer:\n"
"from sklearn.experimental import enable_iterative_imputer"
)
raise AttributeError(f"module {__name__} has no attribute {name}")
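# Usage note (sketch of the documented opt-in pattern): importing the experimental
# flag module first makes IterativeImputer importable from sklearn.impute.
#
#   >>> from sklearn.experimental import enable_iterative_imputer  # noqa: F401
#   >>> from sklearn.impute import IterativeImputer
#   >>> imputer = IterativeImputer()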
|
"""Transformers for missing value imputation."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import typing
from ._base import MissingIndicator, SimpleImputer
from ._knn import KNNImputer
if typing.TYPE_CHECKING:
# Avoid errors in type checkers (e.g. mypy) for experimental estimators.
# TODO: remove this check once the estimator is no longer experimental.
from ._iterative import IterativeImputer # noqa
__all__ = ["KNNImputer", "MissingIndicator", "SimpleImputer"]
# TODO: remove this check once the estimator is no longer experimental.
def __getattr__(name):
if name == "IterativeImputer":
raise ImportError(
f"{name} is experimental and the API might change without any "
"deprecation cycle. To use it, you need to explicitly import "
"enable_iterative_imputer:\n"
"from sklearn.experimental import enable_iterative_imputer"
)
raise AttributeError(f"module {__name__} has no attribute {name}")
|
from typing import Dict
def get_default_metas() -> Dict:
"""
Get a copy of default meta variables.
NOTE: DO NOT ADD MORE ENTRIES HERE!
:return: a deep copy of the default metas in a new dict
"""
# NOTE: DO NOT ADD MORE ENTRIES HERE!
return {
'name': '', #: a string, the name of the executor
        'description': '', #: a string, the description of this executor. It will be used in the automatic docs UI
'workspace': '', #: a string, the workspace of the executor
'py_modules': '', #: a list of strings, the python dependencies of the executor
}
def get_executor_taboo():
"""
Returns a set of executor meta variables
:return: set of executor meta variables
"""
taboo = {'self', 'args', 'kwargs', 'metas', 'requests', 'runtime_args'}
_defaults = get_default_metas()
taboo.update(_defaults.keys())
return taboo
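# Usage sketch (illustrative): the taboo set is the union of the reserved
# constructor argument names and the default meta keys, e.g. for filtering
# user-provided kwargs.
#
#   >>> taboo = get_executor_taboo()
#   >>> sorted(k for k in ('name', 'self', 'foo') if k in taboo)
#   ['name', 'self']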
|
from typing import Dict
def get_default_metas() -> Dict:
"""
Get a copy of default meta variables.
NOTE: DO NOT ADD MORE ENTRIES HERE!
:return: a deep copy of the default metas in a new dict
"""
# NOTE: DO NOT ADD MORE ENTRIES HERE!
return {
'name': '', #: a string, the name of the executor
        'description': '', #: a string, the description of this executor. It will be used in the automatic docs UI
'workspace': '', #: a string, the workspace of the executor
'py_modules': '', #: a list of strings, the python dependencies of the executor
}
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'CascadeRPNHead',
'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead',
'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead',
'DecoupledSOLOLightHead', 'LADHead', 'TOODHead', 'MaskFormerHead',
'Mask2FormerHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'CascadeRPNHead',
'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead',
'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead',
'DecoupledSOLOLightHead', 'LADHead', 'TOODHead', 'MaskFormerHead'
]
|
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from torchaudio.prototype.models import conv_tasnet_base, hdemucs_high
@dataclass
class SourceSeparationBundle:
"""torchaudio.prototype.pipelines.SourceSeparationBundle()
Dataclass that bundles components for performing source separation.
Example
>>> import torchaudio
>>> from torchaudio.prototype.pipelines import CONVTASNET_BASE_LIBRI2MIX
>>> import torch
>>>
>>> # Build the separation model.
>>> model = CONVTASNET_BASE_LIBRI2MIX.get_model()
>>> 100%|███████████████████████████████|19.1M/19.1M [00:04<00:00, 4.93MB/s]
>>>
>>> # Instantiate the test set of Libri2Mix dataset.
>>> dataset = torchaudio.datasets.LibriMix("/home/datasets/", subset="test")
>>>
>>> # Apply source separation on mixture audio.
>>> for i, data in enumerate(dataset):
>>> sample_rate, mixture, clean_sources = data
>>> # Make sure the shape of input suits the model requirement.
>>> mixture = mixture.reshape(1, 1, -1)
>>> estimated_sources = model(mixture)
>>> score = si_snr_pit(estimated_sources, clean_sources) # for demonstration
>>> print(f"Si-SNR score is : {score}.)
>>> break
>>> Si-SNR score is : 16.24.
>>>
"""
_model_path: str
_model_factory_func: Callable[[], torch.nn.Module]
_sample_rate: int
@property
def sample_rate(self) -> int:
"""Sample rate of the audio that the model is trained on.
:type: int
"""
return self._sample_rate
def get_model(self) -> torch.nn.Module:
"""Construct the model and load the pretrained weight."""
model = self._model_factory_func()
path = torchaudio.utils.download_asset(self._model_path)
state_dict = torch.load(path)
model.load_state_dict(state_dict)
model.eval()
return model
CONVTASNET_BASE_LIBRI2MIX = SourceSeparationBundle(
_model_path="models/conv_tasnet_base_libri2mix.pt",
_model_factory_func=partial(conv_tasnet_base, num_sources=2),
_sample_rate=8000,
)
CONVTASNET_BASE_LIBRI2MIX.__doc__ = """Pre-trained *ConvTasNet* [:footcite:`Luo_2019`] pipeline for source separation.
The underlying model is constructed by :py:func:`torchaudio.prototype.models.conv_tasnet_base`
and utilizes weights trained on *Libri2Mix dataset* [:footcite:`cosentino2020librimix`] using training script
``lightning_train.py`` `here <https://github.com/pytorch/audio/tree/release/0.12/examples/source_separation/>`__
with default arguments.
Please refer to :py:class:`SourceSeparationBundle` for usage instructions.
"""
HDEMUCS_HIGH_MUSDB_PLUS = SourceSeparationBundle(
_model_path="models/hdemucs_high_trained.pt",
_model_factory_func=partial(hdemucs_high, sources=["drums", "bass", "other", "vocals"], sample_rate=44100),
_sample_rate=44100,
)
HDEMUCS_HIGH_MUSDB_PLUS.__doc__ = """Pre-trained *Hybrid Demucs* [:footcite:`defossez2021hybrid`] pipeline for music
source separation. The underlying model is constructed by
:py:func:`torchaudio.prototype.models.hdemucs_high` and utilizes weights trained on MUSDB-HQ [:footcite:`MUSDB18HQ`]
and internal extra training data, all at the same sample rate of 44.1 kHz. The model separates mixture music into
“drums”, “bass”, “vocals”, and “other” sources. Training was performed in the original HDemucs repository
`here <https://github.com/facebookresearch/demucs/>`__.
"""
|
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from torchaudio.prototype.models import conv_tasnet_base
@dataclass
class SourceSeparationBundle:
"""torchaudio.prototype.pipelines.SourceSeparationBundle()
Dataclass that bundles components for performing source separation.
Example
>>> import torchaudio
>>> from torchaudio.prototype.pipelines import CONVTASNET_BASE_LIBRI2MIX
>>> import torch
>>>
>>> # Build the separation model.
>>> model = CONVTASNET_BASE_LIBRI2MIX.get_model()
>>> 100%|███████████████████████████████|19.1M/19.1M [00:04<00:00, 4.93MB/s]
>>>
>>> # Instantiate the test set of Libri2Mix dataset.
>>> dataset = torchaudio.datasets.LibriMix("/home/datasets/", subset="test")
>>>
>>> # Apply source separation on mixture audio.
>>> for i, data in enumerate(dataset):
>>> sample_rate, mixture, clean_sources = data
>>> # Make sure the shape of input suits the model requirement.
>>> mixture = mixture.reshape(1, 1, -1)
>>> estimated_sources = model(mixture)
>>> score = si_snr_pit(estimated_sources, clean_sources) # for demonstration
>>> print(f"Si-SNR score is : {score}.)
>>> break
>>> Si-SNR score is : 16.24.
>>>
"""
_model_path: str
_model_factory_func: Callable[[], torch.nn.Module]
_sample_rate: int
@property
def sample_rate(self) -> int:
"""Sample rate of the audio that the model is trained on.
:type: int
"""
return self._sample_rate
def get_model(self) -> torch.nn.Module:
"""Construct the model and load the pretrained weight."""
model = self._model_factory_func()
path = torchaudio.utils.download_asset(self._model_path)
state_dict = torch.load(path)
model.load_state_dict(state_dict)
model.eval()
return model
CONVTASNET_BASE_LIBRI2MIX = SourceSeparationBundle(
_model_path="models/conv_tasnet_base_libri2mix.pt",
_model_factory_func=partial(conv_tasnet_base, num_sources=2),
_sample_rate=8000,
)
CONVTASNET_BASE_LIBRI2MIX.__doc__ = """Pre-trained *ConvTasNet* [:footcite:`Luo_2019`] pipeline for source separation.
The underlying model is constructed by :py:func:`torchaudio.prototype.models.conv_tasnet_base`
and utilizes weights trained on *Libri2Mix dataset* [:footcite:`cosentino2020librimix`] using training script
``lightning_train.py`` `here <https://github.com/pytorch/audio/tree/release/0.12/examples/source_separation/>`__
with default arguments.
Please refer to :py:class:`SourceSeparationBundle` for usage instructions.
"""
|
"""
=======================================================
Label Propagation circles: Learning a complex structure
=======================================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# We generate a dataset with two concentric circles. In addition, a label
# is associated with each sample of the dataset that is: 0 (belonging to
# the outer circle), 1 (belonging to the inner circle), and -1 (unknown).
# Here, all labels but two are tagged as unknown.
import numpy as np
from sklearn.datasets import make_circles
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = np.full(n_samples, -1.0)
labels[0] = outer
labels[-1] = inner
# %%
# Plot raw data
import matplotlib.pyplot as plt
plt.figure(figsize=(4, 4))
plt.scatter(
X[labels == outer, 0],
X[labels == outer, 1],
color="navy",
marker="s",
lw=0,
label="outer labeled",
s=10,
)
plt.scatter(
X[labels == inner, 0],
X[labels == inner, 1],
color="c",
marker="s",
lw=0,
label="inner labeled",
s=10,
)
plt.scatter(
X[labels == -1, 0],
X[labels == -1, 1],
color="darkorange",
marker=".",
label="unlabeled",
)
plt.legend(scatterpoints=1, shadow=False, loc="center")
_ = plt.title("Raw data (2 classes=outer and inner)")
# %%
#
# The aim of :class:`~sklearn.semi_supervised.LabelSpreading` is to associate
# a label to the samples whose label is initially unknown.
from sklearn.semi_supervised import LabelSpreading
label_spread = LabelSpreading(kernel="knn", alpha=0.8)
label_spread.fit(X, labels)
# %%
# Now, we can check which labels have been associated with each sample
# when the label was unknown.
output_labels = label_spread.transduction_
output_label_array = np.asarray(output_labels)
outer_numbers = (output_label_array == outer).nonzero()[0]
inner_numbers = (output_label_array == inner).nonzero()[0]
plt.figure(figsize=(4, 4))
plt.scatter(
X[outer_numbers, 0],
X[outer_numbers, 1],
color="navy",
marker="s",
lw=0,
s=10,
label="outer learned",
)
plt.scatter(
X[inner_numbers, 0],
X[inner_numbers, 1],
color="c",
marker="s",
lw=0,
s=10,
label="inner learned",
)
plt.legend(scatterpoints=1, shadow=False, loc="center")
plt.title("Labels learned with Label Spreading (KNN)")
plt.show()
|
"""
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# We generate a dataset with two concentric circles. In addition, a label
# is associated with each sample of the dataset that is: 0 (belonging to
# the outer circle), 1 (belonging to the inner circle), and -1 (unknown).
# Here, all labels but two are tagged as unknown.
import numpy as np
from sklearn.datasets import make_circles
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = np.full(n_samples, -1.0)
labels[0] = outer
labels[-1] = inner
# %%
# Plot raw data
import matplotlib.pyplot as plt
plt.figure(figsize=(4, 4))
plt.scatter(
X[labels == outer, 0],
X[labels == outer, 1],
color="navy",
marker="s",
lw=0,
label="outer labeled",
s=10,
)
plt.scatter(
X[labels == inner, 0],
X[labels == inner, 1],
color="c",
marker="s",
lw=0,
label="inner labeled",
s=10,
)
plt.scatter(
X[labels == -1, 0],
X[labels == -1, 1],
color="darkorange",
marker=".",
label="unlabeled",
)
plt.legend(scatterpoints=1, shadow=False, loc="center")
_ = plt.title("Raw data (2 classes=outer and inner)")
# %%
#
# The aim of :class:`~sklearn.semi_supervised.LabelSpreading` is to associate
# a label to the samples whose label is initially unknown.
from sklearn.semi_supervised import LabelSpreading
label_spread = LabelSpreading(kernel="knn", alpha=0.8)
label_spread.fit(X, labels)
# %%
# Now, we can check which labels have been associated with each sample
# when the label was unknown.
output_labels = label_spread.transduction_
output_label_array = np.asarray(output_labels)
outer_numbers = (output_label_array == outer).nonzero()[0]
inner_numbers = (output_label_array == inner).nonzero()[0]
plt.figure(figsize=(4, 4))
plt.scatter(
X[outer_numbers, 0],
X[outer_numbers, 1],
color="navy",
marker="s",
lw=0,
s=10,
label="outer learned",
)
plt.scatter(
X[inner_numbers, 0],
X[inner_numbers, 1],
color="c",
marker="s",
lw=0,
s=10,
label="inner learned",
)
plt.legend(scatterpoints=1, shadow=False, loc="center")
plt.title("Labels learned with Label Spreading (KNN)")
plt.show()
|
# mypy: allow-untyped-defs
import functools
from typing import Optional
import torch
from torch._C import _len_torch_function_stack
from torch.overrides import _pop_mode, _push_mode, TorchFunctionMode
from torch.utils._contextlib import context_decorator
CURRENT_DEVICE: Optional[torch.device] = None
@functools.lru_cache(1)
def _device_constructors():
return {
# standard ones
torch.empty,
torch.empty_permuted,
torch.empty_strided,
torch.empty_quantized,
torch.ones,
torch.arange,
torch.bartlett_window,
torch.blackman_window,
torch.eye,
torch.fft.fftfreq,
torch.fft.rfftfreq,
torch.full,
torch.hamming_window,
torch.hann_window,
torch.kaiser_window,
torch.linspace,
torch.logspace,
torch.nested.nested_tensor,
# This function doesn't actually take a device argument
# torch.normal,
torch.rand,
torch.randn,
torch.randint,
torch.randperm,
torch.range,
torch.sparse_coo_tensor,
torch.sparse_compressed_tensor,
torch.sparse_csr_tensor,
torch.sparse_csc_tensor,
torch.sparse_bsr_tensor,
torch.sparse_bsc_tensor,
torch.tril_indices,
torch.triu_indices,
torch.zeros,
torch.asarray,
# weird ones
torch.tensor,
torch.as_tensor,
torch.scalar_tensor,
}
# NB: This is directly called from C++ in torch/csrc/Device.cpp
class DeviceContext(TorchFunctionMode):
def __init__(self, device):
self.device = torch.device(device)
def __enter__(self):
global CURRENT_DEVICE
self.old_device = CURRENT_DEVICE
CURRENT_DEVICE = self.device
        # We need to put the device at the bottom of the stack.
        # If we set the default device within a function mode context,
        # exiting that context mode will pop the device function mode off
        # of the stack incorrectly.
cur_stack = [_pop_mode() for _ in range(_len_torch_function_stack())]
_push_mode(self)
for mode in reversed(cur_stack):
_push_mode(mode)
def __exit__(self, exc_type, exc_val, exc_tb):
global CURRENT_DEVICE
CURRENT_DEVICE = self.old_device
cur_stack = []
# Invariant: there should only be one DeviceContext on the stack at any time
# (At the bottom), pop all modes until we hit the bottom, assert it's a DeviceContext
# or else someone else has popped it!
for _ in range(_len_torch_function_stack() - 1):
mode = _pop_mode()
assert not isinstance(mode, DeviceContext)
cur_stack.append(mode)
if _len_torch_function_stack() > 0:
mode = _pop_mode()
assert isinstance(mode, DeviceContext)
for mode in reversed(cur_stack):
_push_mode(mode)
def __torch_function__(self, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
if func in _device_constructors() and kwargs.get("device") is None:
kwargs["device"] = self.device
return func(*args, **kwargs)
# NB: This is directly called from C++ in torch/csrc/Device.cpp
def device_decorator(device, func):
return context_decorator(lambda: device, func)
def set_device(device):
"""
Set the default device inside of the wrapped function by decorating it with this function.
If you would like to use this as a context manager, use device as a
context manager directly, e.g., ``with torch.device(device)``.
"""
return lambda func: device_decorator(torch.device(device), func)
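# Usage sketch (illustrative): DeviceContext backs ``with torch.device(...)`` and
# ``torch.set_default_device``; factory calls without an explicit ``device=`` pick
# up the context's device, while explicit devices are left untouched.
#
#   >>> with torch.device("meta"):
#   ...     print(torch.empty(2, 2).device)
#   meta
#
#   >>> @set_device("meta")
#   ... def make():
#   ...     return torch.zeros(3)
#   >>> make().device
#   device(type='meta')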
|
# mypy: allow-untyped-defs
import functools
from typing import Optional
import torch
from torch._C import _len_torch_function_stack
from torch.overrides import _pop_mode, _push_mode, TorchFunctionMode
from torch.utils._contextlib import context_decorator
CURRENT_DEVICE: Optional[torch.device] = None
@functools.lru_cache(1)
def _device_constructors():
return {
# standard ones
torch.empty,
torch.empty_permuted,
torch.empty_strided,
torch.empty_quantized,
torch.ones,
torch.arange,
torch.bartlett_window,
torch.blackman_window,
torch.eye,
torch.fft.fftfreq,
torch.fft.rfftfreq,
torch.full,
torch.hamming_window,
torch.hann_window,
torch.kaiser_window,
torch.linspace,
torch.logspace,
torch.nested.nested_tensor,
# This function doesn't actually take a device argument
# torch.normal,
torch.rand,
torch.randn,
torch.randint,
torch.randperm,
torch.range,
torch.sparse_coo_tensor,
torch.sparse_compressed_tensor,
torch.sparse_csr_tensor,
torch.sparse_csc_tensor,
torch.sparse_bsr_tensor,
torch.sparse_bsc_tensor,
torch.tril_indices,
torch.triu_indices,
torch.zeros,
torch.asarray,
# weird ones
torch.tensor,
torch.as_tensor,
torch.scalar_tensor,
}
# NB: This is directly called from C++ in torch/csrc/Device.cpp
class DeviceContext(TorchFunctionMode):
def __init__(self, device):
self.device = torch.device(device)
def __enter__(self):
global CURRENT_DEVICE
self.old_device = CURRENT_DEVICE
CURRENT_DEVICE = self.device
        # We need to put the device at the bottom of the stack.
        # If we set the default device within a function mode context,
        # exiting that context mode will pop the device function mode off
        # of the stack incorrectly.
cur_stack = [_pop_mode() for _ in range(_len_torch_function_stack())]
_push_mode(self)
for mode in reversed(cur_stack):
_push_mode(mode)
def __exit__(self, exc_type, exc_val, exc_tb):
global CURRENT_DEVICE
CURRENT_DEVICE = self.old_device
cur_stack = []
# Invariant: there should only be one DeviceContext on the stack at any time
        # (At the bottom), pop all modes until we hit the bottom, assert it's a DeviceContext
# or else someone else has popped it!
for _ in range(_len_torch_function_stack() - 1):
mode = _pop_mode()
assert not isinstance(mode, DeviceContext)
cur_stack.append(mode)
if _len_torch_function_stack() > 0:
mode = _pop_mode()
assert isinstance(mode, DeviceContext)
for mode in reversed(cur_stack):
_push_mode(mode)
def __torch_function__(self, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
if func in _device_constructors() and kwargs.get("device") is None:
kwargs["device"] = self.device
return func(*args, **kwargs)
# NB: This is directly called from C++ in torch/csrc/Device.cpp
def device_decorator(device, func):
return context_decorator(lambda: device, func)
def set_device(device):
"""
Set the default device inside of the wrapped function by decorating it with this function.
If you would like to use this as a context manager, use device as a
context manager directly, e.g., ``with torch.device(device)``.
"""
return lambda func: device_decorator(torch.device(device), func)
|
"""
Wrapper script to run a command inside a Docker container
"""
import argparse
import grp
import itertools
import os
import pathlib
import pwd
import subprocess
import sys
import textwrap
OPS_DIR = pathlib.Path(__file__).expanduser().resolve().parent
PROJECT_ROOT_DIR = OPS_DIR.parent
LINEWIDTH = 88
TEXT_WRAPPER = textwrap.TextWrapper(
width=LINEWIDTH,
initial_indent="",
subsequent_indent=" ",
break_long_words=False,
break_on_hyphens=False,
)
def parse_run_args(*, raw_run_args: str) -> list[str]:
return [x for x in raw_run_args.split() if x]
def get_user_ids() -> dict[str, str]:
uid = os.getuid()
gid = os.getgid()
return {
"CI_BUILD_UID": str(uid),
"CI_BUILD_USER": pwd.getpwuid(uid).pw_name,
"CI_BUILD_GID": str(gid),
"CI_BUILD_GROUP": grp.getgrgid(gid).gr_name,
}
def fancy_print_cli_args(*, cli_args: list[str]) -> None:
print(
"=" * LINEWIDTH
+ "\n"
+ " \\\n".join(TEXT_WRAPPER.wrap(" ".join(cli_args)))
+ "\n"
+ "=" * LINEWIDTH
+ "\n",
flush=True,
)
def docker_run(
*,
image_uri: str,
command_args: list[str],
use_gpus: bool,
workdir: pathlib.Path,
user_ids: dict[str, str],
extra_args: list[str],
) -> None:
# Command-line arguments to be passed to `docker run`
docker_run_cli_args = ["--rm", "--pid=host"]
if use_gpus:
docker_run_cli_args.extend(["--gpus", "all"])
docker_run_cli_args.extend(["-v", f"{workdir}:/workspace", "-w", "/workspace"])
docker_run_cli_args.extend(
itertools.chain.from_iterable([["-e", f"{k}={v}"] for k, v in user_ids.items()])
)
docker_run_cli_args.extend(extra_args)
docker_run_cli_args.append(image_uri)
docker_run_cli_args.extend(command_args)
cli_args = ["docker", "run"] + docker_run_cli_args
fancy_print_cli_args(cli_args=cli_args)
subprocess.run(cli_args, check=True, encoding="utf-8")
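# Hedged illustration of the command assembled above (URI and paths are hypothetical):
#   docker run --rm --pid=host --gpus all -v /path/to/repo:/workspace -w /workspace \
#       -e CI_BUILD_UID=1000 -e CI_BUILD_USER=ci ... <image_uri> <command_args...>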
def main(*, args: argparse.Namespace) -> None:
run_args = parse_run_args(raw_run_args=args.run_args)
user_ids = get_user_ids()
if args.use_gpus:
print("Using NVIDIA GPUs for `docker run`")
if args.interactive:
print("Using interactive mode for `docker run`")
run_args.append("-it")
docker_run(
image_uri=args.image_uri,
command_args=args.command_args,
use_gpus=args.use_gpus,
workdir=args.workdir,
user_ids=user_ids,
extra_args=run_args,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
usage=(
f"{sys.argv[0]} --image-uri IMAGE_URI [--use-gpus] [--interactive] "
"[--workdir WORKDIR] [--run-args RUN_ARGS] -- COMMAND_ARG "
"[COMMAND_ARG ...]"
),
description="Run tasks inside a Docker container",
)
parser.add_argument(
"--image-uri",
type=str,
required=True,
help=(
"Fully qualified image URI to identify the container, e.g. "
"492475357299.dkr.ecr.us-west-2.amazonaws.com/xgb-ci.gpu:main"
),
)
parser.add_argument(
"--use-gpus",
action="store_true",
help=(
"Grant the container access to NVIDIA GPUs; requires the NVIDIA "
"Container Toolkit."
),
)
parser.add_argument(
"--interactive",
action="store_true",
help=(
"Run the container in the interactive mode; requires an interactive shell "
"(TTY). With this flag, you can use Ctrl-C to interrupt an long-running "
"command."
),
)
parser.add_argument(
"--workdir",
type=lambda p: pathlib.Path(p).expanduser().resolve(),
default=PROJECT_ROOT_DIR,
help="Path to working directory; if unset, use the project's root",
)
parser.add_argument(
"--run-args",
type=str,
default="",
help=(
"Argument(s) to be passed to `docker run`. When passing multiple "
"arguments, use single quotes to wrap them. Example: "
"--run-args '--cap-add SYS_PTRACE --shm-size=4g'"
),
)
parser.add_argument(
"command_args",
metavar="COMMAND_ARG",
type=str,
nargs="+",
help=(
"Argument(s) for the command to execute. NOTE. Make sure to specify "
"double-dash (--) to clearly distinguish between the command and the "
"preceding parameters. Example: --run-args '--cap-add SYS_PTRACE "
"--shm-size=4g' -- ./myprog"
),
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
parsed_args = parser.parse_args()
main(args=parsed_args)
|
"""
Wrapper script to run a command inside a Docker container
"""
import argparse
import grp
import itertools
import os
import pathlib
import pwd
import subprocess
import sys
import textwrap
OPS_DIR = pathlib.Path(__file__).expanduser().resolve().parent
PROJECT_ROOT_DIR = OPS_DIR.parent
LINEWIDTH = 88
TEXT_WRAPPER = textwrap.TextWrapper(
width=LINEWIDTH,
initial_indent="",
subsequent_indent=" ",
break_long_words=False,
break_on_hyphens=False,
)
def parse_run_args(*, raw_run_args: str) -> list[str]:
return [x for x in raw_run_args.split() if x]
def get_user_ids() -> dict[str, str]:
uid = os.getuid()
gid = os.getgid()
return {
"CI_BUILD_UID": str(uid),
"CI_BUILD_USER": pwd.getpwuid(uid).pw_name,
"CI_BUILD_GID": str(gid),
"CI_BUILD_GROUP": grp.getgrgid(gid).gr_name,
}
def fancy_print_cli_args(*, cli_args: list[str]) -> None:
print(
"=" * LINEWIDTH
+ "\n"
+ " \\\n".join(TEXT_WRAPPER.wrap(" ".join(cli_args)))
+ "\n"
+ "=" * LINEWIDTH
+ "\n",
flush=True,
)
def docker_run(
*,
image_uri: str,
command_args: list[str],
use_gpus: bool,
workdir: pathlib.Path,
user_ids: dict[str, str],
extra_args: list[str],
) -> None:
# Command-line arguments to be passed to `docker run`
docker_run_cli_args = ["--rm", "--pid=host"]
if use_gpus:
docker_run_cli_args.extend(["--gpus", "all"])
docker_run_cli_args.extend(["-v", f"{workdir}:/workspace", "-w", "/workspace"])
docker_run_cli_args.extend(
itertools.chain.from_iterable([["-e", f"{k}={v}"] for k, v in user_ids.items()])
)
docker_run_cli_args.extend(["-e", "NCCL_RAS_ENABLE=0"])
docker_run_cli_args.extend(extra_args)
docker_run_cli_args.append(image_uri)
docker_run_cli_args.extend(command_args)
cli_args = ["docker", "run"] + docker_run_cli_args
fancy_print_cli_args(cli_args=cli_args)
subprocess.run(cli_args, check=True, encoding="utf-8")
def main(*, args: argparse.Namespace) -> None:
run_args = parse_run_args(raw_run_args=args.run_args)
user_ids = get_user_ids()
if args.use_gpus:
print("Using NVIDIA GPUs for `docker run`")
if args.interactive:
print("Using interactive mode for `docker run`")
run_args.append("-it")
docker_run(
image_uri=args.image_uri,
command_args=args.command_args,
use_gpus=args.use_gpus,
workdir=args.workdir,
user_ids=user_ids,
extra_args=run_args,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
usage=(
f"{sys.argv[0]} --image-uri IMAGE_URI [--use-gpus] [--interactive] "
"[--workdir WORKDIR] [--run-args RUN_ARGS] -- COMMAND_ARG "
"[COMMAND_ARG ...]"
),
description="Run tasks inside a Docker container",
)
parser.add_argument(
"--image-uri",
type=str,
required=True,
help=(
"Fully qualified image URI to identify the container, e.g. "
"492475357299.dkr.ecr.us-west-2.amazonaws.com/xgb-ci.gpu:main"
),
)
parser.add_argument(
"--use-gpus",
action="store_true",
help=(
"Grant the container access to NVIDIA GPUs; requires the NVIDIA "
"Container Toolkit."
),
)
parser.add_argument(
"--interactive",
action="store_true",
help=(
"Run the container in the interactive mode; requires an interactive shell "
"(TTY). With this flag, you can use Ctrl-C to interrupt an long-running "
"command."
),
)
parser.add_argument(
"--workdir",
type=lambda p: pathlib.Path(p).expanduser().resolve(),
default=PROJECT_ROOT_DIR,
help="Path to working directory; if unset, use the project's root",
)
parser.add_argument(
"--run-args",
type=str,
default="",
help=(
"Argument(s) to be passed to `docker run`. When passing multiple "
"arguments, use single quotes to wrap them. Example: "
"--run-args '--cap-add SYS_PTRACE --shm-size=4g'"
),
)
parser.add_argument(
"command_args",
metavar="COMMAND_ARG",
type=str,
nargs="+",
help=(
"Argument(s) for the command to execute. NOTE. Make sure to specify "
"double-dash (--) to clearly distinguish between the command and the "
"preceding parameters. Example: --run-args '--cap-add SYS_PTRACE "
"--shm-size=4g' -- ./myprog"
),
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
parsed_args = parser.parse_args()
main(args=parsed_args)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.transforms import LoadImageFromFile
from mmdet.datasets.transforms import LoadAnnotations, LoadPanopticAnnotations
from mmdet.registry import TRANSFORMS
def get_loading_pipeline(pipeline):
"""Only keep loading image and annotations related configuration.
Args:
pipeline (list[dict]): Data pipeline configs.
Returns:
        list[dict]: The new pipeline list that only keeps the
        configuration related to loading images and annotations.
Examples:
>>> pipelines = [
... dict(type='LoadImageFromFile'),
... dict(type='LoadAnnotations', with_bbox=True),
... dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
... dict(type='RandomFlip', flip_ratio=0.5),
... dict(type='Normalize', **img_norm_cfg),
... dict(type='Pad', size_divisor=32),
... dict(type='DefaultFormatBundle'),
... dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
... ]
>>> expected_pipelines = [
... dict(type='LoadImageFromFile'),
... dict(type='LoadAnnotations', with_bbox=True)
... ]
>>> assert expected_pipelines ==\
... get_loading_pipeline(pipelines)
"""
loading_pipeline_cfg = []
for cfg in pipeline:
obj_cls = TRANSFORMS.get(cfg['type'])
        # TODO: use a more elegant way to distinguish loading modules
if obj_cls is not None and obj_cls in (LoadImageFromFile,
LoadAnnotations,
LoadPanopticAnnotations):
loading_pipeline_cfg.append(cfg)
assert len(loading_pipeline_cfg) == 2, \
'The data pipeline in your config file must include ' \
'loading image and annotations related pipeline.'
return loading_pipeline_cfg
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.transforms import LoadImageFromFile
from mmdet.datasets.transforms import LoadAnnotations, LoadPanopticAnnotations
from mmdet.registry import TRANSFORMS
def get_loading_pipeline(pipeline):
"""Only keep loading image and annotations related configuration.
Args:
pipeline (list[dict]): Data pipeline configs.
Returns:
        list[dict]: The new pipeline list that only keeps the
        configuration related to loading images and annotations.
Examples:
>>> pipelines = [
... dict(type='LoadImageFromFile'),
... dict(type='LoadAnnotations', with_bbox=True),
... dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
... dict(type='RandomFlip', flip_ratio=0.5),
... dict(type='Normalize', **img_norm_cfg),
... dict(type='Pad', size_divisor=32),
... dict(type='DefaultFormatBundle'),
... dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
... ]
>>> expected_pipelines = [
... dict(type='LoadImageFromFile'),
... dict(type='LoadAnnotations', with_bbox=True)
... ]
>>> assert expected_pipelines ==\
... get_loading_pipeline(pipelines)
"""
loading_pipeline_cfg = []
for cfg in pipeline:
obj_cls = TRANSFORMS.get(cfg['type'])
        # TODO: use a more elegant way to distinguish loading modules
if obj_cls is not None and obj_cls in (LoadImageFromFile,
LoadAnnotations,
LoadPanopticAnnotations):
loading_pipeline_cfg.append(cfg)
assert len(loading_pipeline_cfg) == 2, \
'The data pipeline in your config file must include ' \
'loading image and annotations related pipeline.'
return loading_pipeline_cfg
|
# Copyright 2025 Kakao Brain and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
"""
Utility class for CLIP embeddings. Used to combine the image and text embeddings into a format usable by the
decoder.
For more details, see the original paper: https://huggingface.co/papers/2204.06125 section 2.1
"""
@register_to_config
def __init__(
self,
*,
clip_extra_context_tokens: int = 4,
clip_embeddings_dim: int = 768,
time_embed_dim: int,
cross_attention_dim,
):
super().__init__()
self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
# parameters for additional clip time embeddings
self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
# parameters for encoder hidden states
self.clip_extra_context_tokens = clip_extra_context_tokens
self.clip_extra_context_tokens_proj = nn.Linear(
clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
)
self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)
def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
image_embeddings_batch_size = image_embeddings.shape[0]
classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
image_embeddings_batch_size, -1
)
image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
batch_size = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
return text_encoder_hidden_states, additive_clip_time_embeddings
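# Hedged shape sketch (dimensions below are illustrative assumptions, not values from the
# paper or any checkpoint): with clip_embeddings_dim=768, cross_attention_dim=2048 and
# clip_extra_context_tokens=4, forward() maps image_embeddings of shape [B, 768] and
# text_encoder_hidden_states of shape [B, L, 768] to a [B, 4 + L, 2048] hidden-state
# sequence plus an additive [B, time_embed_dim] time embedding.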
|
# Copyright 2024 Kakao Brain and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
"""
Utility class for CLIP embeddings. Used to combine the image and text embeddings into a format usable by the
decoder.
For more details, see the original paper: https://huggingface.co/papers/2204.06125 section 2.1
"""
@register_to_config
def __init__(
self,
*,
clip_extra_context_tokens: int = 4,
clip_embeddings_dim: int = 768,
time_embed_dim: int,
cross_attention_dim,
):
super().__init__()
self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
# parameters for additional clip time embeddings
self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
# parameters for encoder hidden states
self.clip_extra_context_tokens = clip_extra_context_tokens
self.clip_extra_context_tokens_proj = nn.Linear(
clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
)
self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)
def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
image_embeddings_batch_size = image_embeddings.shape[0]
classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
image_embeddings_batch_size, -1
)
image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
batch_size = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
return text_encoder_hidden_states, additive_clip_time_embeddings
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x0e\x64ocarray.proto\x12\x08\x64ocarray\x1a\x1cgoogle/protobuf/struct.proto\"A\n\x11\x44\x65nseNdArrayProto\x12\x0e\n\x06\x62uffer\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\r\x12\r\n\x05\x64type\x18\x03 \x01(\t\"g\n\x0cNdArrayProto\x12*\n\x05\x64\x65nse\x18\x01 \x01(\x0b\x32\x1b.docarray.DenseNdArrayProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"Z\n\x0cKeyValuePair\x12#\n\x03key\x18\x01 \x01(\x0b\x32\x16.google.protobuf.Value\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value\";\n\x10GenericDictValue\x12\'\n\x07\x65ntries\x18\x01 \x03(\x0b\x32\x16.docarray.KeyValuePair\"\xcb\x03\n\tNodeProto\x12\x0e\n\x04text\x18\x01 \x01(\tH\x00\x12\x11\n\x07integer\x18\x02 \x01(\x05H\x00\x12\x0f\n\x05\x66loat\x18\x03 \x01(\x01H\x00\x12\x11\n\x07\x62oolean\x18\x04 \x01(\x08H\x00\x12\x0e\n\x04\x62lob\x18\x05 \x01(\x0cH\x00\x12)\n\x07ndarray\x18\x06 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12+\n\x08\x64ocument\x18\x07 \x01(\x0b\x32\x17.docarray.DocumentProtoH\x00\x12\x36\n\x0e\x64ocument_array\x18\x08 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12*\n\x04list\x18\t \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x00\x12)\n\x03set\x18\n \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x00\x12+\n\x05tuple\x18\x0b \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x00\x12\'\n\x04\x64ict\x18\x0c \x01(\x0b\x32\x17.google.protobuf.StructH\x00\x12\x0e\n\x04type\x18\r \x01(\tH\x01\x42\t\n\x07\x63ontentB\x0f\n\rdocarray_type\"\x82\x01\n\rDocumentProto\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32!.docarray.DocumentProto.DataEntry\x1a@\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.docarray.NodeProto:\x02\x38\x01\";\n\x12\x44ocumentArrayProto\x12%\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\x17.docarray.DocumentProto\"\x86\x01\n\x0fUnionArrayProto\x12=\n\x0e\x64ocument_array\x18\x01 \x01(\x0b\x32#.docarray.DocumentArrayStackedProtoH\x00\x12)\n\x07ndarray\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x42\t\n\x07\x63ontent\"\xd6\x01\n\x19\x44ocumentArrayStackedProto\x12+\n\x05list_\x18\x01 \x01(\x0b\x32\x1c.docarray.DocumentArrayProto\x12\x41\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x30.docarray.DocumentArrayStackedProto.ColumnsEntry\x1aI\n\x0c\x43olumnsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.docarray.UnionArrayProto:\x02\x38\x01\x62\x06proto3'
)
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'docarray_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DOCUMENTPROTO_DATAENTRY._options = None
_DOCUMENTPROTO_DATAENTRY._serialized_options = b'8\001'
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._options = None
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_options = b'8\001'
_DENSENDARRAYPROTO._serialized_start = 58
_DENSENDARRAYPROTO._serialized_end = 123
_NDARRAYPROTO._serialized_start = 125
_NDARRAYPROTO._serialized_end = 228
_KEYVALUEPAIR._serialized_start = 230
_KEYVALUEPAIR._serialized_end = 320
_GENERICDICTVALUE._serialized_start = 322
_GENERICDICTVALUE._serialized_end = 381
_NODEPROTO._serialized_start = 384
_NODEPROTO._serialized_end = 843
_DOCUMENTPROTO._serialized_start = 846
_DOCUMENTPROTO._serialized_end = 976
_DOCUMENTPROTO_DATAENTRY._serialized_start = 912
_DOCUMENTPROTO_DATAENTRY._serialized_end = 976
_DOCUMENTARRAYPROTO._serialized_start = 978
_DOCUMENTARRAYPROTO._serialized_end = 1037
_UNIONARRAYPROTO._serialized_start = 1040
_UNIONARRAYPROTO._serialized_end = 1174
_DOCUMENTARRAYSTACKEDPROTO._serialized_start = 1177
_DOCUMENTARRAYSTACKEDPROTO._serialized_end = 1391
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_start = 1318
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_end = 1391
# @@protoc_insertion_point(module_scope)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x0e\x64ocarray.proto\x12\x08\x64ocarray\x1a\x1cgoogle/protobuf/struct.proto\"A\n\x11\x44\x65nseNdArrayProto\x12\x0e\n\x06\x62uffer\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\r\x12\r\n\x05\x64type\x18\x03 \x01(\t\"g\n\x0cNdArrayProto\x12*\n\x05\x64\x65nse\x18\x01 \x01(\x0b\x32\x1b.docarray.DenseNdArrayProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"\xe7\x01\n\tNodeProto\x12\x0e\n\x04\x62lob\x18\x01 \x01(\x0cH\x00\x12)\n\x07ndarray\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x0e\n\x04text\x18\x03 \x01(\tH\x00\x12+\n\x08\x64ocument\x18\x04 \x01(\x0b\x32\x17.docarray.DocumentProtoH\x00\x12\x36\n\x0e\x64ocument_array\x18\x05 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12\x0e\n\x04type\x18\x06 \x01(\tH\x01\x42\t\n\x07\x63ontentB\x0f\n\rdocarray_type\"\x82\x01\n\rDocumentProto\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32!.docarray.DocumentProto.DataEntry\x1a@\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.docarray.NodeProto:\x02\x38\x01\";\n\x12\x44ocumentArrayProto\x12%\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\x17.docarray.DocumentProto\"\x86\x01\n\x0fUnionArrayProto\x12=\n\x0e\x64ocument_array\x18\x01 \x01(\x0b\x32#.docarray.DocumentArrayStackedProtoH\x00\x12)\n\x07ndarray\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x42\t\n\x07\x63ontent\"\xd6\x01\n\x19\x44ocumentArrayStackedProto\x12+\n\x05list_\x18\x01 \x01(\x0b\x32\x1c.docarray.DocumentArrayProto\x12\x41\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x30.docarray.DocumentArrayStackedProto.ColumnsEntry\x1aI\n\x0c\x43olumnsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.docarray.UnionArrayProto:\x02\x38\x01\x62\x06proto3'
)
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'docarray_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DOCUMENTPROTO_DATAENTRY._options = None
_DOCUMENTPROTO_DATAENTRY._serialized_options = b'8\001'
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._options = None
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_options = b'8\001'
_DENSENDARRAYPROTO._serialized_start = 58
_DENSENDARRAYPROTO._serialized_end = 123
_NDARRAYPROTO._serialized_start = 125
_NDARRAYPROTO._serialized_end = 228
_NODEPROTO._serialized_start = 231
_NODEPROTO._serialized_end = 462
_DOCUMENTPROTO._serialized_start = 465
_DOCUMENTPROTO._serialized_end = 595
_DOCUMENTPROTO_DATAENTRY._serialized_start = 531
_DOCUMENTPROTO_DATAENTRY._serialized_end = 595
_DOCUMENTARRAYPROTO._serialized_start = 597
_DOCUMENTARRAYPROTO._serialized_end = 656
_UNIONARRAYPROTO._serialized_start = 659
_UNIONARRAYPROTO._serialized_end = 793
_DOCUMENTARRAYSTACKEDPROTO._serialized_start = 796
_DOCUMENTARRAYSTACKEDPROTO._serialized_end = 1010
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_start = 937
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_end = 1010
# @@protoc_insertion_point(module_scope)
|
_base_ = './cascade-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './cascade_rcnn_r50_fpn_lsj_200e_8x8_fp16_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
"""
This script downloads the parallel sentences corpus and creates parallel sentences TSV files that can be used to extend
existing sentence embedding models to new languages.
The parallel sentences corpus is a crawl of transcripts from talks, which are translated into 100+ languages.
The parallel sentences corpus cannot be downloaded automatically. It is available for research purposes only (CC-BY-NC).
The training procedure can be found in the file make_multilingual.py.
Further information can be found in our paper:
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation
https://arxiv.org/abs/2004.09813
"""
import csv
import gzip
import os
from tqdm.autonotebook import tqdm
import sentence_transformers.util
source_languages = set(["en"]) # Languages our (monolingual) teacher model understands
target_languages = set(["de", "es", "it", "fr", "ar", "tr"]) # New languages we want to extend to
dev_sentences = 1000 # Number of sentences we want to use for development
download_url = "https://sbert.net/datasets/parallel-sentences.tsv.gz" # Specify parallel sentences URL here
parallel_sentences_path = "../datasets/parallel-sentences.tsv.gz" # Path of the parallel-sentences.tsv.gz file.
parallel_sentences_folder = "parallel-sentences/"
os.makedirs(os.path.dirname(parallel_sentences_path), exist_ok=True)
if not os.path.exists(parallel_sentences_path):
print("parallel-sentences.tsv.gz does not exists. Try to download from server")
sentence_transformers.util.http_get(download_url, parallel_sentences_path)
os.makedirs(parallel_sentences_folder, exist_ok=True)
train_files = []
dev_files = []
files_to_create = []
for source_lang in source_languages:
for target_lang in target_languages:
output_filename_train = os.path.join(
parallel_sentences_folder, "talks-{}-{}-train.tsv.gz".format(source_lang, target_lang)
)
output_filename_dev = os.path.join(
parallel_sentences_folder, "talks-{}-{}-dev.tsv.gz".format(source_lang, target_lang)
)
train_files.append(output_filename_train)
dev_files.append(output_filename_dev)
if not os.path.exists(output_filename_train) or not os.path.exists(output_filename_dev):
files_to_create.append(
{
"src_lang": source_lang,
"trg_lang": target_lang,
"fTrain": gzip.open(output_filename_train, "wt", encoding="utf8"),
"fDev": gzip.open(output_filename_dev, "wt", encoding="utf8"),
"devCount": 0,
}
)
if len(files_to_create) > 0:
print(
"Parallel sentences files {} do not exist. Create these files now".format(
", ".join(map(lambda x: x["src_lang"] + "-" + x["trg_lang"], files_to_create))
)
)
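    # Hedged note on the expected input format, as implied by the DictReader usage below:
    # parallel-sentences.tsv.gz should be a tab-separated file whose header row names the
    # language codes (e.g. "en", "de", ...), with one translation of a sentence per column.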
with gzip.open(parallel_sentences_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for line in tqdm(reader, desc="Sentences"):
for outfile in files_to_create:
src_text = line[outfile["src_lang"]].strip()
trg_text = line[outfile["trg_lang"]].strip()
if src_text != "" and trg_text != "":
if outfile["devCount"] < dev_sentences:
outfile["devCount"] += 1
fOut = outfile["fDev"]
else:
fOut = outfile["fTrain"]
fOut.write("{}\t{}\n".format(src_text, trg_text))
for outfile in files_to_create:
outfile["fTrain"].close()
outfile["fDev"].close()
print("---DONE---")
|
"""
This script downloads the parallel sentences corpus and creates parallel sentences TSV files that can be used to extend
existing sentence embedding models to new languages.
The parallel sentences corpus is a crawl of transcripts from talks, which are translated into 100+ languages.
The parallel sentences corpus cannot be downloaded automatically. It is available for research purposes only (CC-BY-NC).
The training procedure can be found in the file make_multilingual.py.
Further information can be found in our paper:
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation
https://arxiv.org/abs/2004.09813
"""
import os
import sentence_transformers.util
import gzip
import csv
from tqdm.autonotebook import tqdm
source_languages = set(["en"]) # Languages our (monolingual) teacher model understands
target_languages = set(["de", "es", "it", "fr", "ar", "tr"]) # New languages we want to extend to
dev_sentences = 1000 # Number of sentences we want to use for development
download_url = "https://sbert.net/datasets/parallel-sentences.tsv.gz" # Specify parallel sentences URL here
parallel_sentences_path = "../datasets/parallel-sentences.tsv.gz" # Path of the parallel-sentences.tsv.gz file.
parallel_sentences_folder = "parallel-sentences/"
os.makedirs(os.path.dirname(parallel_sentences_path), exist_ok=True)
if not os.path.exists(parallel_sentences_path):
print("parallel-sentences.tsv.gz does not exists. Try to download from server")
sentence_transformers.util.http_get(download_url, parallel_sentences_path)
os.makedirs(parallel_sentences_folder, exist_ok=True)
train_files = []
dev_files = []
files_to_create = []
for source_lang in source_languages:
for target_lang in target_languages:
output_filename_train = os.path.join(
parallel_sentences_folder, "talks-{}-{}-train.tsv.gz".format(source_lang, target_lang)
)
output_filename_dev = os.path.join(
parallel_sentences_folder, "talks-{}-{}-dev.tsv.gz".format(source_lang, target_lang)
)
train_files.append(output_filename_train)
dev_files.append(output_filename_dev)
if not os.path.exists(output_filename_train) or not os.path.exists(output_filename_dev):
files_to_create.append(
{
"src_lang": source_lang,
"trg_lang": target_lang,
"fTrain": gzip.open(output_filename_train, "wt", encoding="utf8"),
"fDev": gzip.open(output_filename_dev, "wt", encoding="utf8"),
"devCount": 0,
}
)
if len(files_to_create) > 0:
print(
"Parallel sentences files {} do not exist. Create these files now".format(
", ".join(map(lambda x: x["src_lang"] + "-" + x["trg_lang"], files_to_create))
)
)
with gzip.open(parallel_sentences_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for line in tqdm(reader, desc="Sentences"):
for outfile in files_to_create:
src_text = line[outfile["src_lang"]].strip()
trg_text = line[outfile["trg_lang"]].strip()
if src_text != "" and trg_text != "":
if outfile["devCount"] < dev_sentences:
outfile["devCount"] += 1
fOut = outfile["fDev"]
else:
fOut = outfile["fTrain"]
fOut.write("{}\t{}\n".format(src_text, trg_text))
for outfile in files_to_create:
outfile["fTrain"].close()
outfile["fDev"].close()
print("---DONE---")
|
from ..extension import _load_library
try:
_load_library("gpu_decoder")
_HAS_GPU_VIDEO_DECODER = True
except (ImportError, OSError):
_HAS_GPU_VIDEO_DECODER = False
|
from ..extension import _load_library
try:
_load_library("Decoder")
_HAS_GPU_VIDEO_DECODER = True
except (ImportError, OSError):
_HAS_GPU_VIDEO_DECODER = False
|
"""Utilities for JSON Schema."""
from __future__ import annotations
from copy import deepcopy
from typing import TYPE_CHECKING, Any, Optional
if TYPE_CHECKING:
from collections.abc import Sequence
def _retrieve_ref(path: str, schema: dict) -> dict:
components = path.split("/")
if components[0] != "#":
msg = (
"ref paths are expected to be URI fragments, meaning they should start "
"with #."
)
raise ValueError(msg)
out = schema
for component in components[1:]:
if component in out:
out = out[component]
elif component.isdigit() and int(component) in out:
out = out[int(component)]
else:
msg = f"Reference '{path}' not found."
raise KeyError(msg)
return deepcopy(out)
def _dereference_refs_helper(
obj: Any,
full_schema: dict[str, Any],
skip_keys: Sequence[str],
processed_refs: Optional[set[str]] = None,
) -> Any:
if processed_refs is None:
processed_refs = set()
if isinstance(obj, dict):
obj_out = {}
for k, v in obj.items():
if k in skip_keys:
obj_out[k] = v
elif k == "$ref":
if v in processed_refs:
continue
processed_refs.add(v)
ref = _retrieve_ref(v, full_schema)
full_ref = _dereference_refs_helper(
ref, full_schema, skip_keys, processed_refs
)
processed_refs.remove(v)
return full_ref
elif isinstance(v, (list, dict)):
obj_out[k] = _dereference_refs_helper(
v, full_schema, skip_keys, processed_refs
)
else:
obj_out[k] = v
return obj_out
if isinstance(obj, list):
return [
_dereference_refs_helper(el, full_schema, skip_keys, processed_refs)
for el in obj
]
return obj
def _infer_skip_keys(
obj: Any, full_schema: dict, processed_refs: Optional[set[str]] = None
) -> list[str]:
if processed_refs is None:
processed_refs = set()
keys = []
if isinstance(obj, dict):
for k, v in obj.items():
if k == "$ref":
if v in processed_refs:
continue
processed_refs.add(v)
ref = _retrieve_ref(v, full_schema)
keys.append(v.split("/")[1])
keys += _infer_skip_keys(ref, full_schema, processed_refs)
elif isinstance(v, (list, dict)):
keys += _infer_skip_keys(v, full_schema, processed_refs)
elif isinstance(obj, list):
for el in obj:
keys += _infer_skip_keys(el, full_schema, processed_refs)
return keys
def dereference_refs(
schema_obj: dict,
*,
full_schema: Optional[dict] = None,
skip_keys: Optional[Sequence[str]] = None,
) -> dict:
"""Try to substitute $refs in JSON Schema.
Args:
schema_obj: The schema object to dereference.
full_schema: The full schema object. Defaults to None.
skip_keys: The keys to skip. Defaults to None.
Returns:
The dereferenced schema object.
"""
full_schema = full_schema or schema_obj
skip_keys = (
skip_keys
if skip_keys is not None
else _infer_skip_keys(schema_obj, full_schema)
)
return _dereference_refs_helper(schema_obj, full_schema, skip_keys)
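# --- Hedged usage sketch (illustrative, not part of the original module) ---
if __name__ == "__main__":
    _schema = {
        "type": "object",
        "properties": {"pet": {"$ref": "#/$defs/Pet"}},
        "$defs": {"Pet": {"type": "object", "properties": {"name": {"type": "string"}}}},
    }
    _resolved = dereference_refs(_schema)
    # The $ref under properties/pet is replaced by a deep copy of the Pet definition,
    # while the top-level "$defs" container is kept verbatim (it is an inferred skip key).
    assert _resolved["properties"]["pet"]["properties"]["name"]["type"] == "string"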
|
"""Utilities for JSON Schema."""
from __future__ import annotations
from copy import deepcopy
from typing import TYPE_CHECKING, Any, Optional
if TYPE_CHECKING:
from collections.abc import Sequence
def _retrieve_ref(path: str, schema: dict) -> dict:
components = path.split("/")
if components[0] != "#":
msg = (
"ref paths are expected to be URI fragments, meaning they should start "
"with #."
)
raise ValueError(msg)
out = schema
for component in components[1:]:
if component in out:
out = out[component]
elif component.isdigit() and int(component) in out:
out = out[int(component)]
else:
msg = f"Reference '{path}' not found."
raise KeyError(msg)
return deepcopy(out)
def _dereference_refs_helper(
obj: Any,
full_schema: dict[str, Any],
skip_keys: Sequence[str],
processed_refs: Optional[set[str]] = None,
) -> Any:
if processed_refs is None:
processed_refs = set()
if isinstance(obj, dict):
obj_out = {}
for k, v in obj.items():
if k in skip_keys:
obj_out[k] = v
elif k == "$ref":
if v in processed_refs:
continue
processed_refs.add(v)
ref = _retrieve_ref(v, full_schema)
full_ref = _dereference_refs_helper(
ref, full_schema, skip_keys, processed_refs
)
processed_refs.remove(v)
return full_ref
elif isinstance(v, (list, dict)):
obj_out[k] = _dereference_refs_helper(
v, full_schema, skip_keys, processed_refs
)
else:
obj_out[k] = v
return obj_out
elif isinstance(obj, list):
return [
_dereference_refs_helper(el, full_schema, skip_keys, processed_refs)
for el in obj
]
else:
return obj
def _infer_skip_keys(
obj: Any, full_schema: dict, processed_refs: Optional[set[str]] = None
) -> list[str]:
if processed_refs is None:
processed_refs = set()
keys = []
if isinstance(obj, dict):
for k, v in obj.items():
if k == "$ref":
if v in processed_refs:
continue
processed_refs.add(v)
ref = _retrieve_ref(v, full_schema)
keys.append(v.split("/")[1])
keys += _infer_skip_keys(ref, full_schema, processed_refs)
elif isinstance(v, (list, dict)):
keys += _infer_skip_keys(v, full_schema, processed_refs)
elif isinstance(obj, list):
for el in obj:
keys += _infer_skip_keys(el, full_schema, processed_refs)
return keys
def dereference_refs(
schema_obj: dict,
*,
full_schema: Optional[dict] = None,
skip_keys: Optional[Sequence[str]] = None,
) -> dict:
"""Try to substitute $refs in JSON Schema.
Args:
schema_obj: The schema object to dereference.
full_schema: The full schema object. Defaults to None.
skip_keys: The keys to skip. Defaults to None.
Returns:
The dereferenced schema object.
"""
full_schema = full_schema or schema_obj
skip_keys = (
skip_keys
if skip_keys is not None
else _infer_skip_keys(schema_obj, full_schema)
)
return _dereference_refs_helper(schema_obj, full_schema, skip_keys)
|
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
TSDAE will be trained using these sentences. Checkpoints are stored every 500 steps in the output folder.
Usage:
python train_tsdae_from_file.py path/to/sentences.txt
"""
import gzip
import logging
import sys
from datetime import datetime
import tqdm
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, SentenceTransformer, datasets, losses, models
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Train Parameters
model_name = "bert-base-uncased"
batch_size = 8
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print("Run this script with: python {} path/to/sentences.txt".format(sys.argv[0]))
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ""
if len(sys.argv) >= 3:
output_name = "-" + sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = "output/train_tsdae{}-{}".format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
################# Read the train corpus #################
train_sentences = []
with gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(
filepath, encoding="utf8"
) as fIn:
for line in tqdm.tqdm(fIn, desc="Read file"):
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name)
# Apply **cls** pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), "cls")
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train and evaluate the model (it needs about 1 hour for one epoch of AskUbuntu) #################
# We wrap our training sentences in the DenoisingAutoEncoderDataset to add deletion noise on the fly
train_dataset = datasets.DenoisingAutoEncoderDataset(train_sentences)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.DenoisingAutoEncoderLoss(model, decoder_name_or_path=model_name, tie_encoder_decoder=True)
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=1,
weight_decay=0,
scheduler="constantlr",
optimizer_params={"lr": 3e-5},
show_progress_bar=True,
checkpoint_path=model_output_path,
use_amp=False, # Set to True, if your GPU supports FP16 cores
)
|
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
TSDAE will be trained using these sentences. Checkpoints are stored every 500 steps in the output folder.
Usage:
python train_tsdae_from_file.py path/to/sentences.txt
"""
from sentence_transformers import SentenceTransformer, LoggingHandler
from sentence_transformers import models, datasets, losses
import logging
import gzip
from torch.utils.data import DataLoader
from datetime import datetime
import sys
import tqdm
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
# Train Parameters
model_name = 'bert-base-uncased'
batch_size = 8
#Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print("Run this script with: python {} path/to/sentences.txt".format(sys.argv[0]))
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ''
if len(sys.argv) >= 3:
output_name = "-"+sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = 'output/train_tsdae{}-{}'.format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
################# Read the train corpus #################
train_sentences = []
with gzip.open(filepath, 'rt', encoding='utf8') if filepath.endswith('.gz') else open(filepath, encoding='utf8') as fIn:
for line in tqdm.tqdm(fIn, desc='Read file'):
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name)
# Apply **cls** pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), 'cls')
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train and evaluate the model (it needs about 1 hour for one epoch of AskUbuntu) #################
# We wrap our training sentences in the DenoisingAutoEncoderDataset to add deletion noise on the fly
train_dataset = datasets.DenoisingAutoEncoderDataset(train_sentences)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.DenoisingAutoEncoderLoss(model, decoder_name_or_path=model_name, tie_encoder_decoder=True)
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=1,
weight_decay=0,
scheduler='constantlr',
optimizer_params={'lr': 3e-5},
show_progress_bar=True,
checkpoint_path=model_output_path,
use_amp=False #Set to True, if your GPU supports FP16 cores
)
|
import torch
from torch import Tensor
def sgd_out_of_place(param, grad, weight_decay, lr, maximize) -> Tensor:
"""
Computes a single step of SGD on a single parameter Tensor with grad.
Assumes:
- param and grad are the same shape and are 1D.
- param and grad are float and on CPU
Args:
param: a 1D tensor of floats
grad: a 1D tensor of floats
weight_decay: a python double between 0 and 1
lr: a python double
Returns:
a 1D float Tensor the same shape as param
"""
return torch.ops.libtorch_agnostic.sgd_out_of_place.default(
param, grad, weight_decay, lr, maximize
)
def identity(t) -> Tensor:
"""
Returns the input tensor
Args:
t: any Tensor
Returns:
a Tensor, the same as input.
"""
return torch.ops.libtorch_agnostic.identity.default(t)
def my_abs(t) -> Tensor:
"""
Returns abs on the input tensor, outputs a new Tensor
Args:
t: any Tensor
Returns:
a Tensor
"""
return torch.ops.libtorch_agnostic.my_abs.default(t)
def my_ones_like(tensor, device) -> Tensor:
"""
Returns a new Tensor like the input tensor, but with all ones
Args:
tensor: any Tensor
device: a device string
Returns:
a ones Tensor with the same dtype and shape and other attributes
like the input tensor
"""
return torch.ops.libtorch_agnostic.my_ones_like.default(tensor, device)
def exp_neg_is_leaf(t1, t2, t3) -> tuple[Tensor, Tensor, bool]:
"""
Returns a Tensor, Tensor, bool tuple corresponding to the respective inputs
t1, t2, and t3.
Args:
t1: Tensor
t2: Tensor
t3: Tensor
Returns:
(exp(t1), neg(t2), is_leaf(t3))
"""
return torch.ops.libtorch_agnostic.exp_neg_is_leaf.default(t1, t2, t3)
def neg_exp(t) -> Tensor:
"""
Returns a Tensor composing neg of exp
Args:
t: Tensor
Returns: neg(exp(t))
"""
return torch.ops.libtorch_agnostic.neg_exp.default(t)
def divide_neg_exp(t) -> Tensor:
"""
Returns a Tensor division of neg and exp
Args:
t: Tensor
Returns: divide(neg(t), exp(t))
"""
return torch.ops.libtorch_agnostic.divide_neg_exp.default(t)
def is_contiguous(t) -> bool:
"""
Returns a bool indicating if the input tensor is contiguous
Args:
t: Tensor
Returns: is_contiguous(t)
"""
return torch.ops.libtorch_agnostic.is_contiguous.default(t)
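# Hedged usage note: assuming the libtorch_agnostic extension library is built and loaded
# (so that torch.ops.libtorch_agnostic resolves), these wrappers mirror their ATen
# counterparts as described in the docstrings above, e.g. for t = torch.tensor([-1.0, 2.0]):
#   my_abs(t)  -> tensor([1., 2.])
#   neg_exp(t) -> the same values as -torch.exp(t)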
|
import torch
from torch import Tensor
def sgd_out_of_place(param, grad, weight_decay, lr, maximize) -> Tensor:
"""
Computes a single step of SGD on a single parameter Tensor with grad.
Assumes:
- param and grad are the same shape and are 1D.
- param and grad are float and on CPU
Args:
param: a 1D tensor of floats
grad: a 1D tensor of floats
weight_decay: a python double between 0 and 1
lr: a python double
Returns:
a 1D float Tensor the same shape as param
"""
return torch.ops.libtorch_agnostic.sgd_out_of_place.default(
param, grad, weight_decay, lr, maximize
)
def identity(t) -> Tensor:
"""
Returns the input tensor
Args:
t: any Tensor
Returns:
a Tensor, the same as input.
"""
return torch.ops.libtorch_agnostic.identity.default(t)
def my_abs(t) -> Tensor:
"""
Returns abs on the input tensor, outputs a new Tensor
Args:
t: any Tensor
Returns:
a Tensor
"""
return torch.ops.libtorch_agnostic.my_abs.default(t)
def my_ones_like(tensor, device) -> Tensor:
"""
Returns a new Tensor like the input tensor, but with all ones
Args:
tensor: any Tensor
device: a device string
Returns:
a ones Tensor with the same dtype and shape and other attributes
like the input tensor
"""
return torch.ops.libtorch_agnostic.my_ones_like.default(tensor, device)
def exp_neg_is_leaf(t1, t2, t3) -> tuple[Tensor, Tensor, bool]:
"""
Returns a Tensor, Tensor, bool tuple corresponding to the respective inputs
t1, t2, and t3.
Args:
t1: Tensor
t2: Tensor
t3: Tensor
Returns:
(exp(t1), neg(t2), is_leaf(t3))
"""
return torch.ops.libtorch_agnostic.exp_neg_is_leaf.default(t1, t2, t3)
def neg_exp(t) -> Tensor:
"""
Returns a Tensor composing neg of exp
Args:
t: Tensor
Returns: neg(exp(t))
"""
return torch.ops.libtorch_agnostic.neg_exp.default(t)
def divide_neg_exp(t) -> Tensor:
"""
Returns a Tensor division of neg and exp
Args:
t: Tensor
Returns: divide(neg(t), exp(t))
"""
return torch.ops.libtorch_agnostic.divide_neg_exp.default(t)
|
from dataclasses import dataclass
from typing import Callable, Optional
import datasets
@dataclass
class GeneratorConfig(datasets.BuilderConfig):
generator: Optional[Callable] = None
gen_kwargs: Optional[dict] = None
features: Optional[datasets.Features] = None
split: datasets.NamedSplit = datasets.Split.TRAIN
def __post_init__(self):
super().__post_init__()
if self.generator is None:
raise ValueError("generator must be specified")
if self.gen_kwargs is None:
self.gen_kwargs = {}
class Generator(datasets.GeneratorBasedBuilder):
BUILDER_CONFIG_CLASS = GeneratorConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=self.config.split, gen_kwargs=self.config.gen_kwargs)]
def _generate_examples(self, **gen_kwargs):
for idx, ex in enumerate(self.config.generator(**gen_kwargs)):
yield idx, ex
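# Hedged usage note: this packaged builder is the machinery expected to sit behind
# high-level helpers such as datasets.Dataset.from_generator, e.g.
#   ds = datasets.Dataset.from_generator(lambda: ({"x": i} for i in range(3)))
# which builds a GeneratorConfig(generator=...) and yields three rows.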
|
from dataclasses import dataclass
from typing import Callable, Optional
import datasets
@dataclass
class GeneratorConfig(datasets.BuilderConfig):
generator: Optional[Callable] = None
gen_kwargs: Optional[dict] = None
features: Optional[datasets.Features] = None
def __post_init__(self):
super().__post_init__()
if self.generator is None:
raise ValueError("generator must be specified")
if self.gen_kwargs is None:
self.gen_kwargs = {}
class Generator(datasets.GeneratorBasedBuilder):
BUILDER_CONFIG_CLASS = GeneratorConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=self.config.gen_kwargs)]
def _generate_examples(self, **gen_kwargs):
for idx, ex in enumerate(self.config.generator(**gen_kwargs)):
yield idx, ex
|
_base_ = './fcos_hrnetv2p-w32-gn-head_4xb4-1x_coco.py'
model = dict(
data_preprocessor=dict(
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './fcos_hrnetv2p-w32-gn-head_4xb4-1x_coco.py'
model = dict(
data_preprocessor=dict(
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
"""Test tool spec."""
from typing import List, Tuple, Union
import pytest
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.core.tools.types import ToolMetadata
from llama_index.core.workflow import Context
class FooSchema(BaseModel):
arg1: str
arg2: int
class BarSchema(BaseModel):
arg1: bool
class AbcSchema(BaseModel):
arg1: str
class TestToolSpec(BaseToolSpec):
spec_functions: List[Union[str, Tuple[str, str]]] = ["foo", "bar", "abc", "abc_with_ctx"]
def foo(self, arg1: str, arg2: int) -> str:
"""Foo."""
return f"foo {arg1} {arg2}"
def bar(self, arg1: bool) -> str:
"""Bar."""
return f"bar {arg1}"
async def afoo(self, arg1: str, arg2: int) -> str:
"""Afoo."""
return self.foo(arg1=arg1, arg2=arg2)
async def abar(self, arg1: bool) -> str:
"""Abar."""
return self.bar(arg1=arg1)
def abc(self, arg1: str) -> str:
# NOTE: no docstring
return f"bar {arg1}"
def abc_with_ctx(self, arg1: str, ctx: Context) -> str:
return f"bar {arg1}"
def unused_function(self, arg1: str) -> str:
return f"unused {arg1}"
def test_tool_spec() -> None:
"""Test tool spec."""
tool_spec = TestToolSpec()
# first is foo, second is bar
tools = tool_spec.to_tool_list()
assert len(tools) == 4
assert tools[0].metadata.name == "foo"
assert tools[0].metadata.description == "foo(arg1: str, arg2: int) -> str\nFoo."
assert tools[0].fn("hello", 1) == "foo hello 1"
assert tools[0].ctx_param_name is None
assert not tools[0].requires_context
assert tools[1].metadata.name == "bar"
assert tools[1].metadata.description == "bar(arg1: bool) -> str\nBar."
assert str(tools[1](True)) == "bar True"
assert tools[1].ctx_param_name is None
assert not tools[1].requires_context
assert tools[2].metadata.name == "abc"
assert tools[2].metadata.description == "abc(arg1: str) -> str\n"
assert tools[2].metadata.fn_schema.model_json_schema()["properties"] == AbcSchema.model_json_schema()["properties"]
assert tools[2].ctx_param_name is None
assert not tools[2].requires_context
assert tools[3].metadata.name == "abc_with_ctx"
assert tools[3].metadata.description == "abc_with_ctx(arg1: str) -> str\n"
assert tools[3].metadata.fn_schema.model_json_schema()["properties"] == AbcSchema.model_json_schema()["properties"]
assert tools[3].ctx_param_name == "ctx"
assert tools[3].requires_context
# test metadata mapping
tools = tool_spec.to_tool_list(
func_to_metadata_mapping={
"foo": ToolMetadata(
"foo_description", name="foo_name", fn_schema=FooSchema
),
}
)
assert len(tools) == 4
assert tools[0].metadata.name == "foo_name"
assert tools[0].metadata.description == "foo_description"
assert tools[0].metadata.fn_schema is not None
fn_schema = tools[0].metadata.fn_schema.model_json_schema()
print(fn_schema)
assert fn_schema["properties"]["arg1"]["type"] == "string"
assert fn_schema["properties"]["arg2"]["type"] == "integer"
assert tools[1].metadata.name == "bar"
assert tools[1].metadata.description == "bar(arg1: bool) -> str\nBar."
assert tools[1].metadata.fn_schema is not None
fn_schema = tools[1].metadata.fn_schema.model_json_schema()
assert fn_schema["properties"]["arg1"]["type"] == "boolean"
@pytest.mark.asyncio
async def test_tool_spec_async() -> None:
"""Test async_fn of tool spec."""
tool_spec = TestToolSpec()
tools = tool_spec.to_tool_list()
assert len(tools) == 4
assert await tools[0].async_fn("hello", 1) == "foo hello 1"
assert str(await tools[1].acall(True)) == "bar True"
def test_async_patching() -> None:
# test sync patching of async function
tool_spec = TestToolSpec()
tool_spec.spec_functions = ["afoo"]
tools = tool_spec.to_tool_list()
assert len(tools) == 1
assert tools[0].fn("hello", 1) == "foo hello 1"
def test_tool_spec_subset() -> None:
"""Test tool spec subset."""
tool_spec = TestToolSpec()
tools = tool_spec.to_tool_list(spec_functions=["abc"])
assert len(tools) == 1
assert tools[0].metadata.name == "abc"
assert tools[0].metadata.description == "abc(arg1: str) -> str\n"
assert tools[0].metadata.fn_schema.model_json_schema()["properties"] == AbcSchema.model_json_schema()["properties"]
|
"""Test tool spec."""
from typing import List, Optional, Tuple, Type, Union
import pytest
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.core.tools.types import ToolMetadata
class FooSchema(BaseModel):
arg1: str
arg2: int
class BarSchema(BaseModel):
arg1: bool
class AbcSchema(BaseModel):
arg1: str
class TestToolSpec(BaseToolSpec):
spec_functions: List[Union[str, Tuple[str, str]]] = ["foo", "bar", "abc"]
def foo(self, arg1: str, arg2: int) -> str:
"""Foo."""
return f"foo {arg1} {arg2}"
def bar(self, arg1: bool) -> str:
"""Bar."""
return f"bar {arg1}"
async def afoo(self, arg1: str, arg2: int) -> str:
"""Afoo."""
return self.foo(arg1=arg1, arg2=arg2)
async def abar(self, arg1: bool) -> str:
"""Abar."""
return self.bar(arg1=arg1)
def abc(self, arg1: str) -> str:
# NOTE: no docstring
return f"bar {arg1}"
def get_fn_schema_from_fn_name(
self,
fn_name: str,
spec_functions: Optional[List[Union[str, Tuple[str, str]]]] = None,
) -> Type[BaseModel]:
"""Return map from function name."""
spec_functions = spec_functions or self.spec_functions
if fn_name == "foo":
return FooSchema
elif fn_name == "afoo":
return FooSchema
elif fn_name == "bar":
return BarSchema
elif fn_name == "abc":
return AbcSchema
else:
raise ValueError(f"Invalid function name: {fn_name}")
def test_tool_spec() -> None:
"""Test tool spec."""
tool_spec = TestToolSpec()
# first is foo, second is bar
tools = tool_spec.to_tool_list()
assert len(tools) == 3
assert tools[0].metadata.name == "foo"
assert tools[0].metadata.description == "foo(arg1: str, arg2: int) -> str\nFoo."
assert tools[0].fn("hello", 1) == "foo hello 1"
assert tools[1].metadata.name == "bar"
assert tools[1].metadata.description == "bar(arg1: bool) -> str\nBar."
assert str(tools[1](True)) == "bar True"
assert tools[2].metadata.name == "abc"
assert tools[2].metadata.description == "abc(arg1: str) -> str\n"
assert tools[2].metadata.fn_schema == AbcSchema
# test metadata mapping
tools = tool_spec.to_tool_list(
func_to_metadata_mapping={
"foo": ToolMetadata(
"foo_description", name="foo_name", fn_schema=FooSchema
),
}
)
assert len(tools) == 3
assert tools[0].metadata.name == "foo_name"
assert tools[0].metadata.description == "foo_description"
assert tools[0].metadata.fn_schema is not None
fn_schema = tools[0].metadata.fn_schema.model_json_schema()
print(fn_schema)
assert fn_schema["properties"]["arg1"]["type"] == "string"
assert fn_schema["properties"]["arg2"]["type"] == "integer"
assert tools[1].metadata.name == "bar"
assert tools[1].metadata.description == "bar(arg1: bool) -> str\nBar."
assert tools[1].metadata.fn_schema is not None
fn_schema = tools[1].metadata.fn_schema.model_json_schema()
assert fn_schema["properties"]["arg1"]["type"] == "boolean"
@pytest.mark.asyncio
async def test_tool_spec_async() -> None:
"""Test async_fn of tool spec."""
tool_spec = TestToolSpec()
tools = tool_spec.to_tool_list()
assert len(tools) == 3
assert await tools[0].async_fn("hello", 1) == "foo hello 1"
assert str(await tools[1].acall(True)) == "bar True"
def test_async_patching() -> None:
# test sync patching of async function
tool_spec = TestToolSpec()
tool_spec.spec_functions = ["afoo"]
tools = tool_spec.to_tool_list()
assert len(tools) == 1
assert tools[0].fn("hello", 1) == "foo hello 1"
def test_tool_spec_schema() -> None:
"""Test tool spec schemas match."""
tool_spec = TestToolSpec()
# first is foo, second is bar
schema1 = tool_spec.get_fn_schema_from_fn_name("foo")
assert schema1 == FooSchema
schema2 = tool_spec.get_fn_schema_from_fn_name("bar")
assert schema2 == BarSchema
def test_tool_spec_subset() -> None:
"""Test tool spec subset."""
tool_spec = TestToolSpec()
tools = tool_spec.to_tool_list(spec_functions=["abc"])
assert len(tools) == 1
assert tools[0].metadata.name == "abc"
assert tools[0].metadata.description == "abc(arg1: str) -> str\n"
assert tools[0].metadata.fn_schema == AbcSchema
|
"""This modules defines all kinds of exceptions raised in Jina."""
from typing import Set, Union
import grpc.aio
from jina.serve.helper import extract_trailing_metadata
class BaseJinaException(BaseException):
"""A base class for all exceptions raised by Jina"""
class RuntimeFailToStart(SystemError, BaseJinaException):
"""When pod/deployment is failed to started."""
class RuntimeTerminated(KeyboardInterrupt, BaseJinaException):
"""The event loop of BasePod ends."""
class FlowTopologyError(Exception, BaseJinaException):
"""Flow exception when the topology is ambiguous."""
class FlowMissingDeploymentError(Exception, BaseJinaException):
"""Flow exception when a deployment can not be found in the flow."""
class FlowBuildLevelError(Exception, BaseJinaException):
"""Flow exception when required build level is higher than the current build level."""
class BadConfigSource(FileNotFoundError, BaseJinaException):
"""The yaml config file is bad, not loadable or not exist."""
class BadServerFlow(Exception, BaseJinaException):
"""A wrongly defined Flow on the server side"""
class BadClient(Exception, BaseJinaException):
"""A wrongly defined client, can not communicate with jina server correctly."""
class BadServer(Exception, BaseJinaException):
"""Error happens on the server side."""
class BadClientCallback(BadClient, BaseJinaException):
"""Error in the callback function on the client side."""
class BadClientInput(BadClient, BaseJinaException):
"""Error in the request generator function on the client side."""
class BadRequestType(TypeError, BaseJinaException):
"""Exception when can not construct a request object from given data."""
class BadImageNameError(Exception, BaseJinaException):
"""Exception when an image name can not be found either local & remote"""
class BadYAMLVersion(Exception, BaseJinaException):
"""Exception when YAML config specifies a wrong version number."""
class NotSupportedError(Exception, BaseJinaException):
"""Exception when user accidentally using a retired argument."""
class RuntimeRunForeverEarlyError(Exception, BaseJinaException):
"""Raised when an error occurs when starting the run_forever of Runtime"""
class DockerVersionError(SystemError, BaseJinaException):
"""Raised when the docker version is incompatible"""
class NoContainerizedError(Exception, BaseJinaException):
"""Raised when trying to use non-containerized Executor in K8s or Docker Compose"""
class PortAlreadyUsed(RuntimeError, BaseJinaException):
"""Raised when trying to use a port which is already used"""
class EstablishGrpcConnectionError(Exception, BaseJinaException):
"""Raised when Exception occurs when establishing or resetting gRPC connection"""
class InternalNetworkError(grpc.aio.AioRpcError, BaseJinaException):
"""
Raised when communication between microservices fails.
Needed to propagate information about the root cause event, such as request_id and dest_addr.
"""
def __init__(
self,
og_exception: grpc.aio.AioRpcError,
request_id: str = '',
dest_addr: Union[str, Set[str]] = {''},
details: str = '',
):
"""
:param og_exception: the original exception that caused the network error
:param request_id: id of the request that caused the error
:param dest_addr: destination (microservice) address(es) of the problematic network call(s)
:param details: details of the error
"""
self.og_exception = og_exception
self.request_id = request_id
self.dest_addr = dest_addr
self._details = details
super().__init__(
og_exception.code(),
og_exception.initial_metadata(),
og_exception.trailing_metadata(),
self.details(),
og_exception.debug_error_string(),
)
def __str__(self):
return self.details()
def __repr__(self):
return self.__str__()
def code(self):
"""
:return: error code of this exception
"""
return self.og_exception.code()
def details(self):
"""
:return: details of this exception
"""
if self._details:
trailing_metadata = extract_trailing_metadata(self.og_exception)
if trailing_metadata:
return f'{self._details}\n{trailing_metadata}'
else:
return self._details
return self.og_exception.details()
|
"""This modules defines all kinds of exceptions raised in Jina."""
from typing import Set, Union
import grpc.aio
class BaseJinaException(BaseException):
"""A base class for all exceptions raised by Jina"""
class RuntimeFailToStart(SystemError, BaseJinaException):
"""When pod/deployment is failed to started."""
class RuntimeTerminated(KeyboardInterrupt, BaseJinaException):
"""The event loop of BasePod ends."""
class FlowTopologyError(Exception, BaseJinaException):
"""Flow exception when the topology is ambiguous."""
class FlowMissingDeploymentError(Exception, BaseJinaException):
"""Flow exception when a deployment can not be found in the flow."""
class FlowBuildLevelError(Exception, BaseJinaException):
"""Flow exception when required build level is higher than the current build level."""
class BadConfigSource(FileNotFoundError, BaseJinaException):
"""The yaml config file is bad, not loadable or not exist."""
class BadServerFlow(Exception, BaseJinaException):
"""A wrongly defined Flow on the server side"""
class BadClient(Exception, BaseJinaException):
"""A wrongly defined client, can not communicate with jina server correctly."""
class BadServer(Exception, BaseJinaException):
"""Error happens on the server side."""
class BadClientCallback(BadClient, BaseJinaException):
"""Error in the callback function on the client side."""
class BadClientInput(BadClient, BaseJinaException):
"""Error in the request generator function on the client side."""
class BadRequestType(TypeError, BaseJinaException):
"""Exception when can not construct a request object from given data."""
class BadImageNameError(Exception, BaseJinaException):
"""Exception when an image name can not be found either local & remote"""
class BadYAMLVersion(Exception, BaseJinaException):
"""Exception when YAML config specifies a wrong version number."""
class NotSupportedError(Exception, BaseJinaException):
"""Exception when user accidentally using a retired argument."""
class RuntimeRunForeverEarlyError(Exception, BaseJinaException):
"""Raised when an error occurs when starting the run_forever of Runtime"""
class DockerVersionError(SystemError, BaseJinaException):
"""Raised when the docker version is incompatible"""
class NoContainerizedError(Exception, BaseJinaException):
"""Raised when trying to use non-containerized Executor in K8s or Docker Compose"""
class PortAlreadyUsed(RuntimeError, BaseJinaException):
"""Raised when trying to use a port which is already used"""
class EstablishGrpcConnectionError(Exception, BaseJinaException):
"""Raised when Exception occurs when establishing or resetting gRPC connection"""
class InternalNetworkError(grpc.aio.AioRpcError, BaseJinaException):
"""
Raised when communication between microservices fails.
Needed to propagate information about the root cause event, such as request_id and dest_addr.
"""
def __init__(
self,
og_exception: grpc.aio.AioRpcError,
request_id: str = '',
dest_addr: Union[str, Set[str]] = {''},
details: str = '',
):
"""
:param og_exception: the original exception that caused the network error
:param request_id: id of the request that caused the error
:param dest_addr: destination (microservice) address(es) of the problematic network call(s)
:param details: details of the error
"""
self.og_exception = og_exception
self.request_id = request_id
self.dest_addr = dest_addr
self._details = details
super().__init__(
og_exception.code(),
og_exception.initial_metadata(),
og_exception.trailing_metadata(),
self.details(),
og_exception.debug_error_string(),
)
def __str__(self):
return self.details()
def __repr__(self):
return self.__str__()
def code(self):
"""
:return: error code of this exception
"""
return self.og_exception.code()
def details(self):
"""
:return: details of this exception
"""
return self._details if self._details else self.og_exception.details()
|
"""Loads Microsoft Excel files."""
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredXMLLoader(UnstructuredFileLoader):
"""Load `XML` file using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredXMLLoader
loader = UnstructuredXMLLoader(
"example.xml", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-xml
"""
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
file_path = str(file_path)
validate_unstructured_version(min_unstructured_version="0.6.7")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.xml import partition_xml
return partition_xml(filename=self.file_path, **self.unstructured_kwargs)
|
"""Loads Microsoft Excel files."""
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredXMLLoader(UnstructuredFileLoader):
"""Load `XML` file using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredXMLLoader
loader = UnstructuredXMLLoader(
"example.xml", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-xml
"""
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
file_path = str(file_path)
validate_unstructured_version(min_unstructured_version="0.6.7")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.xml import partition_xml
return partition_xml(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
|
"""Documents module.
**Document** module is a collection of classes that handle documents
and their transformations.
"""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from .base import Document
from .compressor import BaseDocumentCompressor
from .transformers import BaseDocumentTransformer
__all__ = ("BaseDocumentCompressor", "BaseDocumentTransformer", "Document")
_dynamic_imports = {
"Document": "base",
"BaseDocumentCompressor": "compressor",
"BaseDocumentTransformer": "transformers",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
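# A minimal, self-contained sketch of the lazy-import pattern used above (PEP 562
# module-level __getattr__): the target submodule is only imported when the attribute
# is first accessed, then cached in globals(). The "_lazy_targets" table and the use
# of the stdlib "json" module are illustrative stand-ins, not langchain_core internals.
import importlib


def _lazy_getattr(attr_name: str) -> object:
    _lazy_targets = {"dumps": "json"}  # attribute name -> module that provides it
    module_name = _lazy_targets.get(attr_name)
    if module_name is None:
        raise AttributeError(attr_name)
    value = getattr(importlib.import_module(module_name), attr_name)  # imported on first access
    globals()[attr_name] = value  # later lookups bypass __getattr__ entirely
    return value


# In a package __init__.py this function would simply be named __getattr__.
assert _lazy_getattr("dumps")({"a": 1}) == '{"a": 1}'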
|
"""Documents module.
**Document** module is a collection of classes that handle documents
and their transformations.
"""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from .base import Document
from .compressor import BaseDocumentCompressor
from .transformers import BaseDocumentTransformer
__all__ = ("Document", "BaseDocumentTransformer", "BaseDocumentCompressor")
_dynamic_imports = {
"Document": "base",
"BaseDocumentCompressor": "compressor",
"BaseDocumentTransformer": "transformers",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_data_element import BaseDataElement
from .base_data_sample import BaseDataSample
from .sampler import DefaultSampler, InfiniteSampler
__all__ = [
'BaseDataElement', 'BaseDataSample', 'DefaultSampler', 'InfiniteSampler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .sampler import DefaultSampler, InfiniteSampler
__all__ = ['DefaultSampler', 'InfiniteSampler']
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.roi_heads.mask_heads import FCNMaskHead, MaskIoUHead
from .utils import _dummy_bbox_sampling
def test_mask_head_loss():
"""Test mask head loss when mask target is empty."""
self = FCNMaskHead(
num_convs=1,
roi_feat_size=6,
in_channels=8,
conv_out_channels=8,
num_classes=8)
# Dummy proposals
proposal_list = [
torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]),
]
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
# create dummy mask
import numpy as np
from mmdet.core import BitmapMasks
dummy_mask = np.random.randint(0, 2, (1, 160, 240), dtype=np.uint8)
gt_masks = [BitmapMasks(dummy_mask, 160, 240)]
# create dummy train_cfg
train_cfg = mmcv.Config(dict(mask_size=12, mask_thr_binary=0.5))
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
dummy_feats = torch.rand(num_sampled, 8, 6, 6)
mask_pred = self.forward(dummy_feats)
mask_targets = self.get_targets(sampling_results, gt_masks, train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = self.loss(mask_pred, mask_targets, pos_labels)
onegt_mask_loss = sum(loss_mask['loss_mask'])
assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero'
# test mask_iou_head
mask_iou_head = MaskIoUHead(
num_convs=1,
num_fcs=1,
roi_feat_size=6,
in_channels=8,
conv_out_channels=8,
fc_out_channels=8,
num_classes=8)
pos_mask_pred = mask_pred[range(mask_pred.size(0)), pos_labels]
mask_iou_pred = mask_iou_head(dummy_feats, pos_mask_pred)
pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)), pos_labels]
mask_iou_targets = mask_iou_head.get_targets(sampling_results, gt_masks,
pos_mask_pred, mask_targets,
train_cfg)
loss_mask_iou = mask_iou_head.loss(pos_mask_iou_pred, mask_iou_targets)
onegt_mask_iou_loss = loss_mask_iou['loss_mask_iou'].sum()
assert onegt_mask_iou_loss.item() >= 0
|
import mmcv
import torch
from mmdet.models.roi_heads.mask_heads import FCNMaskHead, MaskIoUHead
from .utils import _dummy_bbox_sampling
def test_mask_head_loss():
"""Test mask head loss when mask target is empty."""
self = FCNMaskHead(
num_convs=1,
roi_feat_size=6,
in_channels=8,
conv_out_channels=8,
num_classes=8)
# Dummy proposals
proposal_list = [
torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]),
]
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
# create dummy mask
import numpy as np
from mmdet.core import BitmapMasks
dummy_mask = np.random.randint(0, 2, (1, 160, 240), dtype=np.uint8)
gt_masks = [BitmapMasks(dummy_mask, 160, 240)]
# create dummy train_cfg
train_cfg = mmcv.Config(dict(mask_size=12, mask_thr_binary=0.5))
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
dummy_feats = torch.rand(num_sampled, 8, 6, 6)
mask_pred = self.forward(dummy_feats)
mask_targets = self.get_targets(sampling_results, gt_masks, train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = self.loss(mask_pred, mask_targets, pos_labels)
onegt_mask_loss = sum(loss_mask['loss_mask'])
assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero'
# test mask_iou_head
mask_iou_head = MaskIoUHead(
num_convs=1,
num_fcs=1,
roi_feat_size=6,
in_channels=8,
conv_out_channels=8,
fc_out_channels=8,
num_classes=8)
pos_mask_pred = mask_pred[range(mask_pred.size(0)), pos_labels]
mask_iou_pred = mask_iou_head(dummy_feats, pos_mask_pred)
pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)), pos_labels]
mask_iou_targets = mask_iou_head.get_targets(sampling_results, gt_masks,
pos_mask_pred, mask_targets,
train_cfg)
loss_mask_iou = mask_iou_head.loss(pos_mask_iou_pred, mask_iou_targets)
onegt_mask_iou_loss = loss_mask_iou['loss_mask_iou'].sum()
assert onegt_mask_iou_loss.item() >= 0
|
# Copyright (c) OpenMMLab. All rights reserved.
from .lr_scheduler import (ConstantLR, CosineAnnealingLR, ExponentialLR,
LinearLR, MultiStepLR, PolyLR, StepLR)
from .momentum_scheduler import (ConstantMomentum, CosineAnnealingMomentum,
ExponentialMomentum, LinearMomentum,
MultiStepMomentum, PolyMomentum, StepMomentum)
from .param_scheduler import (ConstantParamScheduler,
CosineAnnealingParamScheduler,
ExponentialParamScheduler, LinearParamScheduler,
MultiStepParamScheduler, PolyParamScheduler,
StepParamScheduler, _ParamScheduler)
__all__ = [
'ConstantLR', 'CosineAnnealingLR', 'ExponentialLR', 'LinearLR',
'MultiStepLR', 'StepLR', 'ConstantMomentum', 'CosineAnnealingMomentum',
'ExponentialMomentum', 'LinearMomentum', 'MultiStepMomentum',
'StepMomentum', 'ConstantParamScheduler', 'CosineAnnealingParamScheduler',
'ExponentialParamScheduler', 'LinearParamScheduler',
'MultiStepParamScheduler', 'StepParamScheduler', '_ParamScheduler',
'PolyParamScheduler', 'PolyLR', 'PolyMomentum'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .lr_scheduler import (ConstantLR, CosineAnnealingLR, ExponentialLR,
LinearLR, MultiStepLR, StepLR)
from .momentum_scheduler import (ConstantMomentum, CosineAnnealingMomentum,
ExponentialMomentum, LinearMomentum,
MultiStepMomentum, StepMomentum)
from .param_scheduler import (ConstantParamScheduler,
CosineAnnealingParamScheduler,
ExponentialParamScheduler, LinearParamScheduler,
MultiStepParamScheduler, StepParamScheduler,
_ParamScheduler)
__all__ = [
'ConstantLR', 'CosineAnnealingLR', 'ExponentialLR', 'LinearLR',
'MultiStepLR', 'StepLR', 'ConstantMomentum', 'CosineAnnealingMomentum',
'ExponentialMomentum', 'LinearMomentum', 'MultiStepMomentum',
'StepMomentum', 'ConstantParamScheduler', 'CosineAnnealingParamScheduler',
'ExponentialParamScheduler', 'LinearParamScheduler',
'MultiStepParamScheduler', 'StepParamScheduler', '_ParamScheduler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_dataset import BaseDataset, Compose, force_full_init
from .dataset_wrapper import ClassBalancedDataset, ConcatDataset, RepeatDataset
from .sampler import DefaultSampler, InfiniteSampler
from .utils import pseudo_collate, worker_init_fn
__all__ = [
'BaseDataset', 'Compose', 'force_full_init', 'ClassBalancedDataset',
'ConcatDataset', 'RepeatDataset', 'DefaultSampler', 'InfiniteSampler',
'worker_init_fn', 'pseudo_collate'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .base_dataset import BaseDataset, Compose, force_full_init
from .dataset_wrapper import ClassBalancedDataset, ConcatDataset, RepeatDataset
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=4, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=4, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
from __future__ import annotations
from typing_extensions import deprecated
from sentence_transformers import InputExample
from sentence_transformers.cross_encoder.evaluation.CEClassificationEvaluator import CEClassificationEvaluator
@deprecated(
"This evaluator has been deprecated in favor of the more general CEClassificationEvaluator. "
"Please use CEClassificationEvaluator instead, which supports both binary and multi-class "
"evaluation. It accepts approximately the same inputs as this evaluator."
)
class CESoftmaxAccuracyEvaluator(CEClassificationEvaluator):
"""
This evaluator has been deprecated in favor of the more general CEClassificationEvaluator.
"""
@classmethod
def from_input_examples(cls, examples: list[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
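# A hedged usage sketch for the deprecated wrapper above, grounded only in the
# from_input_examples classmethod shown here. Constructing the evaluator is cheap;
# the actual evaluation call is omitted because it needs a trained CrossEncoder model.
_example_inputs = [
    InputExample(texts=["A man is eating food.", "A man is eating a meal."], label=1),
    InputExample(texts=["A man is eating food.", "The girl is carrying a baby."], label=0),
]
_example_evaluator = CESoftmaxAccuracyEvaluator.from_input_examples(_example_inputs)
# Internally the texts and labels are unpacked and forwarded to CEClassificationEvaluator,
# which is why migrating existing code usually only requires swapping the class name.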
|
from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CESoftmaxAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
    It is designed for CrossEncoders with 2 or more outputs. It measures the
    accuracy of the predicted class vs. the gold labels.
"""
def __init__(self, sentence_pairs: list[list[str]], labels: list[int], name: str = "", write_csv: bool = True):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.name = name
self.csv_file = "CESoftmaxAccuracyEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Accuracy"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: list[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = f" after epoch {epoch}:"
else:
out_txt = f" in epoch {epoch} after {steps} steps:"
else:
out_txt = ":"
logger.info("CESoftmaxAccuracyEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
pred_labels = np.argmax(pred_scores, axis=1)
assert len(pred_labels) == len(self.labels)
acc = np.sum(pred_labels == self.labels) / len(self.labels)
logger.info(f"Accuracy: {acc * 100:.2f}")
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc])
return acc
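# A hedged sketch of the core computation performed in __call__ above, reproduced
# with plain numpy on made-up prediction scores so it runs without a CrossEncoder.
_demo_scores = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # one row of class scores per pair
_demo_gold = np.array([1, 0, 0])
_demo_pred = np.argmax(_demo_scores, axis=1)                   # predicted class per pair
_demo_acc = np.sum(_demo_pred == _demo_gold) / len(_demo_gold)
assert abs(_demo_acc - 2 / 3) < 1e-9                           # the third pair is misclassified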
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='ImageTensorFlowTensor')
@_register_proto(proto_type_name='image_tensorflow_tensor')
class ImageTensorFlowTensor(
TensorFlowTensor, AbstractImageTensor, metaclass=metaTensorFlow
):
"""
Subclass of [`TensorFlowTensor`][docarray.typing.TensorFlowTensor],
to represent an image tensor. Adds image-specific features to the tensor.
    For instance, the ability to convert the tensor back to image bytes, which are
    optimized to send over the wire.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageBytes, ImageTensorFlowTensor, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageTensorFlowTensor]
url: Optional[ImageUrl]
bytes: Optional[ImageBytes]
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
```
---
"""
...
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='ImageTensorFlowTensor')
@_register_proto(proto_type_name='image_tensorflow_tensor')
class ImageTensorFlowTensor(
TensorFlowTensor, AbstractImageTensor, metaclass=metaTensorFlow
):
"""
Subclass of TensorFlowTensor, to represent an image tensor.
Adds image-specific features to the tensor.
    For instance, the ability to convert the tensor back to image bytes, which are
    optimized to send over the wire.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageBytes, ImageTensorFlowTensor, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageTensorFlowTensor]
url: Optional[ImageUrl]
bytes: Optional[ImageBytes]
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
```
---
"""
...
|
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.22.0"
SCIPY_MIN_VERSION = "1.8.0"
JOBLIB_MIN_VERSION = "1.2.0"
THREADPOOLCTL_MIN_VERSION = "3.1.0"
PYTEST_MIN_VERSION = "7.1.2"
CYTHON_MIN_VERSION = "3.0.10"
# 'build' and 'install' are included to have structured metadata for CI.
# It will NOT be included in setup's extras_require
# The values are (version_spec, comma separated tags)
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "build, install"),
"scipy": (SCIPY_MIN_VERSION, "build, install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"cython": (CYTHON_MIN_VERSION, "build"),
"meson-python": ("0.16.0", "build"),
"matplotlib": ("3.5.0", "benchmark, docs, examples, tests"),
"scikit-image": ("0.19.0", "docs, examples, tests"),
"pandas": ("1.4.0", "benchmark, docs, examples, tests"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "benchmark, docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"ruff": ("0.5.1", "tests"),
"black": ("24.3.0", "tests"),
"mypy": ("1.9", "tests"),
"pyamg": ("5.0.0", "tests"),
"polars": ("0.20.30", "docs, tests"),
"pyarrow": ("12.0.0", "tests"),
"sphinx": ("7.3.7", "docs"),
"sphinx-copybutton": ("0.5.2", "docs"),
"sphinx-gallery": ("0.17.1", "docs"),
"numpydoc": ("1.2.0", "docs, tests"),
"Pillow": ("8.4.0", "docs"),
"pooch": ("1.6.0", "docs, examples, tests"),
"sphinx-prompt": ("1.4.0", "docs"),
"sphinxext-opengraph": ("0.9.1", "docs"),
"plotly": ("5.14.0", "docs, examples"),
"sphinxcontrib-sass": ("0.3.4", "docs"),
"sphinx-remove-toctrees": ("1.0.0.post1", "docs"),
"sphinx-design": ("0.6.0", "docs"),
"pydata-sphinx-theme": ("0.15.3", "docs"),
"towncrier": ("24.8.0", "docs"),
# XXX: Pin conda-lock to the latest released version (needs manual update
# from time to time)
"conda-lock": ("2.5.7", "maintenance"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
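# A hedged sketch of the tag -> package inverse mapping built above, run on a tiny
# made-up table so the expected output is easy to verify by hand.
_demo_packages = {
    "numpy": ("1.22.0", "build, install"),
    "pytest": ("7.1.2", "tests"),
}
_demo_tags: dict = defaultdict(list)
for _pkg, (_min, _extras) in _demo_packages.items():
    for _extra in _extras.split(", "):
        _demo_tags[_extra].append("{}>={}".format(_pkg, _min))
assert _demo_tags["build"] == ["numpy>=1.22.0"]
assert _demo_tags["install"] == ["numpy>=1.22.0"]
assert _demo_tags["tests"] == ["pytest>=7.1.2"]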
|
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.19.5"
SCIPY_MIN_VERSION = "1.6.0"
JOBLIB_MIN_VERSION = "1.2.0"
THREADPOOLCTL_MIN_VERSION = "3.1.0"
PYTEST_MIN_VERSION = "7.1.2"
CYTHON_MIN_VERSION = "3.0.10"
# 'build' and 'install' are included to have structured metadata for CI.
# It will NOT be included in setup's extras_require
# The values are (version_spec, comma separated tags)
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "build, install"),
"scipy": (SCIPY_MIN_VERSION, "build, install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"cython": (CYTHON_MIN_VERSION, "build"),
"meson-python": ("0.16.0", "build"),
"matplotlib": ("3.3.4", "benchmark, docs, examples, tests"),
"scikit-image": ("0.17.2", "docs, examples, tests"),
"pandas": ("1.2.0", "benchmark, docs, examples, tests"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "benchmark, docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"ruff": ("0.5.1", "tests"),
"black": ("24.3.0", "tests"),
"mypy": ("1.9", "tests"),
"pyamg": ("4.0.0", "tests"),
"polars": ("0.20.30", "docs, tests"),
"pyarrow": ("12.0.0", "tests"),
"sphinx": ("7.3.7", "docs"),
"sphinx-copybutton": ("0.5.2", "docs"),
"sphinx-gallery": ("0.17.1", "docs"),
"numpydoc": ("1.2.0", "docs, tests"),
"Pillow": ("7.1.2", "docs"),
"pooch": ("1.6.0", "docs, examples, tests"),
"sphinx-prompt": ("1.4.0", "docs"),
"sphinxext-opengraph": ("0.9.1", "docs"),
"plotly": ("5.14.0", "docs, examples"),
"sphinxcontrib-sass": ("0.3.4", "docs"),
"sphinx-remove-toctrees": ("1.0.0.post1", "docs"),
"sphinx-design": ("0.6.0", "docs"),
"pydata-sphinx-theme": ("0.15.3", "docs"),
"towncrier": ("24.8.0", "docs"),
# XXX: Pin conda-lock to the latest released version (needs manual update
# from time to time)
"conda-lock": ("2.5.7", "maintenance"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
|
"""Toolkit for interacting with a vector store."""
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from langchain_core.vectorstores import VectorStore
from pydantic import BaseModel, ConfigDict, Field
class VectorStoreInfo(BaseModel):
"""Information about a VectorStore."""
vectorstore: VectorStore = Field(exclude=True)
name: str
description: str
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
class VectorStoreToolkit(BaseToolkit):
"""Toolkit for interacting with a Vector Store."""
vectorstore_info: VectorStoreInfo = Field(exclude=True)
llm: BaseLanguageModel
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def get_tools(self) -> list[BaseTool]:
"""Get the tools in the toolkit."""
try:
from langchain_community.tools.vectorstore.tool import (
VectorStoreQATool,
VectorStoreQAWithSourcesTool,
)
except ImportError:
raise ImportError(
"You need to install langchain-community to use this toolkit."
)
description = VectorStoreQATool.get_description(
self.vectorstore_info.name, self.vectorstore_info.description
)
qa_tool = VectorStoreQATool(
name=self.vectorstore_info.name,
description=description,
vectorstore=self.vectorstore_info.vectorstore,
llm=self.llm,
)
description = VectorStoreQAWithSourcesTool.get_description(
self.vectorstore_info.name, self.vectorstore_info.description
)
qa_with_sources_tool = VectorStoreQAWithSourcesTool(
name=f"{self.vectorstore_info.name}_with_sources",
description=description,
vectorstore=self.vectorstore_info.vectorstore,
llm=self.llm,
)
return [qa_tool, qa_with_sources_tool]
class VectorStoreRouterToolkit(BaseToolkit):
"""Toolkit for routing between Vector Stores."""
vectorstores: list[VectorStoreInfo] = Field(exclude=True)
llm: BaseLanguageModel
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def get_tools(self) -> list[BaseTool]:
"""Get the tools in the toolkit."""
tools: list[BaseTool] = []
try:
from langchain_community.tools.vectorstore.tool import (
VectorStoreQATool,
)
except ImportError:
raise ImportError(
"You need to install langchain-community to use this toolkit."
)
for vectorstore_info in self.vectorstores:
description = VectorStoreQATool.get_description(
vectorstore_info.name, vectorstore_info.description
)
qa_tool = VectorStoreQATool(
name=vectorstore_info.name,
description=description,
vectorstore=vectorstore_info.vectorstore,
llm=self.llm,
)
tools.append(qa_tool)
return tools
|
"""Toolkit for interacting with a vector store."""
from typing import List
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from langchain_core.vectorstores import VectorStore
from pydantic import BaseModel, ConfigDict, Field
class VectorStoreInfo(BaseModel):
"""Information about a VectorStore."""
vectorstore: VectorStore = Field(exclude=True)
name: str
description: str
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
class VectorStoreToolkit(BaseToolkit):
"""Toolkit for interacting with a Vector Store."""
vectorstore_info: VectorStoreInfo = Field(exclude=True)
llm: BaseLanguageModel
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
try:
from langchain_community.tools.vectorstore.tool import (
VectorStoreQATool,
VectorStoreQAWithSourcesTool,
)
except ImportError:
raise ImportError(
"You need to install langchain-community to use this toolkit."
)
description = VectorStoreQATool.get_description(
self.vectorstore_info.name, self.vectorstore_info.description
)
qa_tool = VectorStoreQATool(
name=self.vectorstore_info.name,
description=description,
vectorstore=self.vectorstore_info.vectorstore,
llm=self.llm,
)
description = VectorStoreQAWithSourcesTool.get_description(
self.vectorstore_info.name, self.vectorstore_info.description
)
qa_with_sources_tool = VectorStoreQAWithSourcesTool(
name=f"{self.vectorstore_info.name}_with_sources",
description=description,
vectorstore=self.vectorstore_info.vectorstore,
llm=self.llm,
)
return [qa_tool, qa_with_sources_tool]
class VectorStoreRouterToolkit(BaseToolkit):
"""Toolkit for routing between Vector Stores."""
vectorstores: List[VectorStoreInfo] = Field(exclude=True)
llm: BaseLanguageModel
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
tools: List[BaseTool] = []
try:
from langchain_community.tools.vectorstore.tool import (
VectorStoreQATool,
)
except ImportError:
raise ImportError(
"You need to install langchain-community to use this toolkit."
)
for vectorstore_info in self.vectorstores:
description = VectorStoreQATool.get_description(
vectorstore_info.name, vectorstore_info.description
)
qa_tool = VectorStoreQATool(
name=vectorstore_info.name,
description=description,
vectorstore=vectorstore_info.vectorstore,
llm=self.llm,
)
tools.append(qa_tool)
return tools
|
"""**Prompt** is the input to the model.
Prompt is often constructed
from multiple components and prompt values. Prompt classes and functions make constructing
and working with prompts easy.
**Class hierarchy:**
.. code-block::
BasePromptTemplate --> PipelinePromptTemplate
StringPromptTemplate --> PromptTemplate
FewShotPromptTemplate
FewShotPromptWithTemplates
BaseChatPromptTemplate --> AutoGPTPrompt
ChatPromptTemplate --> AgentScratchPadChatPromptTemplate
BaseMessagePromptTemplate --> MessagesPlaceholder
BaseStringMessagePromptTemplate --> ChatMessagePromptTemplate
HumanMessagePromptTemplate
AIMessagePromptTemplate
SystemMessagePromptTemplate
""" # noqa: E501
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.prompts.base import (
BasePromptTemplate,
aformat_document,
format_document,
)
from langchain_core.prompts.chat import (
AIMessagePromptTemplate,
BaseChatPromptTemplate,
ChatMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
from langchain_core.prompts.dict import DictPromptTemplate
from langchain_core.prompts.few_shot import (
FewShotChatMessagePromptTemplate,
FewShotPromptTemplate,
)
from langchain_core.prompts.few_shot_with_templates import (
FewShotPromptWithTemplates,
)
from langchain_core.prompts.loading import load_prompt
from langchain_core.prompts.pipeline import PipelinePromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.prompts.string import (
StringPromptTemplate,
check_valid_template,
get_template_variables,
jinja2_formatter,
validate_jinja2,
)
__all__ = (
"AIMessagePromptTemplate",
"BaseChatPromptTemplate",
"BasePromptTemplate",
"ChatMessagePromptTemplate",
"ChatPromptTemplate",
"DictPromptTemplate",
"FewShotChatMessagePromptTemplate",
"FewShotPromptTemplate",
"FewShotPromptWithTemplates",
"HumanMessagePromptTemplate",
"MessagesPlaceholder",
"PipelinePromptTemplate",
"PromptTemplate",
"StringPromptTemplate",
"SystemMessagePromptTemplate",
"aformat_document",
"check_valid_template",
"format_document",
"get_template_variables",
"jinja2_formatter",
"load_prompt",
"validate_jinja2",
)
_dynamic_imports = {
"BasePromptTemplate": "base",
"format_document": "base",
"aformat_document": "base",
"AIMessagePromptTemplate": "chat",
"BaseChatPromptTemplate": "chat",
"ChatMessagePromptTemplate": "chat",
"ChatPromptTemplate": "chat",
"DictPromptTemplate": "dict",
"HumanMessagePromptTemplate": "chat",
"MessagesPlaceholder": "chat",
"SystemMessagePromptTemplate": "chat",
"FewShotChatMessagePromptTemplate": "few_shot",
"FewShotPromptTemplate": "few_shot",
"FewShotPromptWithTemplates": "few_shot_with_templates",
"load_prompt": "loading",
"PipelinePromptTemplate": "pipeline",
"PromptTemplate": "prompt",
"StringPromptTemplate": "string",
"check_valid_template": "string",
"get_template_variables": "string",
"jinja2_formatter": "string",
"validate_jinja2": "string",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
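# A hedged usage sketch for the re-exported prompt classes above. It relies only on
# PromptTemplate and ChatPromptTemplate, both listed in __all__; importing them from
# langchain_core.prompts goes through the lazy __getattr__ machinery defined above.
# Intended as a standalone snippet, not as code that belongs inside this __init__ module.
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate

_text_prompt = PromptTemplate.from_template("Summarize the following text: {text}")
print(_text_prompt.format(text="lazy imports keep package start-up fast"))

_chat_prompt = ChatPromptTemplate.from_messages(
    [("system", "You are a concise assistant."), ("human", "{question}")]
)
print(_chat_prompt.format_messages(question="Which classes does this package export?"))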
|
"""**Prompt** is the input to the model.
Prompt is often constructed
from multiple components and prompt values. Prompt classes and functions make constructing
and working with prompts easy.
**Class hierarchy:**
.. code-block::
BasePromptTemplate --> PipelinePromptTemplate
StringPromptTemplate --> PromptTemplate
FewShotPromptTemplate
FewShotPromptWithTemplates
BaseChatPromptTemplate --> AutoGPTPrompt
ChatPromptTemplate --> AgentScratchPadChatPromptTemplate
BaseMessagePromptTemplate --> MessagesPlaceholder
BaseStringMessagePromptTemplate --> ChatMessagePromptTemplate
HumanMessagePromptTemplate
AIMessagePromptTemplate
SystemMessagePromptTemplate
""" # noqa: E501
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.prompts.base import (
BasePromptTemplate,
aformat_document,
format_document,
)
from langchain_core.prompts.chat import (
AIMessagePromptTemplate,
BaseChatPromptTemplate,
ChatMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
from langchain_core.prompts.dict import DictPromptTemplate
from langchain_core.prompts.few_shot import (
FewShotChatMessagePromptTemplate,
FewShotPromptTemplate,
)
from langchain_core.prompts.few_shot_with_templates import (
FewShotPromptWithTemplates,
)
from langchain_core.prompts.loading import load_prompt
from langchain_core.prompts.pipeline import PipelinePromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.prompts.string import (
StringPromptTemplate,
check_valid_template,
get_template_variables,
jinja2_formatter,
validate_jinja2,
)
__all__ = (
"AIMessagePromptTemplate",
"BaseChatPromptTemplate",
"BasePromptTemplate",
"ChatMessagePromptTemplate",
"ChatPromptTemplate",
"DictPromptTemplate",
"FewShotPromptTemplate",
"FewShotPromptWithTemplates",
"FewShotChatMessagePromptTemplate",
"HumanMessagePromptTemplate",
"MessagesPlaceholder",
"PipelinePromptTemplate",
"PromptTemplate",
"StringPromptTemplate",
"SystemMessagePromptTemplate",
"load_prompt",
"format_document",
"aformat_document",
"check_valid_template",
"get_template_variables",
"jinja2_formatter",
"validate_jinja2",
)
_dynamic_imports = {
"BasePromptTemplate": "base",
"format_document": "base",
"aformat_document": "base",
"AIMessagePromptTemplate": "chat",
"BaseChatPromptTemplate": "chat",
"ChatMessagePromptTemplate": "chat",
"ChatPromptTemplate": "chat",
"DictPromptTemplate": "dict",
"HumanMessagePromptTemplate": "chat",
"MessagesPlaceholder": "chat",
"SystemMessagePromptTemplate": "chat",
"FewShotChatMessagePromptTemplate": "few_shot",
"FewShotPromptTemplate": "few_shot",
"FewShotPromptWithTemplates": "few_shot_with_templates",
"load_prompt": "loading",
"PipelinePromptTemplate": "pipeline",
"PromptTemplate": "prompt",
"StringPromptTemplate": "string",
"check_valid_template": "string",
"get_template_variables": "string",
"jinja2_formatter": "string",
"validate_jinja2": "string",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
from typing import Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim):
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
Args:
model: SentenceTransformerModel
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CoSENTLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(CoSENTLoss, self).__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
def get_config_dict(self):
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
@property
def citation(self) -> str:
return """
@online{kexuefm-8847,
title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
author={Su Jianlin},
year={2022},
month={Jan},
url={https://kexue.fm/archives/8847},
}
"""
|
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
from .. import util
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim):
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
:param model: SentenceTransformerModel
:param similarity_fct: Function to compute the PAIRWISE similarity between embeddings. Default is ``util.pairwise_cos_sim``.
:param scale: Output of similarity function is multiplied by scale value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.readers import InputExample
model = SentenceTransformer('bert-base-uncased')
train_examples = [InputExample(texts=['My first sentence', 'My second sentence'], label=1.0),
InputExample(texts=['My third sentence', 'Unrelated sentence'], label=0.3)]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CoSENTLoss(model=model)
"""
super(CoSENTLoss, self).__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
def get_config_dict(self):
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
|
from docarray import DocumentArray
from jina import Executor, requests
from jina.parsers import set_pod_parser
class ProcessExecutor(Executor):
@requests(on='/')
def process(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = doc.text + 'world'
doc.tags['processed'] = True
def _validate_dummy_custom_gateway_response(port, expected):
import requests
resp = requests.get(f'http://127.0.0.1:{port}/').json()
assert resp == expected
def _validate_custom_gateway_process(port, text, expected):
import requests
resp = requests.get(f'http://127.0.0.1:{port}/stream?text={text}').json()
assert resp == expected
# set_pod_parser returns a parser for the worker runtime, which expects a list of ports (because external executors
# can provide multiple ports and hosts). However, this parser is not compatible with ContainerPod, Pod and the worker runtime.
# Should we add a separate parser for Pod?
def _generate_pod_args(cli_split: list = []):
args = set_pod_parser().parse_args(cli_split)
args.host = args.host[0]
args.port_monitoring = args.port_monitoring[0]
return args
|
from docarray import DocumentArray
from jina import Executor, requests
from jina.parsers import set_pod_parser
class ProcessExecutor(Executor):
@requests(on='/')
def process(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = doc.text + 'world'
doc.tags['processed'] = True
def _validate_dummy_custom_gateway_response(port, expected):
import requests
resp = requests.get(f'http://127.0.0.1:{port}/').json()
assert resp == expected
def _validate_custom_gateway_process(port, text, expected):
import requests
resp = requests.get(f'http://127.0.0.1:{port}/stream?text={text}').json()
assert resp == expected
# set_pod_parser returns a parser for the worker runtime, which expects a list of ports (because external executors
# can provide multiple ports and hosts). However, this parser is not compatible with ContainerPod, Pod and the worker runtime.
# Should we add a separate parser for Pod?
def _generate_pod_args(cli_split: list = []):
args = set_pod_parser().parse_args(cli_split)
args.host = args.host[0]
args.port = args.port[0]
args.port_monitoring = args.port_monitoring[0]
return args
|
# Copyright (c) OpenMMLab. All rights reserved.
from .csp_darknet import CSPDarknet
from .darknet import Darknet
from .detectors_resnet import DetectoRS_ResNet
from .detectors_resnext import DetectoRS_ResNeXt
from .efficientnet import EfficientNet
from .hourglass import HourglassNet
from .hrnet import HRNet
from .mobilenet_v2 import MobileNetV2
from .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2
from .regnet import RegNet
from .res2net import Res2Net
from .resnest import ResNeSt
from .resnet import ResNet, ResNetV1d
from .resnext import ResNeXt
from .ssd_vgg import SSDVGG
from .swin import SwinTransformer
from .trident_resnet import TridentResNet
__all__ = [
'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet',
'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet',
'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet',
'SwinTransformer', 'PyramidVisionTransformer',
'PyramidVisionTransformerV2', 'EfficientNet'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .csp_darknet import CSPDarknet
from .darknet import Darknet
from .detectors_resnet import DetectoRS_ResNet
from .detectors_resnext import DetectoRS_ResNeXt
from .hourglass import HourglassNet
from .hrnet import HRNet
from .mobilenet_v2 import MobileNetV2
from .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2
from .regnet import RegNet
from .res2net import Res2Net
from .resnest import ResNeSt
from .resnet import ResNet, ResNetV1d
from .resnext import ResNeXt
from .ssd_vgg import SSDVGG
from .swin import SwinTransformer
from .trident_resnet import TridentResNet
__all__ = [
'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet',
'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet',
'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet',
'SwinTransformer', 'PyramidVisionTransformer', 'PyramidVisionTransformerV2'
]
|
from docarray import BaseDoc
from docarray.typing import ID
def test_set_id():
class MyDocument(BaseDoc):
id: ID
d = MyDocument(id="123")
assert isinstance(d.id, ID)
assert d.id == "123"
|
from docarray import BaseDoc
from docarray.typing import ID
def test_set_id():
class MyDocument(BaseDoc):
id: ID
d = MyDocument(id="123")
assert isinstance(d.id, ID)
assert d.id == "123"
|
import multiprocessing
import socket
import sys
from threading import Thread
import numpy as np
import pytest
import xgboost as xgb
from xgboost import RabitTracker, build_info, federated
from xgboost import testing as tm
def run_rabit_worker(rabit_env, world_size):
with xgb.collective.CommunicatorContext(**rabit_env):
assert xgb.collective.get_world_size() == world_size
assert xgb.collective.is_distributed()
assert xgb.collective.get_processor_name() == socket.gethostname()
ret = xgb.collective.broadcast("test1234", 0)
assert str(ret) == "test1234"
ret = xgb.collective.allreduce(np.asarray([1, 2, 3]), xgb.collective.Op.SUM)
assert np.array_equal(ret, np.asarray([2, 4, 6]))
def test_rabit_communicator() -> None:
world_size = 2
tracker = RabitTracker(host_ip="127.0.0.1", n_workers=world_size)
tracker.start()
workers = []
for _ in range(world_size):
worker = multiprocessing.Process(
target=run_rabit_worker, args=(tracker.worker_args(), world_size)
)
workers.append(worker)
worker.start()
for worker in workers:
worker.join()
assert worker.exitcode == 0
def run_federated_worker(port: int, world_size: int, rank: int) -> None:
with xgb.collective.CommunicatorContext(
dmlc_communicator="federated",
federated_server_address=f"localhost:{port}",
federated_world_size=world_size,
federated_rank=rank,
):
assert xgb.collective.get_world_size() == world_size
assert xgb.collective.is_distributed()
assert xgb.collective.get_processor_name() == f"rank:{rank}"
bret = xgb.collective.broadcast("test1234", 0)
assert str(bret) == "test1234"
aret = xgb.collective.allreduce(np.asarray([1, 2, 3]), xgb.collective.Op.SUM)
assert np.array_equal(aret, np.asarray([2, 4, 6]))
@pytest.mark.skipif(**tm.skip_win())
def test_federated_communicator():
if not build_info()["USE_FEDERATED"]:
pytest.skip("XGBoost not built with federated learning enabled")
port = 9091
world_size = 2
tracker = multiprocessing.Process(
target=federated.run_federated_server,
kwargs={"port": port, "n_workers": world_size},
)
tracker.start()
if not tracker.is_alive():
raise Exception("Error starting Federated Learning server")
workers = []
for rank in range(world_size):
worker = multiprocessing.Process(
target=run_federated_worker, args=(port, world_size, rank)
)
workers.append(worker)
worker.start()
for worker in workers:
worker.join()
assert worker.exitcode == 0
|
import multiprocessing
import socket
import sys
import time
import numpy as np
import pytest
import xgboost as xgb
from xgboost import RabitTracker, build_info, federated
if sys.platform.startswith("win"):
pytest.skip("Skipping collective tests on Windows", allow_module_level=True)
def run_rabit_worker(rabit_env, world_size):
with xgb.collective.CommunicatorContext(**rabit_env):
assert xgb.collective.get_world_size() == world_size
assert xgb.collective.is_distributed()
assert xgb.collective.get_processor_name() == socket.gethostname()
ret = xgb.collective.broadcast('test1234', 0)
assert str(ret) == 'test1234'
ret = xgb.collective.allreduce(np.asarray([1, 2, 3]), xgb.collective.Op.SUM)
assert np.array_equal(ret, np.asarray([2, 4, 6]))
def test_rabit_communicator():
world_size = 2
tracker = RabitTracker(host_ip='127.0.0.1', n_workers=world_size)
tracker.start(world_size)
workers = []
for _ in range(world_size):
worker = multiprocessing.Process(target=run_rabit_worker,
args=(tracker.worker_envs(), world_size))
workers.append(worker)
worker.start()
for worker in workers:
worker.join()
assert worker.exitcode == 0
def run_federated_worker(port, world_size, rank):
with xgb.collective.CommunicatorContext(xgboost_communicator='federated',
federated_server_address=f'localhost:{port}',
federated_world_size=world_size,
federated_rank=rank):
assert xgb.collective.get_world_size() == world_size
assert xgb.collective.is_distributed()
assert xgb.collective.get_processor_name() == f'rank{rank}'
ret = xgb.collective.broadcast('test1234', 0)
assert str(ret) == 'test1234'
ret = xgb.collective.allreduce(np.asarray([1, 2, 3]), xgb.collective.Op.SUM)
assert np.array_equal(ret, np.asarray([2, 4, 6]))
def test_federated_communicator():
if not build_info()["USE_FEDERATED"]:
pytest.skip("XGBoost not built with federated learning enabled")
port = 9091
world_size = 2
server = multiprocessing.Process(target=xgb.federated.run_federated_server, args=(port, world_size))
server.start()
time.sleep(1)
if not server.is_alive():
raise Exception("Error starting Federated Learning server")
workers = []
for rank in range(world_size):
worker = multiprocessing.Process(target=run_federated_worker,
args=(port, world_size, rank))
workers.append(worker)
worker.start()
for worker in workers:
worker.join()
assert worker.exitcode == 0
server.terminate()
|
import functools
from typing import (
Optional,
TYPE_CHECKING,
Iterable,
Callable,
Dict,
)
from docarray.array.storage.base.backend import BaseBackendMixin
from docarray import Document
if TYPE_CHECKING:
from docarray.typing import (
DocumentArraySourceType,
)
def needs_id2offset_rebuild(func) -> Callable:
# self._id2offset needs to be rebuilt after every insert or delete
    # this flag allows the rebuild to happen lazily and the result to be cached
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
self._needs_id2offset_rebuild = True
return func(self, *args, **kwargs)
return wrapper
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
@property
def _id2offset(self) -> Dict[str, int]:
"""Return the `_id_to_index` map
:return: a Python dict.
"""
if self._needs_id2offset_rebuild:
self._rebuild_id2offset()
return self._id_to_index
def _rebuild_id2offset(self) -> None:
"""Update the id_to_index map by enumerating all Documents in self._data.
        Very costly! Only use this function when self._data has changed dramatically.
"""
self._id_to_index = {
d.id: i for i, d in enumerate(self._data)
} # type: Dict[str, int]
self._needs_id2offset_rebuild = False
@needs_id2offset_rebuild
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
copy: bool = False,
*args,
**kwargs
):
from docarray.array.memory import DocumentArrayInMemory
super()._init_storage(_docs, copy=copy, *args, **kwargs)
self._data = []
self._id_to_index = {}
if _docs is None:
return
elif isinstance(
_docs,
Iterable,
):
if copy:
self._data = [Document(d, copy=True) for d in _docs]
elif isinstance(_docs, DocumentArrayInMemory):
self._data = _docs._data
self._id_to_index = _docs._id2offset
self._needs_id2offset_rebuild = _docs._needs_id2offset_rebuild
else:
self.extend(_docs)
else:
if isinstance(_docs, Document):
if copy:
self.append(Document(_docs, copy=True))
else:
self.append(_docs)
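# Note on the lazy-rebuild contract above (descriptive only): methods wrapped with
# @needs_id2offset_rebuild merely flip `_needs_id2offset_rebuild`; the id-to-offset map
# is recomputed on the next access of `_id2offset`, so repeated mutations do not trigger
# repeated rebuilds.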
|
import functools
from typing import (
Optional,
TYPE_CHECKING,
Iterable,
Callable,
Dict,
)
from ..base.backend import BaseBackendMixin
from .... import Document
if TYPE_CHECKING:
from ....typing import (
DocumentArraySourceType,
)
def needs_id2offset_rebuild(func) -> Callable:
# self._id2offset needs to be rebuilt after every insert or delete
    # this flag allows the rebuild to happen lazily and the result to be cached
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
self._needs_id2offset_rebuild = True
return func(self, *args, **kwargs)
return wrapper
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
@property
def _id2offset(self) -> Dict[str, int]:
"""Return the `_id_to_index` map
:return: a Python dict.
"""
if self._needs_id2offset_rebuild:
self._rebuild_id2offset()
return self._id_to_index
def _rebuild_id2offset(self) -> None:
"""Update the id_to_index map by enumerating all Documents in self._data.
        Very costly! Only use this function when self._data has changed dramatically.
"""
self._id_to_index = {
d.id: i for i, d in enumerate(self._data)
} # type: Dict[str, int]
self._needs_id2offset_rebuild = False
@needs_id2offset_rebuild
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
copy: bool = False,
*args,
**kwargs
):
from docarray.array.memory import DocumentArrayInMemory
super()._init_storage(_docs, copy=copy, *args, **kwargs)
self._data = []
self._id_to_index = {}
if _docs is None:
return
elif isinstance(
_docs,
Iterable,
):
if copy:
self._data = [Document(d, copy=True) for d in _docs]
elif isinstance(_docs, DocumentArrayInMemory):
self._data = _docs._data
self._id_to_index = _docs._id2offset
self._needs_id2offset_rebuild = _docs._needs_id2offset_rebuild
else:
self.extend(_docs)
else:
if isinstance(_docs, Document):
if copy:
self.append(Document(_docs, copy=True))
else:
self.append(_docs)
|
from pathlib import Path
import pytest
from jina import Document, DocumentArray, Executor
from jina.excepts import BadDocType
from ...vad_speech_segmenter import VADSpeechSegmenter
def test_load():
segmenter = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert type(segmenter).__name__ == 'VADSpeechSegmenter'
@pytest.mark.parametrize('_type', ['wav', 'mp3', 'blob', '', None])
def test_segment(build_da, segmenter, _type):
if _type == '':
with pytest.raises(
BadDocType, match='doc needs to have either a blob or a wav/mp3 uri'
):
segmenter.segment(DocumentArray(Document()))
return
elif _type is None:
segmenter.segment(DocumentArray())
return
docs = build_da(_type)
segmenter.segment(docs)
# assert doc has 4 chunks
for doc in docs:
assert len(doc.chunks) == 4
file_paths = [
f'doc_{_type}_original.wav',
f'chunk_{_type}_0_56500.wav',
f'chunk_{_type}_69500_92000.wav',
f'chunk_{_type}_94500_213000.wav',
f'chunk_{_type}_223500_270500.wav',
]
# assert dumped files exist
for file_path in file_paths:
assert (Path(segmenter.workspace) / f'audio/{file_path}').is_file()
    # assert exception is raised when doc blob is provided but sample rate is not
if _type == 'blob':
docs[0].tags.pop('sample_rate')
with pytest.raises(
BadDocType, match='data is blob but sample rate is not provided'
):
segmenter.segment(docs)
docs[0].tags['sample_rate'] = 0
with pytest.raises(BadDocType, match='sample rate cannot be 0'):
segmenter.segment(docs)
|
from pathlib import Path
import pytest
from jina import Executor, DocumentArray, Document
from jina.excepts import BadDocType
from ...vad_speech_segmenter import VADSpeechSegmenter
def test_load():
segmenter = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert type(segmenter).__name__ == 'VADSpeechSegmenter'
@pytest.mark.parametrize('_type', ['wav', 'mp3', 'blob', '', None])
def test_segment(build_da, segmenter, _type):
if _type == '':
with pytest.raises(
BadDocType, match='doc needs to have either a blob or a wav/mp3 uri'
):
segmenter.segment(DocumentArray(Document()))
return
elif _type is None:
segmenter.segment(DocumentArray())
return
docs = build_da(_type)
segmenter.segment(docs)
# assert doc has 4 chunks
for doc in docs:
assert len(doc.chunks) == 4
file_paths = [
f'doc_{_type}_original.wav',
f'chunk_{_type}_0_56500.wav',
f'chunk_{_type}_69500_92000.wav',
f'chunk_{_type}_94500_213000.wav',
f'chunk_{_type}_223500_270500.wav',
]
# assert dumped files exist
for file_path in file_paths:
assert (Path(segmenter.workspace) / f'audio/{file_path}').is_file()
    # assert exception is raised when doc blob is provided but sample rate is not
if _type == 'blob':
docs[0].tags.pop('sample_rate')
with pytest.raises(
BadDocType, match='data is blob but sample rate is not provided'
):
segmenter.segment(docs)
docs[0].tags['sample_rate'] = 0
with pytest.raises(BadDocType, match='sample rate cannot be 0'):
segmenter.segment(docs)
|
from typing import Optional
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.helper import (
_access_path_dict_to_nested_dict,
_access_path_to_dict,
_dict_to_access_paths,
_is_access_path_valid,
_update_nested_dicts,
get_paths,
)
@pytest.fixture()
def nested_doc():
class Inner(BaseDocument):
img: Optional[ImageDoc]
class Middle(BaseDocument):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDocument):
img: Optional[ImageDoc]
middle: Optional[Middle]
da: DocumentArray[Inner]
doc = Outer(
img=ImageDoc(),
middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc())),
da=DocumentArray[Inner]([Inner(img=ImageDoc(url='test.png'))]),
)
return doc
def test_is_access_path_valid(nested_doc):
assert _is_access_path_valid(nested_doc.__class__, 'img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__inner__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle')
assert _is_access_path_valid(nested_doc.__class__, 'da__img__url')
def test_is_access_path_not_valid(nested_doc):
assert not _is_access_path_valid(nested_doc.__class__, 'inner')
assert not _is_access_path_valid(nested_doc.__class__, 'some__other__path')
assert not _is_access_path_valid(nested_doc.__class__, 'middle.inner')
def test_get_access_paths():
class Painting(BaseDocument):
title: str
img: ImageDoc
access_paths = Painting._get_access_paths()
assert access_paths == [
'id',
'title',
'img__id',
'img__url',
'img__tensor',
'img__embedding',
'img__bytes_',
]
def test_dict_to_access_paths():
d = {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
casted = _dict_to_access_paths(d)
assert casted == {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
def test_access_path_to_dict():
access_path = 'a__b__c__d__e'
value = 1
result = {'a': {'b': {'c': {'d': {'e': value}}}}}
assert _access_path_to_dict(access_path, value) == result
def test_access_path_dict_to_nested_dict():
d = {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
casted = _access_path_dict_to_nested_dict(d)
assert casted == {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
def test_update_nested_dict():
d1 = {'text': 'hello', 'image': {'tensor': None}}
d2 = {'image': {'url': 'some.png'}}
_update_nested_dicts(d1, d2)
assert d1 == {'text': 'hello', 'image': {'tensor': None, 'url': 'some.png'}}
def test_get_paths():
paths = list(get_paths(patterns='*.py'))
for path in paths:
assert path.endswith('.py')
def test_get_paths_recursive():
paths_rec = list(get_paths(patterns='**', recursive=True))
paths_not_rec = list(get_paths(patterns='**', recursive=False))
assert len(paths_rec) > len(paths_not_rec)
def test_get_paths_exclude():
paths = list(get_paths(patterns='*.py'))
paths_wo_init = list(get_paths(patterns='*.py', exclude_regex='__init__.[a-z]*'))
assert len(paths_wo_init) <= len(paths)
assert '__init__.py' not in paths_wo_init
|
from typing import Optional
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.helper import (
_access_path_dict_to_nested_dict,
_access_path_to_dict,
_dict_to_access_paths,
_is_access_path_valid,
_update_nested_dicts,
)
@pytest.fixture()
def nested_doc():
class Inner(BaseDocument):
img: Optional[ImageDoc]
class Middle(BaseDocument):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDocument):
img: Optional[ImageDoc]
middle: Optional[Middle]
da: DocumentArray[Inner]
doc = Outer(
img=ImageDoc(),
middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc())),
da=DocumentArray[Inner]([Inner(img=ImageDoc(url='test.png'))]),
)
return doc
def test_is_access_path_valid(nested_doc):
assert _is_access_path_valid(nested_doc.__class__, 'img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__inner__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle')
assert _is_access_path_valid(nested_doc.__class__, 'da__img__url')
def test_is_access_path_not_valid(nested_doc):
assert not _is_access_path_valid(nested_doc.__class__, 'inner')
assert not _is_access_path_valid(nested_doc.__class__, 'some__other__path')
assert not _is_access_path_valid(nested_doc.__class__, 'middle.inner')
def test_get_access_paths():
class Painting(BaseDocument):
title: str
img: ImageDoc
access_paths = Painting._get_access_paths()
assert access_paths == [
'id',
'title',
'img__id',
'img__url',
'img__tensor',
'img__embedding',
'img__bytes_',
]
def test_dict_to_access_paths():
d = {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
casted = _dict_to_access_paths(d)
assert casted == {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
def test_access_path_to_dict():
access_path = 'a__b__c__d__e'
value = 1
result = {'a': {'b': {'c': {'d': {'e': value}}}}}
assert _access_path_to_dict(access_path, value) == result
def test_access_path_dict_to_nested_dict():
d = {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
casted = _access_path_dict_to_nested_dict(d)
assert casted == {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
def test_update_nested_dict():
d1 = {'text': 'hello', 'image': {'tensor': None}}
d2 = {'image': {'url': 'some.png'}}
_update_nested_dicts(d1, d2)
assert d1 == {'text': 'hello', 'image': {'tensor': None, 'url': 'some.png'}}
|
# Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formating import (Collect, DefaultFormatBundle, ImageToTensor,
ToDataContainer, ToTensor, Transpose, to_tensor)
from .instaboost import InstaBoost
from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles, LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, MixUp, Mosaic,
Normalize, Pad, PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomFlip,
RandomShift, Resize, SegRescale, YOLOXHSVRandomAug)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
'LoadImageFromFile', 'LoadImageFromWebcam',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug',
'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale',
'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu',
'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear',
'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform',
'ContrastTransform', 'Translate', 'RandomShift', 'Mosaic', 'MixUp',
'RandomAffine', 'YOLOXHSVRandomAug'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formating import (Collect, DefaultFormatBundle, ImageToTensor,
ToDataContainer, ToTensor, Transpose, to_tensor)
from .instaboost import InstaBoost
from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles, LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, MixUp, Mosaic,
Normalize, Pad, PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomFlip,
RandomShift, Resize, SegRescale)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
'LoadImageFromFile', 'LoadImageFromWebcam',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug',
'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale',
'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu',
'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear',
'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform',
'ContrastTransform', 'Translate', 'RandomShift', 'Mosaic', 'MixUp',
'RandomAffine'
]
|
"""
=========================
Tensor transforms and JIT
=========================
.. note::
    Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_scripted_tensor_transforms.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_others_plot_scripted_tensor_transforms.py>` to download the full example code.
This example illustrates various features that are now supported by the
:ref:`image transformations <transforms>` on Tensor images. In particular, we
show how image transforms can be performed on GPU, and how one can also script
them using JIT compilation.
Prior to v0.8.0, transforms in torchvision had traditionally been PIL-centric
and presented multiple limitations as a result. Since v0.8.0, the transform
implementations are Tensor and PIL compatible, which enables the following
new features:
- transform multi-band torch tensor images (with more than 3-4 channels)
- torchscript transforms together with your model for deployment
- support for GPU acceleration
- batched transformation such as for videos
- read and decode data directly as torch tensor with torchscript support (for PNG and JPEG image formats)
.. note::
These features are only possible with **Tensor** images.
"""
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision.transforms as T
from torchvision.io import read_image
plt.rcParams["savefig.bbox"] = 'tight'
torch.manual_seed(1)
def show(imgs):
fix, axs = plt.subplots(ncols=len(imgs), squeeze=False)
for i, img in enumerate(imgs):
img = T.ToPILImage()(img.to('cpu'))
axs[0, i].imshow(np.asarray(img))
axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
# %%
# The :func:`~torchvision.io.read_image` function allows reading an image and
# directly loading it as a tensor
dog1 = read_image(str(Path('../assets') / 'dog1.jpg'))
dog2 = read_image(str(Path('../assets') / 'dog2.jpg'))
show([dog1, dog2])
# %%
# Transforming images on GPU
# --------------------------
# Most transforms natively support tensors on top of PIL images (to visualize
# the effect of the transforms, you may refer to
# :ref:`sphx_glr_auto_examples_others_plot_transforms.py`).
# Using tensor images, we can run the transforms on GPUs if CUDA is available!
import torch.nn as nn
transforms = torch.nn.Sequential(
T.RandomCrop(224),
T.RandomHorizontalFlip(p=0.3),
)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
dog1 = dog1.to(device)
dog2 = dog2.to(device)
transformed_dog1 = transforms(dog1)
transformed_dog2 = transforms(dog2)
show([transformed_dog1, transformed_dog2])
# %%
# Scriptable transforms for easier deployment via torchscript
# -----------------------------------------------------------
# We now show how to combine image transformations and a model forward pass,
# while using ``torch.jit.script`` to obtain a single scripted module.
#
# Let's define a ``Predictor`` module that transforms the input tensor and then
# applies an ImageNet model on it.
from torchvision.models import resnet18, ResNet18_Weights
class Predictor(nn.Module):
def __init__(self):
super().__init__()
weights = ResNet18_Weights.DEFAULT
self.resnet18 = resnet18(weights=weights, progress=False).eval()
self.transforms = weights.transforms()
def forward(self, x: torch.Tensor) -> torch.Tensor:
with torch.no_grad():
x = self.transforms(x)
y_pred = self.resnet18(x)
return y_pred.argmax(dim=1)
# %%
# Now, let's define scripted and non-scripted instances of ``Predictor`` and
# apply them to multiple tensor images of the same size
predictor = Predictor().to(device)
scripted_predictor = torch.jit.script(predictor).to(device)
batch = torch.stack([dog1, dog2]).to(device)
res = predictor(batch)
res_scripted = scripted_predictor(batch)
# %%
# We can verify that the predictions of the scripted and non-scripted models are
# the same:
import json
with open(Path('../assets') / 'imagenet_class_index.json') as labels_file:
labels = json.load(labels_file)
for i, (pred, pred_scripted) in enumerate(zip(res, res_scripted)):
assert pred == pred_scripted
print(f"Prediction for Dog {i + 1}: {labels[str(pred.item())]}")
# %%
# Since the model is scripted, it can easily be dumped to disk and re-used
import tempfile
with tempfile.NamedTemporaryFile() as f:
scripted_predictor.save(f.name)
dumped_scripted_predictor = torch.jit.load(f.name)
res_scripted_dumped = dumped_scripted_predictor(batch)
assert (res_scripted_dumped == res_scripted).all()
|
"""
=========================
Tensor transforms and JIT
=========================
This example illustrates various features that are now supported by the
:ref:`image transformations <transforms>` on Tensor images. In particular, we
show how image transforms can be performed on GPU, and how one can also script
them using JIT compilation.
Prior to v0.8.0, transforms in torchvision had traditionally been PIL-centric
and presented multiple limitations as a result. Since v0.8.0, the transform
implementations are Tensor and PIL compatible, which enables the following
new features:
- transform multi-band torch tensor images (with more than 3-4 channels)
- torchscript transforms together with your model for deployment
- support for GPU acceleration
- batched transformation such as for videos
- read and decode data directly as torch tensor with torchscript support (for PNG and JPEG image formats)
.. note::
These features are only possible with **Tensor** images.
"""
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision.transforms as T
from torchvision.io import read_image
plt.rcParams["savefig.bbox"] = 'tight'
torch.manual_seed(1)
def show(imgs):
fix, axs = plt.subplots(ncols=len(imgs), squeeze=False)
for i, img in enumerate(imgs):
img = T.ToPILImage()(img.to('cpu'))
axs[0, i].imshow(np.asarray(img))
axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
# %%
# The :func:`~torchvision.io.read_image` function allows reading an image and
# directly loading it as a tensor
dog1 = read_image(str(Path('../assets') / 'dog1.jpg'))
dog2 = read_image(str(Path('../assets') / 'dog2.jpg'))
show([dog1, dog2])
# %%
# Transforming images on GPU
# --------------------------
# Most transforms natively support tensors on top of PIL images (to visualize
# the effect of the transforms, you may refer to
# :ref:`sphx_glr_auto_examples_others_plot_transforms.py`).
# Using tensor images, we can run the transforms on GPUs if CUDA is available!
import torch.nn as nn
transforms = torch.nn.Sequential(
T.RandomCrop(224),
T.RandomHorizontalFlip(p=0.3),
)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
dog1 = dog1.to(device)
dog2 = dog2.to(device)
transformed_dog1 = transforms(dog1)
transformed_dog2 = transforms(dog2)
show([transformed_dog1, transformed_dog2])
# %%
# Scriptable transforms for easier deployment via torchscript
# -----------------------------------------------------------
# We now show how to combine image transformations and a model forward pass,
# while using ``torch.jit.script`` to obtain a single scripted module.
#
# Let's define a ``Predictor`` module that transforms the input tensor and then
# applies an ImageNet model on it.
from torchvision.models import resnet18, ResNet18_Weights
class Predictor(nn.Module):
def __init__(self):
super().__init__()
weights = ResNet18_Weights.DEFAULT
self.resnet18 = resnet18(weights=weights, progress=False).eval()
self.transforms = weights.transforms()
def forward(self, x: torch.Tensor) -> torch.Tensor:
with torch.no_grad():
x = self.transforms(x)
y_pred = self.resnet18(x)
return y_pred.argmax(dim=1)
# %%
# Now, let's define scripted and non-scripted instances of ``Predictor`` and
# apply them to multiple tensor images of the same size
predictor = Predictor().to(device)
scripted_predictor = torch.jit.script(predictor).to(device)
batch = torch.stack([dog1, dog2]).to(device)
res = predictor(batch)
res_scripted = scripted_predictor(batch)
# %%
# We can verify that the predictions of the scripted and non-scripted models are
# the same:
import json
with open(Path('../assets') / 'imagenet_class_index.json') as labels_file:
labels = json.load(labels_file)
for i, (pred, pred_scripted) in enumerate(zip(res, res_scripted)):
assert pred == pred_scripted
print(f"Prediction for Dog {i + 1}: {labels[str(pred.item())]}")
# %%
# Since the model is scripted, it can easily be dumped to disk and re-used
import tempfile
with tempfile.NamedTemporaryFile() as f:
scripted_predictor.save(f.name)
dumped_scripted_predictor = torch.jit.load(f.name)
res_scripted_dumped = dumped_scripted_predictor(batch)
assert (res_scripted_dumped == res_scripted).all()
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import Lumina2Transformer2DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class Lumina2Transformer2DModelTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = Lumina2Transformer2DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2 # N
num_channels = 4 # C
height = width = 16 # H, W
embedding_dim = 32 # D
sequence_length = 16 # L
hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.rand(size=(batch_size,)).to(torch_device)
attention_mask = torch.ones(size=(batch_size, sequence_length), dtype=torch.bool).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
"encoder_attention_mask": attention_mask,
}
@property
def input_shape(self):
return (4, 16, 16)
@property
def output_shape(self):
return (4, 16, 16)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"sample_size": 16,
"patch_size": 2,
"in_channels": 4,
"hidden_size": 24,
"num_layers": 2,
"num_refiner_layers": 1,
"num_attention_heads": 3,
"num_kv_heads": 1,
"multiple_of": 2,
"ffn_dim_multiplier": None,
"norm_eps": 1e-5,
"scaling_factor": 1.0,
"axes_dim_rope": (4, 2, 2),
"axes_lens": (128, 128, 128),
"cap_feat_dim": 32,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"Lumina2Transformer2DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import Lumina2Transformer2DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class Lumina2Transformer2DModelTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = Lumina2Transformer2DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2 # N
num_channels = 4 # C
height = width = 16 # H, W
embedding_dim = 32 # D
sequence_length = 16 # L
hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.rand(size=(batch_size,)).to(torch_device)
attention_mask = torch.ones(size=(batch_size, sequence_length), dtype=torch.bool).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
"attention_mask": attention_mask,
}
@property
def input_shape(self):
return (4, 16, 16)
@property
def output_shape(self):
return (4, 16, 16)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"sample_size": 16,
"patch_size": 2,
"in_channels": 4,
"hidden_size": 24,
"num_layers": 2,
"num_refiner_layers": 1,
"num_attention_heads": 3,
"num_kv_heads": 1,
"multiple_of": 2,
"ffn_dim_multiplier": None,
"norm_eps": 1e-5,
"scaling_factor": 1.0,
"axes_dim_rope": (4, 2, 2),
"axes_lens": (128, 128, 128),
"cap_feat_dim": 32,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"Lumina2Transformer2DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
from typing import Any, Mapping, Optional
from llama_index.readers.airbyte_cdk.base import AirbyteCDKReader, RecordHandler
class AirbyteGongReader(AirbyteCDKReader):
"""
AirbyteGongReader reader.
    Retrieve documents from Gong.
    Args:
        config: The config object for the Gong source.
"""
def __init__(
self,
config: Mapping[str, Any],
record_handler: Optional[RecordHandler] = None,
) -> None:
"""Initialize with parameters."""
import source_gong
super().__init__(
source_class=source_gong.SourceGong,
config=config,
record_handler=record_handler,
)
|
from typing import Any, Mapping, Optional
from llama_index.readers.airbyte_cdk.base import AirbyteCDKReader, RecordHandler
class AirbyteGongReader(AirbyteCDKReader):
"""AirbyteGongReader reader.
    Retrieve documents from Gong.
    Args:
        config: The config object for the Gong source.
"""
def __init__(
self,
config: Mapping[str, Any],
record_handler: Optional[RecordHandler] = None,
) -> None:
"""Initialize with parameters."""
import source_gong
super().__init__(
source_class=source_gong.SourceGong,
config=config,
record_handler=record_handler,
)
|
import csv
import logging
import os
from typing import List
import numpy as np
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CESoftmaxAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
    It is designed for CrossEncoders with 2 or more outputs. It measures the
    accuracy of the predicted class vs. the gold labels.
"""
def __init__(self, sentence_pairs: List[List[str]], labels: List[int], name: str = "", write_csv: bool = True):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.name = name
self.csv_file = "CESoftmaxAccuracyEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Accuracy"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: List[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CESoftmaxAccuracyEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
pred_labels = np.argmax(pred_scores, axis=1)
assert len(pred_labels) == len(self.labels)
acc = np.sum(pred_labels == self.labels) / len(self.labels)
logger.info("Accuracy: {:.2f}".format(acc * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc])
return acc
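# Minimal usage sketch (hypothetical sentences and model variable):
#
#   examples = [InputExample(texts=['A man is eating.', 'A man eats food.'], label=1)]
#   evaluator = CESoftmaxAccuracyEvaluator.from_input_examples(examples, name='dev')
#   accuracy = evaluator(cross_encoder_model, output_path='output/')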
|
import logging
import os
import csv
from typing import List
from ... import InputExample
import numpy as np
logger = logging.getLogger(__name__)
class CESoftmaxAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
    It is designed for CrossEncoders with 2 or more outputs. It measures the
    accuracy of the predicted class vs. the gold labels.
"""
def __init__(self, sentence_pairs: List[List[str]], labels: List[int], name: str = "", write_csv: bool = True):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.name = name
self.csv_file = "CESoftmaxAccuracyEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Accuracy"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: List[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CESoftmaxAccuracyEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
pred_labels = np.argmax(pred_scores, axis=1)
assert len(pred_labels) == len(self.labels)
acc = np.sum(pred_labels == self.labels) / len(self.labels)
logger.info("Accuracy: {:.2f}".format(acc * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc])
return acc
|
__version__ = '0.16.4'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.16.3'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from docarray import BaseDoc
from docarray.typing import AnyEmbedding
def test_set_embedding():
class MyDocument(BaseDoc):
embedding: AnyEmbedding
d = MyDocument(embedding=np.zeros((3, 224, 224)))
assert isinstance(d.embedding, np.ndarray)
assert (d.embedding == np.zeros((3, 224, 224))).all()
|
import numpy as np
from docarray import BaseDoc
from docarray.typing import AnyEmbedding
def test_set_embedding():
class MyDocument(BaseDoc):
embedding: AnyEmbedding
d = MyDocument(embedding=np.zeros((3, 224, 224)))
assert isinstance(d.embedding, np.ndarray)
assert (d.embedding == np.zeros((3, 224, 224))).all()
|
__version__ = '0.13.5'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.13.4'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
from typing import Iterable, Dict, Sequence
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ``__delitem__`` for ``DocumentArrayElastic``"""
MAX_ES_RETURNED_DOCS = 10000
def _document_to_elastic(self, doc: 'Document') -> Dict:
extra_columns = {
col: doc.tags.get(col) for col, _ in self._config.columns.items()
}
request = {
'_op_type': 'index',
'_id': doc.id,
'_index': self._config.index_name,
'embedding': self._map_embedding(doc.embedding),
'blob': doc.to_base64(),
**extra_columns,
}
if self._config.tag_indices:
for index in self._config.tag_indices:
request[index] = doc.tags.get(index)
if doc.text:
request['text'] = doc.text
return request
def _getitem(self, doc_id: str) -> 'Document':
"""Helper method for getting item with elastic as storage
:param doc_id: id of the document
:raises KeyError: raise error when elastic id does not exist in storage
:return: Document
"""
try:
result = self._client.get(index=self._config.index_name, id=doc_id)
doc = Document.from_base64(result['_source']['blob'])
return doc
except Exception as ex:
raise KeyError(doc_id) from ex
def _get_doc_by_id(self, _id: str) -> 'Document':
"""Concrete implementation of base class' ``_get_doc_by_id``
:param _id: the id of the document
:return: the retrieved document from elastic
"""
return self._getitem(_id)
def _get_docs_by_ids(self, ids: Sequence[str]) -> Iterable['Document']:
"""Concrete implementation of base class' ``_get_docs_by_ids``
:param ids: ids of the document
:return: Iterable[Document]
"""
accumulated_docs = []
accumulated_docs_id_not_found = []
if not ids:
return accumulated_docs
        # Handle the case where the number of ids exceeds MAX_ES_RETURNED_DOCS
for pos in range(0, len(ids), self.MAX_ES_RETURNED_DOCS):
es_docs = self._client.mget(
index=self._config.index_name,
ids=ids[pos : pos + self.MAX_ES_RETURNED_DOCS],
)['docs']
for doc in es_docs:
if doc['found']:
accumulated_docs.append(
Document.from_base64(doc['_source']['blob'])
)
else:
accumulated_docs_id_not_found.append(doc['_id'])
if accumulated_docs_id_not_found:
raise KeyError(accumulated_docs_id_not_found, accumulated_docs)
return accumulated_docs
def _set_doc_by_id(self, _id: str, value: 'Document'):
"""Concrete implementation of base class' ``_set_doc_by_id``
:param _id: the id of doc to update
:param value: the document to update to
"""
if _id != value.id:
self._del_doc_by_id(_id)
request = [self._document_to_elastic(value)]
self._send_requests(request)
self._refresh(self._config.index_name)
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
"""Overridden implementation of _set_docs_by_ids in order to add docs in batches and flush at the end
:param ids: the ids used for indexing
"""
for _id, doc in zip(ids, docs):
self._set_doc_by_id(_id, doc)
self._refresh(self._config.index_name)
def _del_doc_by_id(self, _id: str):
"""Concrete implementation of base class' ``_del_doc_by_id``
:param _id: the id of the document to delete
"""
if self._doc_id_exists(_id):
self._client.delete(index=self._config.index_name, id=_id)
self._refresh(self._config.index_name)
def _clear_storage(self):
"""Concrete implementation of base class' ``_clear_storage``"""
self._client.indices.delete(index=self._config.index_name)
def _load_offset2ids(self):
if self._list_like:
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids, list_like=self._list_like)
else:
self._offset2ids = Offset2ID([], list_like=self._list_like)
def _save_offset2ids(self):
if self._list_like:
self._update_offset2ids_meta()
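# Note (descriptive only): `_get_docs_by_ids` above fetches documents via Elasticsearch
# `mget` in slices of at most MAX_ES_RETURNED_DOCS ids per request; ids that are not found
# are collected and reported together via a single KeyError.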
|
from typing import Iterable, Dict, Sequence
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ``__delitem__`` for ``DocumentArrayElastic``"""
MAX_ES_RETURNED_DOCS = 10000
def _document_to_elastic(self, doc: 'Document') -> Dict:
extra_columns = {
col: doc.tags.get(col) for col, _ in self._config.columns.items()
}
request = {
'_op_type': 'index',
'_id': doc.id,
'_index': self._config.index_name,
'embedding': self._map_embedding(doc.embedding),
'blob': doc.to_base64(),
**extra_columns,
}
if self._config.tag_indices:
for index in self._config.tag_indices:
request[index] = doc.tags.get(index)
if doc.text:
request['text'] = doc.text
return request
def _getitem(self, doc_id: str) -> 'Document':
"""Helper method for getting item with elastic as storage
:param doc_id: id of the document
:raises KeyError: raise error when elastic id does not exist in storage
:return: Document
"""
try:
result = self._client.get(index=self._config.index_name, id=doc_id)
doc = Document.from_base64(result['_source']['blob'])
return doc
except Exception as ex:
raise KeyError(doc_id) from ex
def _get_doc_by_id(self, _id: str) -> 'Document':
"""Concrete implementation of base class' ``_get_doc_by_id``
:param _id: the id of the document
:return: the retrieved document from elastic
"""
return self._getitem(_id)
def _get_docs_by_ids(self, ids: Sequence[str]) -> Iterable['Document']:
"""Concrete implementation of base class' ``_get_docs_by_ids``
:param ids: ids of the document
:return: Iterable[Document]
"""
accumulated_docs = []
accumulated_docs_id_not_found = []
if not ids:
return accumulated_docs
        # Handle the case where the number of ids exceeds MAX_ES_RETURNED_DOCS
for pos in range(0, len(ids), self.MAX_ES_RETURNED_DOCS):
es_docs = self._client.mget(
index=self._config.index_name,
ids=ids[pos : pos + self.MAX_ES_RETURNED_DOCS],
)['docs']
for doc in es_docs:
if doc['found']:
accumulated_docs.append(
Document.from_base64(doc['_source']['blob'])
)
else:
accumulated_docs_id_not_found.append(doc['_id'])
if accumulated_docs_id_not_found:
raise KeyError(accumulated_docs_id_not_found, accumulated_docs)
return accumulated_docs
def _set_doc_by_id(self, _id: str, value: 'Document'):
"""Concrete implementation of base class' ``_set_doc_by_id``
:param _id: the id of doc to update
:param value: the document to update to
"""
if _id != value.id:
self._del_doc_by_id(_id)
request = [self._document_to_elastic(value)]
self._send_requests(request)
self._refresh(self._config.index_name)
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
"""Overridden implementation of _set_docs_by_ids in order to add docs in batches and flush at the end
:param ids: the ids used for indexing
"""
for _id, doc in zip(ids, docs):
self._set_doc_by_id(_id, doc)
self._refresh(self._config.index_name)
def _del_doc_by_id(self, _id: str):
"""Concrete implementation of base class' ``_del_doc_by_id``
:param _id: the id of the document to delete
"""
if self._doc_id_exists(_id):
self._client.delete(index=self._config.index_name, id=_id)
self._refresh(self._config.index_name)
def _clear_storage(self):
"""Concrete implementation of base class' ``_clear_storage``"""
self._client.indices.delete(index=self._config.index_name)
def _load_offset2ids(self):
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
model = dict(
type='LAD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
# student
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(type='AmpOptimWrapper', optimizer=dict(lr=0.01))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
model = dict(
type='LAD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
# student
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(type='AmpOptimWrapper', optimizer=dict(lr=0.01))
# TODO: MMEngine does not support fp16 yet.
# fp16 = dict(loss_scale=512.)
|
from contextlib import contextmanager
from functools import partial
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import skipIfNoModule, TorchaudioTestCase
from .utils import MockCustomDataset, MockDataloader, MockSentencePieceProcessor
if is_module_available("pytorch_lightning", "sentencepiece"):
from asr.emformer_rnnt.tedlium3.lightning import TEDLIUM3RNNTModule
class MockTEDLIUM:
def __init__(self, *args, **kwargs):
pass
def __getitem__(self, n: int):
return (
torch.rand(1, 32640),
16000,
"sup",
2,
3,
4,
)
def __len__(self):
return 10
@contextmanager
def get_lightning_module():
with patch("sentencepiece.SentencePieceProcessor", new=partial(MockSentencePieceProcessor, num_symbols=500)), patch(
"asr.emformer_rnnt.tedlium3.lightning.GlobalStatsNormalization", new=torch.nn.Identity
), patch("torchaudio.datasets.TEDLIUM", new=MockTEDLIUM), patch(
"asr.emformer_rnnt.tedlium3.lightning.CustomDataset", new=MockCustomDataset
), patch(
"torch.utils.data.DataLoader", new=MockDataloader
):
yield TEDLIUM3RNNTModule(
tedlium_path="tedlium_path",
sp_model_path="sp_model_path",
global_stats_path="global_stats_path",
)
@skipIfNoModule("pytorch_lightning")
@skipIfNoModule("sentencepiece")
class TestTEDLIUM3RNNTModule(TorchaudioTestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
@parameterized.expand(
[
("training_step", "train_dataloader"),
("validation_step", "val_dataloader"),
("test_step", "test_dataloader"),
]
)
def test_step(self, step_fname, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
getattr(lightning_module, step_fname)(batch, 0)
@parameterized.expand(
[
("val_dataloader",),
]
)
def test_forward(self, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
lightning_module(batch)
|
from contextlib import contextmanager
from functools import partial
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import skipIfNoModule, TorchaudioTestCase
from .utils import MockCustomDataset, MockDataloader, MockSentencePieceProcessor
if is_module_available("pytorch_lightning", "sentencepiece"):
from asr.emformer_rnnt.tedlium3.lightning import TEDLIUM3RNNTModule
class MockTEDLIUM:
def __init__(self, *args, **kwargs):
pass
def __getitem__(self, n: int):
return (
torch.rand(1, 32640),
16000,
"sup",
2,
3,
4,
)
def __len__(self):
return 10
@contextmanager
def get_lightning_module():
with patch("sentencepiece.SentencePieceProcessor", new=partial(MockSentencePieceProcessor, num_symbols=500)), patch(
"asr.emformer_rnnt.tedlium3.lightning.GlobalStatsNormalization", new=torch.nn.Identity
), patch("torchaudio.datasets.TEDLIUM", new=MockTEDLIUM), patch(
"asr.emformer_rnnt.tedlium3.lightning.CustomDataset", new=MockCustomDataset
), patch(
"torch.utils.data.DataLoader", new=MockDataloader
):
yield TEDLIUM3RNNTModule(
tedlium_path="tedlium_path",
sp_model_path="sp_model_path",
global_stats_path="global_stats_path",
)
@skipIfNoModule("pytorch_lightning")
@skipIfNoModule("sentencepiece")
class TestTEDLIUM3RNNTModule(TorchaudioTestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
torch.random.manual_seed(31)
@parameterized.expand(
[
("training_step", "train_dataloader"),
("validation_step", "val_dataloader"),
("test_step", "test_dataloader"),
]
)
def test_step(self, step_fname, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
getattr(lightning_module, step_fname)(batch, 0)
@parameterized.expand(
[
("val_dataloader",),
]
)
def test_forward(self, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
lightning_module(batch)
|
# Copyright (c) OpenMMLab. All rights reserved.
import importlib
import os.path as osp
import subprocess
import pkg_resources
from pkg_resources import get_distribution
def is_installed(package: str) -> bool:
"""Check package whether installed.
Args:
package (str): Name of package to be checked.
"""
# refresh the pkg_resources
    # more details at https://github.com/pypa/setuptools/issues/373
importlib.reload(pkg_resources)
try:
get_distribution(package)
return True
except pkg_resources.DistributionNotFound:
return False
def get_installed_path(package: str) -> str:
"""Get installed path of package.
Args:
package (str): Name of package.
Example:
        >>> get_installed_path('mmcls')
        '.../lib/python3.7/site-packages/mmcls'
"""
    # If the package name is not the same as the module name, the module name
    # should be inferred. For example, mmcv-full is the package name, but mmcv
    # is the module name. To get the installed path of mmcv-full, we should
    # join pkg.location with the module name.
pkg = get_distribution(package)
possible_path = osp.join(pkg.location, package)
if osp.exists(possible_path):
return possible_path
else:
return osp.join(pkg.location, package2module(package))
def package2module(package: str):
"""Infer module name from package.
Args:
package (str): Package to infer module name.
"""
pkg = get_distribution(package)
if pkg.has_metadata('top_level.txt'):
module_name = pkg.get_metadata('top_level.txt').split('\n')[0]
return module_name
else:
raise ValueError(f'can not infer the module name of {package}')
def call_command(cmd: list) -> None:
try:
subprocess.check_call(cmd)
except Exception as e:
raise e # type: ignore
def install_package(package: str):
if not is_installed(package):
call_command(['python', '-m', 'pip', 'install', package])
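# A minimal usage sketch of the helpers above; 'numpy' is only an illustrative
# package name, not something this module depends on.
if __name__ == '__main__':
    demo_pkg = 'numpy'
    if is_installed(demo_pkg):
        print(f'{demo_pkg} is installed at {get_installed_path(demo_pkg)}')
    else:
        install_package(demo_pkg)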
|
# Copyright (c) OpenMMLab. All rights reserved.
import importlib
import os.path as osp
import subprocess
import pkg_resources
from pkg_resources import get_distribution
def is_installed(package: str) -> bool:
"""Check package whether installed.
Args:
package (str): Name of package to be checked.
"""
# refresh the pkg_resources
    # more details at https://github.com/pypa/setuptools/issues/373
importlib.reload(pkg_resources)
try:
get_distribution(package)
return True
except pkg_resources.DistributionNotFound:
return False
def get_installed_path(package: str) -> str:
"""Get installed path of package.
Args:
package (str): Name of package.
Example:
        >>> get_installed_path('mmcls')
        '.../lib/python3.7/site-packages/mmcls'
"""
    # If the package name is not the same as the module name, the module name
    # should be inferred. For example, mmcv-full is the package name, but mmcv
    # is the module name. To get the installed path of mmcv-full, we should
    # join pkg.location with the module name.
pkg = get_distribution(package)
possible_path = osp.join(pkg.location, package)
if osp.exists(possible_path):
return possible_path
else:
return osp.join(pkg.location, package2module(package))
def package2module(package: str):
"""Infer module name from package.
Args:
package (str): Package to infer module name.
"""
pkg = get_distribution(package)
if pkg.has_metadata('top_level.txt'):
module_name = pkg.get_metadata('top_level.txt').split('\n')[0]
return module_name
else:
raise ValueError(f'can not infer the module name of {package}')
def call_command(cmd: list) -> None:
try:
subprocess.check_call(cmd)
except Exception as e:
raise e # type: ignore
def check_install_package(package: str):
if not is_installed(package):
call_command(['python', '-m', 'pip', 'install', package])
|
from typing import Generator, Optional
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
from docarray.typing import ImageUrl, NdArray
from docarray.utils.map import map_docs, map_docs_batched
from tests.units.typing.test_bytes import IMAGE_PATHS
N_DOCS = 2
def load_from_doc(d: ImageDoc) -> ImageDoc:
if d.url is not None:
d.tensor = d.url.load()
return d
@pytest.fixture()
def da():
da = DocArray[ImageDoc]([ImageDoc(url=IMAGE_PATHS['png']) for _ in range(N_DOCS)])
return da
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map(da, backend):
for tensor in da.tensor:
assert tensor is None
docs = list(map_docs(da=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for doc in docs:
assert doc.tensor is not None
def test_map_multiprocessing_lambda_func_raise_exception(da):
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(da=da, func=lambda x: x, backend='process'))
def test_map_multiprocessing_local_func_raise_exception(da):
def local_func(x):
return x
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(da=da, func=local_func, backend='process'))
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_check_order(backend):
da = DocArray[ImageDoc]([ImageDoc(id=i) for i in range(N_DOCS)])
docs = list(map_docs(da=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for i, doc in enumerate(docs):
assert doc.id == str(i)
def load_from_da(da: DocArray) -> DocArray:
for doc in da:
doc.tensor = doc.url.load()
return da
class MyImage(BaseDoc):
tensor: Optional[NdArray]
url: ImageUrl
@pytest.mark.slow
@pytest.mark.parametrize('n_docs,batch_size', [(10, 5), (10, 8)])
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map_docs_batched(n_docs, batch_size, backend):
da = DocArray[MyImage]([MyImage(url=IMAGE_PATHS['png']) for _ in range(n_docs)])
it = map_docs_batched(
da=da, func=load_from_da, batch_size=batch_size, backend=backend
)
assert isinstance(it, Generator)
for batch in it:
assert isinstance(batch, DocArray[MyImage])
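# A minimal sketch of the batched mapping exercised above, assuming the same
# docarray version as this test; it reuses MyImage, load_from_da and IMAGE_PATHS
# from this file, and 4 docs / batch_size 2 are arbitrary illustrative numbers.
def _sketch_map_docs_batched():
    da = DocArray[MyImage]([MyImage(url=IMAGE_PATHS['png']) for _ in range(4)])
    # each yielded batch is itself a DocArray[MyImage] with at most batch_size docs
    for batch in map_docs_batched(da=da, func=load_from_da, batch_size=2, backend='thread'):
        assert len(batch) <= 2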
|
from typing import Generator, Optional
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
from docarray.typing import ImageUrl, NdArray
from docarray.utils.map import map_docs, map_docs_batch
from tests.units.typing.test_bytes import IMAGE_PATHS
N_DOCS = 2
def load_from_doc(d: ImageDoc) -> ImageDoc:
if d.url is not None:
d.tensor = d.url.load()
return d
@pytest.fixture()
def da():
da = DocArray[ImageDoc]([ImageDoc(url=IMAGE_PATHS['png']) for _ in range(N_DOCS)])
return da
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map(da, backend):
for tensor in da.tensor:
assert tensor is None
docs = list(map_docs(da=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for doc in docs:
assert doc.tensor is not None
def test_map_multiprocessing_lambda_func_raise_exception(da):
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(da=da, func=lambda x: x, backend='process'))
def test_map_multiprocessing_local_func_raise_exception(da):
def local_func(x):
return x
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(da=da, func=local_func, backend='process'))
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_check_order(backend):
da = DocArray[ImageDoc]([ImageDoc(id=i) for i in range(N_DOCS)])
docs = list(map_docs(da=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for i, doc in enumerate(docs):
assert doc.id == str(i)
def load_from_da(da: DocArray) -> DocArray:
for doc in da:
doc.tensor = doc.url.load()
return da
class MyImage(BaseDoc):
tensor: Optional[NdArray]
url: ImageUrl
@pytest.mark.slow
@pytest.mark.parametrize('n_docs,batch_size', [(10, 5), (10, 8)])
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map_docs_batch(n_docs, batch_size, backend):
da = DocArray[MyImage]([MyImage(url=IMAGE_PATHS['png']) for _ in range(n_docs)])
it = map_docs_batch(
da=da, func=load_from_da, batch_size=batch_size, backend=backend
)
assert isinstance(it, Generator)
for batch in it:
assert isinstance(batch, DocArray[MyImage])
|
from typing import Optional
from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore
from llama_index.core.storage.docstore.types import DEFAULT_BATCH_SIZE
from llama_index.storage.kvstore.elasticsearch import ElasticsearchKVStore
class ElasticsearchDocumentStore(KVDocumentStore):
"""
Elasticsearch Document (Node) store.
An Elasticsearch store for Document and Node objects.
Args:
elasticsearch_kvstore (ElasticsearchKVStore): Elasticsearch key-value store
namespace (str): namespace for the docstore
"""
def __init__(
self,
elasticsearch_kvstore: ElasticsearchKVStore,
namespace: Optional[str] = None,
        node_collection_index: Optional[str] = None,
        ref_doc_collection_index: Optional[str] = None,
        metadata_collection_index: Optional[str] = None,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
"""Init a ElasticsearchDocumentStore."""
super().__init__(
elasticsearch_kvstore, namespace=namespace, batch_size=batch_size
)
if node_collection_index:
self._node_collection = node_collection_index
else:
self._node_collection = f"llama_index-docstore.data-{self._namespace}"
if ref_doc_collection_index:
self._ref_doc_collection = ref_doc_collection_index
else:
self._ref_doc_collection = (
f"llama_index-docstore.ref_doc_info-{self._namespace}"
)
if metadata_collection_index:
self._metadata_collection = metadata_collection_index
else:
self._metadata_collection = (
f"llama_index-docstore.metadata-{self._namespace}"
)
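# A minimal construction sketch: given an already-configured ElasticsearchKVStore
# (building one depends on the kvstore package and is not shown here), the
# docstore only needs the kvstore plus an optional namespace; the collection
# index names then fall back to the f-strings in __init__ above.
def _sketch_build_docstore(kvstore: ElasticsearchKVStore) -> ElasticsearchDocumentStore:
    return ElasticsearchDocumentStore(kvstore, namespace="demo")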
|
from typing import Optional
from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore
from llama_index.core.storage.docstore.types import DEFAULT_BATCH_SIZE
from llama_index.storage.kvstore.elasticsearch import ElasticsearchKVStore
class ElasticsearchDocumentStore(KVDocumentStore):
"""Elasticsearch Document (Node) store.
An Elasticsearch store for Document and Node objects.
Args:
elasticsearch_kvstore (ElasticsearchKVStore): Elasticsearch key-value store
namespace (str): namespace for the docstore
"""
def __init__(
self,
elasticsearch_kvstore: ElasticsearchKVStore,
namespace: Optional[str] = None,
        node_collection_index: Optional[str] = None,
        ref_doc_collection_index: Optional[str] = None,
        metadata_collection_index: Optional[str] = None,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
"""Init a ElasticsearchDocumentStore."""
super().__init__(
elasticsearch_kvstore, namespace=namespace, batch_size=batch_size
)
if node_collection_index:
self._node_collection = node_collection_index
else:
self._node_collection = f"llama_index-docstore.data-{self._namespace}"
if ref_doc_collection_index:
self._ref_doc_collection = ref_doc_collection_index
else:
self._ref_doc_collection = (
f"llama_index-docstore.ref_doc_info-{self._namespace}"
)
if metadata_collection_index:
self._metadata_collection = metadata_collection_index
else:
self._metadata_collection = (
f"llama_index-docstore.metadata-{self._namespace}"
)
|
from setuptools import setup, find_packages
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="2.8.0.dev0",
author="Nils Reimers",
author_email="info@nils-reimers.de",
description="Multilingual text embeddings",
long_description=readme,
long_description_content_type="text/markdown",
license="Apache License 2.0",
url="https://www.SBERT.net",
download_url="https://github.com/UKPLab/sentence-transformers/",
packages=find_packages(),
include_package_data=True,
python_requires=">=3.8.0",
install_requires=[
"transformers>=4.34.0,<5.0.0",
"tqdm",
"torch>=1.11.0",
"numpy",
"scikit-learn",
"scipy",
"huggingface-hub>=0.15.1",
"Pillow",
"datasets",
"accelerate>=0.20.3",
],
extras_require={
"dev": [
"pre-commit",
"pytest",
"ruff>=0.3.0",
],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning",
)
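# Usage note (a typical invocation, not prescribed by this file): an editable
# install with the dev extras defined above is usually done from the repo root as
#   pip install -e ".[dev]"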
|
from setuptools import setup, find_packages
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="2.8.0.dev0",
author="Nils Reimers",
author_email="info@nils-reimers.de",
description="Multilingual text embeddings",
long_description=readme,
long_description_content_type="text/markdown",
license="Apache License 2.0",
url="https://www.SBERT.net",
download_url="https://github.com/UKPLab/sentence-transformers/",
packages=find_packages(),
python_requires=">=3.8.0",
install_requires=[
"transformers>=4.34.0,<5.0.0",
"tqdm",
"torch>=1.11.0",
"numpy",
"scikit-learn",
"scipy",
"huggingface-hub>=0.15.1",
"Pillow",
],
extras_require={
"dev": [
"pre-commit",
"pytest",
"ruff>=0.3.0",
],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning",
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import StandardRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestCascadeRoIHead(TestCase):
@parameterized.expand(
['cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
"""Test init standard RoI head."""
# Normal Cascade Mask R-CNN RoI head
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
assert roi_head.with_bbox
assert roi_head.with_mask
@parameterized.expand(
['cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'])
def test_cascade_roi_head_loss(self, cfg_file):
"""Tests standard roi head loss when truth is empty and non-empty."""
if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 1, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
        # When truth is non-empty, the cls, box, and mask losses
        # should all be nonzero for random inputs
img_shape_list = [img_meta['img_shape'] for img_meta in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100)
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(s, s, 3)],
num_items=[1],
num_classes=4,
with_mask=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
        # When there is no truth, the cls loss should be nonzero but
        # there should be no box or mask loss.
proposal_list = demo_mm_proposals(img_shape_list, 100)
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(s, s, 3)],
num_items=[0],
num_classes=4,
with_mask=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss_cls' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
elif 'loss_bbox' in name or 'loss_mask' in name:
self.assertEqual(value.sum(), 0)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.data import InstanceData
from parameterized import parameterized
from mmdet.models.roi_heads import StandardRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, get_detector_cfg
def _fake_roi_head(cfg_file):
"""Set a fake roi head config."""
model = get_detector_cfg(cfg_file)
roi_head = model.roi_head
rcnn_train_cfg = model.train_cfg.rcnn if model.train_cfg is not None \
else None
roi_head.update(train_cfg=rcnn_train_cfg)
return roi_head
def _fake_proposals(img_metas, proposal_len):
"""Create a fake proposal list."""
results = []
for i in range(len(img_metas)):
result = InstanceData(metainfo=img_metas[i])
proposal = torch.randn(proposal_len, 4).to(device='cuda')
result.bboxes = proposal
results.append(result)
return results
class TestCascadeRoIHead(TestCase):
@parameterized.expand(
['cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
"""Test init standard RoI head."""
# Normal Cascade Mask R-CNN RoI head
roi_head_cfg = _fake_roi_head(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
assert roi_head.with_bbox
assert roi_head.with_mask
@parameterized.expand(
['cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'])
def test_cascade_roi_head_loss(self, cfg_file):
"""Tests standard roi head loss when truth is empty and non-empty."""
if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = _fake_roi_head(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 1, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
        # When truth is non-empty, the cls, box, and mask losses
        # should all be nonzero for random inputs
proposal_list = _fake_proposals(img_metas, 100)
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(s, s, 3)],
num_items=[1],
num_classes=4,
with_mask=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
        # When there is no truth, the cls loss should be nonzero but
        # there should be no box or mask loss.
proposal_list = _fake_proposals(img_metas, 100)
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(s, s, 3)],
num_items=[0],
num_classes=4,
with_mask=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss_cls' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
elif 'loss_bbox' in name or 'loss_mask' in name:
self.assertEqual(value.sum(), 0)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import BBOX_SAMPLERS
from .base_sampler import BaseSampler
from .sampling_result import SamplingResult
@BBOX_SAMPLERS.register_module()
class PseudoSampler(BaseSampler):
"""A pseudo sampler that does not do sampling actually."""
def __init__(self, **kwargs):
pass
def _sample_pos(self, **kwargs):
"""Sample positive samples."""
raise NotImplementedError
def _sample_neg(self, **kwargs):
"""Sample negative samples."""
raise NotImplementedError
def sample(self, assign_result, bboxes, gt_bboxes, **kwargs):
"""Directly returns the positive and negative indices of samples.
Args:
assign_result (:obj:`AssignResult`): Assigned results
bboxes (torch.Tensor): Bounding boxes
gt_bboxes (torch.Tensor): Ground truth boxes
Returns:
:obj:`SamplingResult`: sampler results
"""
pos_inds = torch.nonzero(
assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
neg_inds = torch.nonzero(
assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
assign_result, gt_flags)
return sampling_result
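# A self-contained sketch of the index selection performed in sample() above:
# gt_inds > 0 marks boxes assigned to a ground-truth box, gt_inds == 0 marks
# background. The tensor below is illustrative, not produced by a real assigner.
def _sketch_pseudo_sampling_indices():
    gt_inds = torch.tensor([0, 2, 0, 1, 2])
    pos_inds = torch.nonzero(gt_inds > 0, as_tuple=False).squeeze(-1).unique()
    neg_inds = torch.nonzero(gt_inds == 0, as_tuple=False).squeeze(-1).unique()
    return pos_inds, neg_inds  # tensor([1, 3, 4]) and tensor([0, 2])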
|
import torch
from ..builder import BBOX_SAMPLERS
from .base_sampler import BaseSampler
from .sampling_result import SamplingResult
@BBOX_SAMPLERS.register_module()
class PseudoSampler(BaseSampler):
"""A pseudo sampler that does not do sampling actually."""
def __init__(self, **kwargs):
pass
def _sample_pos(self, **kwargs):
"""Sample positive samples."""
raise NotImplementedError
def _sample_neg(self, **kwargs):
"""Sample negative samples."""
raise NotImplementedError
def sample(self, assign_result, bboxes, gt_bboxes, **kwargs):
"""Directly returns the positive and negative indices of samples.
Args:
assign_result (:obj:`AssignResult`): Assigned results
bboxes (torch.Tensor): Bounding boxes
gt_bboxes (torch.Tensor): Ground truth boxes
Returns:
:obj:`SamplingResult`: sampler results
"""
pos_inds = torch.nonzero(
assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
neg_inds = torch.nonzero(
assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
assign_result, gt_flags)
return sampling_result
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import mmcv
from ..builder import PIPELINES
from .compose import Compose
@PIPELINES.register_module()
class MultiScaleFlipAug:
"""Test-time augmentation with multiple scales and flipping.
    An example configuration is as follows:
.. code-block::
img_scale=[(1333, 400), (1333, 800)],
flip=True,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
]
    After MultiScaleFlipAug with the above configuration, the results are wrapped
    into lists of the same length as follows:
.. code-block::
dict(
img=[...],
img_shape=[...],
scale=[(1333, 400), (1333, 400), (1333, 800), (1333, 800)]
flip=[False, True, False, True]
...
)
Args:
transforms (list[dict]): Transforms to apply in each augmentation.
img_scale (tuple | list[tuple] | None): Images scales for resizing.
scale_factor (float | list[float] | None): Scale factors for resizing.
        flip (bool): Whether to apply flip augmentation. Default: False.
flip_direction (str | list[str]): Flip augmentation directions,
options are "horizontal", "vertical" and "diagonal". If
flip_direction is a list, multiple flip augmentations will be
applied. It has no effect when flip == False. Default:
"horizontal".
"""
def __init__(self,
transforms,
img_scale=None,
scale_factor=None,
flip=False,
flip_direction='horizontal'):
self.transforms = Compose(transforms)
assert (img_scale is None) ^ (scale_factor is None), (
            'Must specify exactly one of img_scale and scale_factor')
if img_scale is not None:
self.img_scale = img_scale if isinstance(img_scale,
list) else [img_scale]
self.scale_key = 'scale'
assert mmcv.is_list_of(self.img_scale, tuple)
else:
self.img_scale = scale_factor if isinstance(
scale_factor, list) else [scale_factor]
self.scale_key = 'scale_factor'
self.flip = flip
self.flip_direction = flip_direction if isinstance(
flip_direction, list) else [flip_direction]
assert mmcv.is_list_of(self.flip_direction, str)
if not self.flip and self.flip_direction != ['horizontal']:
warnings.warn(
'flip_direction has no effect when flip is set to False')
if (self.flip
and not any([t['type'] == 'RandomFlip' for t in transforms])):
warnings.warn(
'flip has no effect when RandomFlip is not in transforms')
def __call__(self, results):
"""Call function to apply test time augment transforms on results.
Args:
results (dict): Result dict contains the data to transform.
Returns:
dict[str: list]: The augmented data, where each value is wrapped
into a list.
"""
aug_data = []
flip_args = [(False, None)]
if self.flip:
flip_args += [(True, direction)
for direction in self.flip_direction]
for scale in self.img_scale:
for flip, direction in flip_args:
_results = results.copy()
_results[self.scale_key] = scale
_results['flip'] = flip
_results['flip_direction'] = direction
data = self.transforms(_results)
aug_data.append(data)
# list of dict to dict of list
aug_data_dict = {key: [] for key in aug_data[0]}
for data in aug_data:
for key, val in data.items():
aug_data_dict[key].append(val)
return aug_data_dict
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(transforms={self.transforms}, '
repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
repr_str += f'flip_direction={self.flip_direction})'
return repr_str
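# A self-contained sketch of how __call__ above expands scales and flip settings
# into augmentation variants; the scales mirror the docstring example.
def _sketch_aug_variants():
    img_scales = [(1333, 400), (1333, 800)]
    flip_args = [(False, None), (True, 'horizontal')]
    # 2 scales x 2 flip settings -> 4 augmented copies per image, matching the
    # lists of length 4 shown in the class docstring.
    return [(scale, flip, direction)
            for scale in img_scales
            for flip, direction in flip_args]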
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import mmcv
from ..builder import PIPELINES
from .compose import Compose
@PIPELINES.register_module()
class MultiScaleFlipAug:
"""Test-time augmentation with multiple scales and flipping.
    An example configuration is as follows:
.. code-block::
img_scale=[(1333, 400), (1333, 800)],
flip=True,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
]
    After MultiScaleFlipAug with the above configuration, the results are wrapped
    into lists of the same length as follows:
.. code-block::
dict(
img=[...],
img_shape=[...],
scale=[(1333, 400), (1333, 400), (1333, 800), (1333, 800)]
flip=[False, True, False, True]
...
)
Args:
transforms (list[dict]): Transforms to apply in each augmentation.
img_scale (tuple | list[tuple] | None): Images scales for resizing.
scale_factor (float | list[float] | None): Scale factors for resizing.
        flip (bool): Whether to apply flip augmentation. Default: False.
flip_direction (str | list[str]): Flip augmentation directions,
options are "horizontal", "vertical" and "diagonal". If
flip_direction is a list, multiple flip augmentations will be
applied. It has no effect when flip == False. Default:
"horizontal".
"""
def __init__(self,
transforms,
img_scale=None,
scale_factor=None,
flip=False,
flip_direction='horizontal'):
self.transforms = Compose(transforms)
assert (img_scale is None) ^ (scale_factor is None), (
            'Must specify exactly one of img_scale and scale_factor')
if img_scale is not None:
self.img_scale = img_scale if isinstance(img_scale,
list) else [img_scale]
self.scale_key = 'scale'
assert mmcv.is_list_of(self.img_scale, tuple)
else:
self.img_scale = scale_factor if isinstance(
scale_factor, list) else [scale_factor]
self.scale_key = 'scale_factor'
self.flip = flip
self.flip_direction = flip_direction if isinstance(
flip_direction, list) else [flip_direction]
assert mmcv.is_list_of(self.flip_direction, str)
if not self.flip and self.flip_direction != ['horizontal']:
warnings.warn(
'flip_direction has no effect when flip is set to False')
if (self.flip
and not any([t['type'] == 'RandomFlip' for t in transforms])):
warnings.warn(
'flip has no effect when RandomFlip is not in transforms')
def __call__(self, results):
"""Call function to apply test time augment transforms on results.
Args:
results (dict): Result dict contains the data to transform.
Returns:
dict[str: list]: The augmented data, where each value is wrapped
into a list.
"""
aug_data = []
flip_args = [(False, None)]
if self.flip:
flip_args += [(True, direction)
for direction in self.flip_direction]
for scale in self.img_scale:
for flip, direction in flip_args:
_results = results.copy()
_results[self.scale_key] = scale
_results['flip'] = flip
_results['flip_direction'] = direction
data = self.transforms(_results)
aug_data.append(data)
# list of dict to dict of list
aug_data_dict = {key: [] for key in aug_data[0]}
for data in aug_data:
for key, val in data.items():
aug_data_dict[key].append(val)
return aug_data_dict
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(transforms={self.transforms}, '
repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
repr_str += f'flip_direction={self.flip_direction})'
return repr_str
|
from pathlib import Path
from typing import Callable, Optional
from .folder import ImageFolder
from .utils import download_and_extract_archive, verify_str_arg
class Country211(ImageFolder):
"""`The Country211 Data Set <https://github.com/openai/CLIP/blob/main/data/country211.md>`_ from OpenAI.
This dataset was built by filtering the images from the YFCC100m dataset
    that have a GPS coordinate corresponding to an ISO-3166 country code. The
dataset is balanced by sampling 150 train images, 50 validation images, and
100 test images for each country.
Args:
root (string): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"valid"`` and ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and puts it into
``root/country211/``. If dataset is already downloaded, it is not downloaded again.
"""
_URL = "https://openaipublic.azureedge.net/clip/data/country211.tgz"
_MD5 = "84988d7644798601126c29e9877aab6a"
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
self._split = verify_str_arg(split, "split", ("train", "valid", "test"))
root = Path(root).expanduser()
self.root = str(root)
self._base_folder = root / "country211"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
super().__init__(str(self._base_folder / self._split), transform=transform, target_transform=target_transform)
self.root = str(root)
def _check_exists(self) -> bool:
return self._base_folder.exists() and self._base_folder.is_dir()
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
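# A minimal usage sketch, assuming torchvision is installed; "data" is an
# arbitrary writable directory, and download=True fetches the country211
# archive on first use.
def _sketch_country211():
    dataset = Country211(root="data", split="valid", download=True)
    image, target = dataset[0]  # a PIL image and an integer class index (ImageFolder behaviour)
    return image, target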
|
from pathlib import Path
from typing import Callable, Optional
from .folder import ImageFolder
from .utils import download_and_extract_archive, verify_str_arg
class Country211(ImageFolder):
"""`The Country211 Data Set <https://github.com/openai/CLIP/blob/main/data/country211.md>`_ from OpenAI.
This dataset was built by filtering the images from the YFCC100m dataset
    that have a GPS coordinate corresponding to an ISO-3166 country code. The
dataset is balanced by sampling 150 train images, 50 validation images, and
100 test images for each country.
Args:
root (string): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"valid"`` and ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and puts it into
``root/country211/``. If dataset is already downloaded, it is not downloaded again.
"""
_URL = "https://openaipublic.azureedge.net/clip/data/country211.tgz"
_MD5 = "84988d7644798601126c29e9877aab6a"
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
self._split = verify_str_arg(split, "split", ("train", "valid", "test"))
root = Path(root).expanduser()
self.root = str(root)
self._base_folder = root / "country211"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
super().__init__(str(self._base_folder / self._split), transform=transform, target_transform=target_transform)
self.root = str(root)
def _check_exists(self) -> bool:
return self._base_folder.exists() and self._base_folder.is_dir()
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
|
import os
from time import time
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import NdArray
from docarray.utils.map import map_docs, map_docs_batched
from tests.units.typing.test_bytes import IMAGE_PATHS
pytestmark = [pytest.mark.benchmark, pytest.mark.slow]
class MyMatrix(BaseDoc):
matrix: NdArray
def cpu_intensive(doc: MyMatrix) -> MyMatrix:
# some cpu intensive function
for i in range(3000):
sqrt_matrix = np.sqrt(doc.matrix)
doc.matrix = sqrt_matrix
return doc
def test_map_docs_multiprocessing():
if os.cpu_count() > 1:
def time_multiprocessing(num_workers: int) -> float:
n_docs = 10
rng = np.random.RandomState(0)
matrices = [rng.random(size=(100, 100)) for _ in range(n_docs)]
da = DocList[MyMatrix]([MyMatrix(matrix=m) for m in matrices])
start_time = time()
list(
map_docs(
docs=da,
func=cpu_intensive,
backend='process',
num_worker=num_workers,
)
)
return time() - start_time
time_1_cpu = time_multiprocessing(num_workers=1)
time_2_cpu = time_multiprocessing(num_workers=2)
assert time_2_cpu < time_1_cpu
def cpu_intensive_batch(da: DocList[MyMatrix]) -> DocList[MyMatrix]:
# some cpu intensive function
for doc in da:
for i in range(3000):
sqrt_matrix = np.sqrt(doc.matrix)
doc.matrix = sqrt_matrix
return da
def test_map_docs_batched_multiprocessing():
if os.cpu_count() > 1:
def time_multiprocessing(num_workers: int) -> float:
n_docs = 16
rng = np.random.RandomState(0)
matrices = [rng.random(size=(100, 100)) for _ in range(n_docs)]
da = DocList[MyMatrix]([MyMatrix(matrix=m) for m in matrices])
start_time = time()
list(
map_docs_batched(
docs=da,
func=cpu_intensive_batch,
batch_size=8,
backend='process',
num_worker=num_workers,
)
)
return time() - start_time
time_1_cpu = time_multiprocessing(num_workers=1)
time_2_cpu = time_multiprocessing(num_workers=2)
assert time_2_cpu < time_1_cpu
def io_intensive(img: ImageDoc) -> ImageDoc:
    # some io-intensive function: load the image from its url and set the tensor
img.tensor = img.url.load()
return img
def test_map_docs_multithreading():
def time_multithreading(num_workers: int) -> float:
n_docs = 100
da = DocList[ImageDoc](
[ImageDoc(url=IMAGE_PATHS['png']) for _ in range(n_docs)]
)
start_time = time()
list(
map_docs(
docs=da, func=io_intensive, backend='thread', num_worker=num_workers
)
)
return time() - start_time
time_1_thread = time_multithreading(num_workers=1)
time_2_thread = time_multithreading(num_workers=2)
assert time_2_thread < time_1_thread
def io_intensive_batch(da: DocList[ImageDoc]) -> DocList[ImageDoc]:
    # some io-intensive function: load each image from its url and set the tensor
for doc in da:
doc.tensor = doc.url.load()
return da
def test_map_docs_batched_multithreading():
def time_multithreading_batch(num_workers: int) -> float:
n_docs = 100
da = DocList[ImageDoc](
[ImageDoc(url=IMAGE_PATHS['png']) for _ in range(n_docs)]
)
start_time = time()
list(
map_docs_batched(
docs=da,
func=io_intensive_batch,
backend='thread',
num_worker=num_workers,
batch_size=10,
)
)
return time() - start_time
time_1_thread = time_multithreading_batch(num_workers=1)
time_2_thread = time_multithreading_batch(num_workers=2)
assert time_2_thread < time_1_thread
|
import os
from time import time
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import NdArray
from docarray.utils.map import map_docs, map_docs_batched
from tests.units.typing.test_bytes import IMAGE_PATHS
pytestmark = [pytest.mark.benchmark, pytest.mark.slow]
class MyMatrix(BaseDoc):
matrix: NdArray
def cpu_intensive(doc: MyMatrix) -> MyMatrix:
# some cpu intensive function
for i in range(3000):
sqrt_matrix = np.sqrt(doc.matrix)
doc.matrix = sqrt_matrix
return doc
def test_map_docs_multiprocessing():
if os.cpu_count() > 1:
def time_multiprocessing(num_workers: int) -> float:
n_docs = 5
rng = np.random.RandomState(0)
matrices = [rng.random(size=(1000, 1000)) for _ in range(n_docs)]
da = DocList[MyMatrix]([MyMatrix(matrix=m) for m in matrices])
start_time = time()
list(
map_docs(
docs=da,
func=cpu_intensive,
backend='process',
num_worker=num_workers,
)
)
return time() - start_time
time_1_cpu = time_multiprocessing(num_workers=1)
time_2_cpu = time_multiprocessing(num_workers=2)
assert time_2_cpu < time_1_cpu
def cpu_intensive_batch(da: DocList[MyMatrix]) -> DocList[MyMatrix]:
# some cpu intensive function
for doc in da:
for i in range(3000):
sqrt_matrix = np.sqrt(doc.matrix)
doc.matrix = sqrt_matrix
return da
def test_map_docs_batched_multiprocessing():
if os.cpu_count() > 1:
def time_multiprocessing(num_workers: int) -> float:
n_docs = 16
rng = np.random.RandomState(0)
matrices = [rng.random(size=(1000, 1000)) for _ in range(n_docs)]
da = DocList[MyMatrix]([MyMatrix(matrix=m) for m in matrices])
start_time = time()
list(
map_docs_batched(
docs=da,
func=cpu_intensive_batch,
batch_size=8,
backend='process',
num_worker=num_workers,
)
)
return time() - start_time
time_1_cpu = time_multiprocessing(num_workers=1)
time_2_cpu = time_multiprocessing(num_workers=2)
assert time_2_cpu < time_1_cpu
def io_intensive(img: ImageDoc) -> ImageDoc:
    # some io-intensive function: load the image from its url and set the tensor
img.tensor = img.url.load()
return img
def test_map_docs_multithreading():
def time_multithreading(num_workers: int) -> float:
n_docs = 100
da = DocList[ImageDoc](
[ImageDoc(url=IMAGE_PATHS['png']) for _ in range(n_docs)]
)
start_time = time()
list(
map_docs(
docs=da, func=io_intensive, backend='thread', num_worker=num_workers
)
)
return time() - start_time
time_1_thread = time_multithreading(num_workers=1)
time_2_thread = time_multithreading(num_workers=2)
assert time_2_thread < time_1_thread
def io_intensive_batch(da: DocList[ImageDoc]) -> DocList[ImageDoc]:
    # some io-intensive function: load each image from its url and set the tensor
for doc in da:
doc.tensor = doc.url.load()
return da
def test_map_docs_batched_multithreading():
def time_multithreading_batch(num_workers: int) -> float:
n_docs = 100
da = DocList[ImageDoc](
[ImageDoc(url=IMAGE_PATHS['png']) for _ in range(n_docs)]
)
start_time = time()
list(
map_docs_batched(
docs=da,
func=io_intensive_batch,
backend='thread',
num_worker=num_workers,
batch_size=10,
)
)
return time() - start_time
time_1_thread = time_multithreading_batch(num_workers=1)
time_2_thread = time_multithreading_batch(num_workers=2)
assert time_2_thread < time_1_thread
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.0.0'
short_version = __version__
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
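# A small self-check mirroring the docstring examples above (illustrative only).
assert parse_version_info('1.3.0') == (1, 3, 0)
assert parse_version_info('2.0.0rc1') == (2, 0, 0, 'rc1')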
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.0.0rc6'
short_version = __version__
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
from pathlib import Path
from typing import Any, Callable, Optional, Union
from .folder import default_loader
from .utils import download_and_extract_archive
from .vision import VisionDataset
class SUN397(VisionDataset):
"""`The SUN397 Data Set <https://vision.princeton.edu/projects/2010/SUN/>`_.
The SUN397 or Scene UNderstanding (SUN) is a dataset for scene recognition consisting of
397 categories with 108'754 images.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
transform (callable, optional): A function/transform that takes in a PIL image or torch.Tensor, depends on the given loader,
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
"""
_DATASET_URL = "http://vision.princeton.edu/projects/2010/SUN/SUN397.tar.gz"
_DATASET_MD5 = "8ca2778205c41d23104230ba66911c7a"
def __init__(
self,
root: Union[str, Path],
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
loader: Callable[[Union[str, Path]], Any] = default_loader,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._data_dir = Path(self.root) / "SUN397"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
with open(self._data_dir / "ClassName.txt") as f:
self.classes = [c[3:].strip() for c in f]
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
self._image_files = list(self._data_dir.rglob("sun_*.jpg"))
self._labels = [
self.class_to_idx["/".join(path.relative_to(self._data_dir).parts[1:-1])] for path in self._image_files
]
self.loader = loader
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = self.loader(image_file)
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def _check_exists(self) -> bool:
return self._data_dir.is_dir()
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._DATASET_URL, download_root=self.root, md5=self._DATASET_MD5)
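# A self-contained sketch of how __init__ above derives the class key for one
# image: everything between the SUN397 root and the file name, joined by '/'.
# The file path below is illustrative.
def _sketch_sun397_class_key():
    data_dir = Path("SUN397")
    image_file = Path("SUN397/a/abbey/sun_example.jpg")
    parts = image_file.relative_to(data_dir).parts[1:-1]  # ('abbey',)
    return "/".join(parts)  # 'abbey'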
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from .folder import default_loader
from .utils import download_and_extract_archive
from .vision import VisionDataset
class SUN397(VisionDataset):
"""`The SUN397 Data Set <https://vision.princeton.edu/projects/2010/SUN/>`_.
The SUN397 or Scene UNderstanding (SUN) is a dataset for scene recognition consisting of
397 categories with 108'754 images.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
transform (callable, optional): A function/transform that takes in a PIL image or torch.Tensor, depends on the given loader,
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
"""
_DATASET_URL = "http://vision.princeton.edu/projects/2010/SUN/SUN397.tar.gz"
_DATASET_MD5 = "8ca2778205c41d23104230ba66911c7a"
def __init__(
self,
root: Union[str, Path],
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
loader: Callable[[Union[str, Path]], Any] = default_loader,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._data_dir = Path(self.root) / "SUN397"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
with open(self._data_dir / "ClassName.txt") as f:
self.classes = [c[3:].strip() for c in f]
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
self._image_files = list(self._data_dir.rglob("sun_*.jpg"))
self._labels = [
self.class_to_idx["/".join(path.relative_to(self._data_dir).parts[1:-1])] for path in self._image_files
]
self.loader = loader
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = self.loader(image_file)
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def _check_exists(self) -> bool:
return self._data_dir.is_dir()
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._DATASET_URL, download_root=self.root, md5=self._DATASET_MD5)
|
# mypy: allow-untyped-defs
import torch
from torch import Tensor
aten = torch.ops.aten
import inspect
import warnings
from typing import Callable, Optional, TypeVar
from typing_extensions import ParamSpec
from torch.types import Number
decomposition_table: dict[str, torch.jit.ScriptFunction] = {}
function_name_set: set[str] = set()
_T = TypeVar("_T")
_P = ParamSpec("_P")
def check_decomposition_has_type_annotations(f):
inspect_empty = inspect._empty # type: ignore[attr-defined]
sig = inspect.signature(f)
for param in sig.parameters.values():
assert param.annotation != inspect_empty, (
f"No signature on param {param.name} for function {f.name}"
)
assert sig.return_annotation != inspect_empty, (
f"No return annotation for function {f.name}"
)
def signatures_match(decomposition_sig, torch_op_sig):
decomp_params = decomposition_sig.parameters
op_params = torch_op_sig.parameters
if len(decomp_params) != len(op_params):
return False
for decomp_param, op_param in zip(decomp_params.values(), op_params.values()):
# can't check full equality yet because not all fields are correctly deduced
# in the torch_op_sig - like default value
# can't check 'kind' bc
# kwarg-only values with defaults not yet supported in TS
inspect_empty = inspect._empty # type: ignore[attr-defined]
for field in ["name", "annotation"]:
if field == "name" and decomp_param.name == "self":
warnings.warn("PyTorch uses 'input' instead of 'self' on public api")
if getattr(decomp_param, field) != getattr(op_param, field):
return False
decomp_default = decomp_param.default
op_default = op_param.default
# default value not always correctly inferred as being present on torch schema,
# but if specified on both they should be equal
if decomp_default != inspect_empty and op_default != inspect_empty:
if decomp_default != op_default:
return False
return decomposition_sig.return_annotation == torch_op_sig.return_annotation
def register_decomposition(
aten_op: torch._ops.OpOverload,
registry: Optional[dict[str, torch.jit.ScriptFunction]] = None,
) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]:
def decomposition_decorator(f: Callable[_P, _T]) -> Callable[_P, _T]:
nonlocal registry
if registry is None:
registry = decomposition_table
assert isinstance(aten_op, torch._ops.OpOverload)
# Need unique name for jit function serialization
assert f.__name__ not in function_name_set, (
f"Duplicated function name {f.__name__}"
)
function_name_set.add(f.__name__)
scripted_func = torch.jit.script(f)
torch._C._jit_pass_inline(scripted_func.graph)
for _ in range(2):
torch._C._jit_pass_peephole(scripted_func.graph)
torch._C._jit_pass_constant_propagation(scripted_func.graph)
registry[str(aten_op._schema)] = scripted_func
return f
return decomposition_decorator
# TODO: replace torch.sigmoid -> aten.sigmoid
@register_decomposition(aten.var.correction)
def var_decomposition(
input: Tensor,
dim: Optional[list[int]] = None,
correction: Optional[Number] = None,
keepdim: bool = False,
) -> Tensor:
if dim is None:
dim_i: list[int] = []
dim = dim_i
if isinstance(dim, (tuple, list)) and len(dim) == 0:
n = input.numel()
else:
n = 1
for dim_i in dim: # type: ignore[assignment]
n *= input.shape[dim_i] # type: ignore[call-overload]
mean = aten.mean(input, dim, True)
sub = input - mean
sq = sub * sub
sum = aten.sum(sq, dim, keepdim)
if correction is None:
denom = float(n - 1)
else:
if isinstance(correction, int):
denom = float(n - correction)
elif isinstance(correction, float):
denom = float(n) - correction
else:
raise RuntimeError("correction must be int or float")
return sum / max(0, denom)
@register_decomposition(aten.var.default)
def var(input: Tensor, unbiased: bool = True) -> Tensor:
return var_decomposition(input, correction=(1 if unbiased else 0))
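# A small numerical sketch (not registered anywhere) checking that the unbiased
# variance formula used in var_decomposition matches torch.var on a random
# tensor; plain torch ops are used so it runs eagerly.
def _sketch_check_var():
    x = torch.randn(4, 5)
    n = x.numel()
    manual = ((x - x.mean()) ** 2).sum() / (n - 1)
    return torch.allclose(manual, torch.var(x, unbiased=True))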
|
# mypy: allow-untyped-defs
import torch
from torch import Tensor
aten = torch.ops.aten
import inspect
import warnings
from typing import Callable, Optional, TypeVar
from typing_extensions import ParamSpec
from torch.types import Number
decomposition_table: dict[str, torch.jit.ScriptFunction] = {}
function_name_set: set[str] = set()
_T = TypeVar("_T")
_P = ParamSpec("_P")
def check_decomposition_has_type_annotations(f):
inspect_empty = inspect._empty # type: ignore[attr-defined]
sig = inspect.signature(f)
for param in sig.parameters.values():
assert param.annotation != inspect_empty, (
f"No signature on param {param.name} for function {f.name}"
)
assert sig.return_annotation != inspect_empty, (
f"No return annotation for function {f.name}"
)
def signatures_match(decomposition_sig, torch_op_sig):
decomp_params = decomposition_sig.parameters
op_params = torch_op_sig.parameters
if len(decomp_params) != len(op_params):
return False
for decomp_param, op_param in zip(decomp_params.values(), op_params.values()):
        # can't check full equality yet because not all fields are correctly deduced
# in the torch_op_sig - like default value
# can't check 'kind' bc
# kwarg-only values with defaults not yet supported in TS
inspect_empty = inspect._empty # type: ignore[attr-defined]
for field in ["name", "annotation"]:
if field == "name" and decomp_param.name == "self":
warnings.warn("PyTorch uses 'input' instead of 'self' on public api")
if getattr(decomp_param, field) != getattr(op_param, field):
return False
decomp_default = decomp_param.default
op_default = op_param.default
# default value not always correctly inferred as being present on torch schema,
# but if specified on both they should be equal
if decomp_default != inspect_empty and op_default != inspect_empty:
if decomp_default != op_default:
return False
return decomposition_sig.return_annotation == torch_op_sig.return_annotation
def register_decomposition(
aten_op: torch._ops.OpOverload,
registry: Optional[dict[str, torch.jit.ScriptFunction]] = None,
) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]:
def decomposition_decorator(f: Callable[_P, _T]) -> Callable[_P, _T]:
nonlocal registry
if registry is None:
registry = decomposition_table
assert isinstance(aten_op, torch._ops.OpOverload)
# Need unique name for jit function serialization
assert f.__name__ not in function_name_set, (
f"Duplicated function name {f.__name__}"
)
function_name_set.add(f.__name__)
scripted_func = torch.jit.script(f)
torch._C._jit_pass_inline(scripted_func.graph)
for _ in range(2):
torch._C._jit_pass_peephole(scripted_func.graph)
torch._C._jit_pass_constant_propagation(scripted_func.graph)
registry[str(aten_op._schema)] = scripted_func
return f
return decomposition_decorator
# TODO: replace torch.sigmoid -> aten.sigmoid
@register_decomposition(aten.var.correction)
def var_decomposition(
input: Tensor,
dim: Optional[list[int]] = None,
correction: Optional[Number] = None,
keepdim: bool = False,
) -> Tensor:
if dim is None:
dim_i: list[int] = []
dim = dim_i
if isinstance(dim, (tuple, list)) and len(dim) == 0:
n = input.numel()
else:
n = 1
for dim_i in dim: # type: ignore[assignment]
n *= input.shape[dim_i] # type: ignore[call-overload]
mean = aten.mean(input, dim, True)
sub = input - mean
sq = sub * sub
sum = aten.sum(sq, dim, keepdim)
if correction is None:
denom = float(n - 1)
else:
if isinstance(correction, int):
denom = float(n - correction)
elif isinstance(correction, float):
denom = float(n) - correction
else:
raise RuntimeError("correction must be int or float")
return sum / max(0, denom)
@register_decomposition(aten.var.default)
def var(input: Tensor, unbiased: bool = True) -> Tensor:
return var_decomposition(input, correction=(1 if unbiased else 0))
|
# coding: utf-8
import logging
import numpy as np
import lightgbm as lgb
def test_register_logger(tmp_path):
logger = logging.getLogger("LightGBM")
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s | %(message)s')
log_filename = tmp_path / "LightGBM_test_logger.log"
file_handler = logging.FileHandler(log_filename, mode="w", encoding="utf-8")
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def dummy_metric(_, __):
logger.debug('In dummy_metric')
return 'dummy_metric', 1, True
lgb.register_logger(logger)
X = np.array([[1, 2, 3],
[1, 2, 4],
[1, 2, 4],
[1, 2, 3]],
dtype=np.float32)
y = np.array([0, 1, 1, 0])
lgb_data = lgb.Dataset(X, y)
eval_records = {}
callbacks = [
lgb.record_evaluation(eval_records),
lgb.log_evaluation(2),
lgb.early_stopping(4)
]
lgb.train({'objective': 'binary', 'metric': ['auc', 'binary_error']},
lgb_data, num_boost_round=10, feval=dummy_metric,
valid_sets=[lgb_data], categorical_feature=[1], callbacks=callbacks)
lgb.plot_metric(eval_records)
expected_log = r"""
INFO | [LightGBM] [Warning] There are no meaningful features which satisfy the provided configuration. Decreasing Dataset parameters min_data_in_bin or min_data_in_leaf and re-constructing Dataset might resolve this warning.
INFO | [LightGBM] [Info] Number of positive: 2, number of negative: 2
INFO | [LightGBM] [Info] Total Bins 0
INFO | [LightGBM] [Info] Number of data points in the train set: 4, number of used features: 0
INFO | [LightGBM] [Info] [binary:BoostFromScore]: pavg=0.500000 -> initscore=0.000000
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | Training until validation scores don't improve for 4 rounds
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [2] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [4] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [6] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [8] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [10] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
INFO | Did not meet early stopping. Best iteration is:
[1] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
WARNING | More than one metric available, picking one to plot.
""".strip()
gpu_lines = [
"INFO | [LightGBM] [Info] This is the GPU trainer",
"INFO | [LightGBM] [Info] Using GPU Device:",
"INFO | [LightGBM] [Info] Compiling OpenCL Kernel with 16 bins...",
"INFO | [LightGBM] [Info] GPU programs have been built",
"INFO | [LightGBM] [Warning] GPU acceleration is disabled because no non-trivial dense features can be found",
"INFO | [LightGBM] [Warning] Using sparse features with CUDA is currently not supported.",
"INFO | [LightGBM] [Warning] CUDA currently requires double precision calculations.",
"INFO | [LightGBM] [Info] LightGBM using CUDA trainer with DP float!!"
]
with open(log_filename, "rt", encoding="utf-8") as f:
actual_log = f.read().strip()
actual_log_wo_gpu_stuff = []
for line in actual_log.split("\n"):
if not any(line.startswith(gpu_line) for gpu_line in gpu_lines):
actual_log_wo_gpu_stuff.append(line)
assert "\n".join(actual_log_wo_gpu_stuff) == expected_log
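# Editor's note (minimal sketch, not part of the test above): `lgb.register_logger`
# routes LightGBM's native log messages through a standard Python logger, which is
# why the file handler in the test captures the "[LightGBM] ..." lines. The logger
# name, handler and parameters below are arbitrary choices for illustration.
import logging
import numpy as np
import lightgbm as lgb

demo_logger = logging.getLogger("lightgbm-demo")
demo_logger.setLevel(logging.INFO)
demo_logger.addHandler(logging.StreamHandler())  # send LightGBM messages to stderr
lgb.register_logger(demo_logger)

demo_data = lgb.Dataset(np.random.rand(20, 3), label=np.random.randint(0, 2, 20))
lgb.train({"objective": "binary"}, demo_data, num_boost_round=2)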
|
# coding: utf-8
import logging
import numpy as np
import lightgbm as lgb
def test_register_logger(tmp_path):
logger = logging.getLogger("LightGBM")
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s | %(message)s')
log_filename = tmp_path / "LightGBM_test_logger.log"
file_handler = logging.FileHandler(log_filename, mode="w", encoding="utf-8")
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def dummy_metric(_, __):
logger.debug('In dummy_metric')
return 'dummy_metric', 1, True
lgb.register_logger(logger)
X = np.array([[1, 2, 3],
[1, 2, 4],
[1, 2, 4],
[1, 2, 3]],
dtype=np.float32)
y = np.array([0, 1, 1, 0])
lgb_data = lgb.Dataset(X, y)
eval_records = {}
callbacks = [
lgb.record_evaluation(eval_records),
lgb.log_evaluation(2),
lgb.early_stopping(4)
]
lgb.train({'objective': 'binary', 'metric': ['auc', 'binary_error']},
lgb_data, num_boost_round=10, feval=dummy_metric,
valid_sets=[lgb_data], categorical_feature=[1], callbacks=callbacks)
lgb.plot_metric(eval_records)
expected_log = r"""
INFO | [LightGBM] [Warning] There are no meaningful features, as all feature values are constant.
INFO | [LightGBM] [Info] Number of positive: 2, number of negative: 2
INFO | [LightGBM] [Info] Total Bins 0
INFO | [LightGBM] [Info] Number of data points in the train set: 4, number of used features: 0
INFO | [LightGBM] [Info] [binary:BoostFromScore]: pavg=0.500000 -> initscore=0.000000
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | Training until validation scores don't improve for 4 rounds
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [2] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [4] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [6] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [8] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [LightGBM] [Warning] Stopped training because there are no more leaves that meet the split requirements
DEBUG | In dummy_metric
INFO | [10] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
INFO | Did not meet early stopping. Best iteration is:
[1] training's auc: 0.5 training's binary_error: 0.5 training's dummy_metric: 1
WARNING | More than one metric available, picking one to plot.
""".strip()
gpu_lines = [
"INFO | [LightGBM] [Info] This is the GPU trainer",
"INFO | [LightGBM] [Info] Using GPU Device:",
"INFO | [LightGBM] [Info] Compiling OpenCL Kernel with 16 bins...",
"INFO | [LightGBM] [Info] GPU programs have been built",
"INFO | [LightGBM] [Warning] GPU acceleration is disabled because no non-trivial dense features can be found",
"INFO | [LightGBM] [Warning] Using sparse features with CUDA is currently not supported.",
"INFO | [LightGBM] [Warning] CUDA currently requires double precision calculations.",
"INFO | [LightGBM] [Info] LightGBM using CUDA trainer with DP float!!"
]
with open(log_filename, "rt", encoding="utf-8") as f:
actual_log = f.read().strip()
actual_log_wo_gpu_stuff = []
for line in actual_log.split("\n"):
if not any(line.startswith(gpu_line) for gpu_line in gpu_lines):
actual_log_wo_gpu_stuff.append(line)
assert "\n".join(actual_log_wo_gpu_stuff) == expected_log
|
from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sklearn.metrics import ndcg_score
logger = logging.getLogger(__name__)
class CERerankingEvaluator:
"""
This class evaluates a CrossEncoder model for the task of re-ranking.
Given a query and a list of documents, it computes the score [query, doc_i] for all possible
documents and sorts them in decreasing order. Then, MRR@10 and NDCG@10 are computed to measure the quality of the ranking.
Args:
        samples (List[Dict[str, Union[str, List[str]]]]): Must be a list and each element is of the form:
{'query': '', 'positive': [], 'negative': []}. Query is the search query, positive is a list
of positive (relevant) documents, negative is a list of negative (irrelevant) documents.
at_k (int): The evaluation is done at k. Default is 10.
name (str): Name of the dataset. Used for logging and saving the results to a CSV file.
write_csv (bool): Whether to write the results to a CSV file. Default is True.
mrr_at_k (int): Deprecated. Use `at_k` instead.
"""
def __init__(
self,
samples,
at_k: int = 10,
name: str = "",
write_csv: bool = True,
mrr_at_k: int | None = None,
):
self.samples = samples
self.name = name
if mrr_at_k is not None:
logger.warning(f"The `mrr_at_k` parameter has been deprecated; please use `at_k={mrr_at_k}` instead.")
self.at_k = mrr_at_k
else:
self.at_k = at_k
if isinstance(self.samples, dict):
self.samples = list(self.samples.values())
self.csv_file = "CERerankingEvaluator" + ("_" + name if name else "") + f"_results_@{self.at_k}.csv"
self.csv_headers = [
"epoch",
"steps",
f"MRR@{self.at_k}",
f"NDCG@{self.at_k}",
]
self.write_csv = write_csv
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = f" after epoch {epoch}:"
else:
out_txt = f" in epoch {epoch} after {steps} steps:"
else:
out_txt = ":"
logger.info("CERerankingEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
all_mrr_scores = []
all_ndcg_scores = []
num_queries = 0
num_positives = []
num_negatives = []
for instance in self.samples:
query = instance["query"]
positive = list(instance["positive"])
negative = list(instance["negative"])
docs = positive + negative
is_relevant = [1] * len(positive) + [0] * len(negative)
if len(positive) == 0 or len(negative) == 0:
continue
num_queries += 1
num_positives.append(len(positive))
num_negatives.append(len(negative))
model_input = [[query, doc] for doc in docs]
pred_scores = model.predict(model_input, convert_to_numpy=True, show_progress_bar=False)
pred_scores_argsort = np.argsort(-pred_scores) # Sort in decreasing order
mrr_score = 0
for rank, index in enumerate(pred_scores_argsort[0 : self.at_k]):
if is_relevant[index]:
mrr_score = 1 / (rank + 1)
break
all_mrr_scores.append(mrr_score)
all_ndcg_scores.append(ndcg_score([is_relevant], [pred_scores], k=self.at_k))
mean_mrr = np.mean(all_mrr_scores)
mean_ndcg = np.mean(all_ndcg_scores)
logger.info(
f"Queries: {num_queries} \t Positives: Min {np.min(num_positives):.1f}, Mean {np.mean(num_positives):.1f}, Max {np.max(num_positives):.1f} \t Negatives: Min {np.min(num_negatives):.1f}, Mean {np.mean(num_negatives):.1f}, Max {np.max(num_negatives):.1f}"
)
logger.info(f"MRR@{self.at_k}: {mean_mrr * 100:.2f}")
logger.info(f"NDCG@{self.at_k}: {mean_ndcg * 100:.2f}")
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, mean_mrr, mean_ndcg])
return mean_mrr
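# Editor's note (illustrative sketch, not part of the evaluator above): MRR@k as
# computed in __call__ is 1 / (1-based rank of the first relevant document), or 0 if
# no relevant document appears in the top k. The relevance labels and scores below
# are made up.
import numpy as np
from sklearn.metrics import ndcg_score

is_relevant = [0, 1, 0, 1]                # two relevant documents
pred_scores = np.array([0.9, 0.7, 0.4, 0.2])
order = np.argsort(-pred_scores)          # indices sorted by decreasing score
mrr_at_k = 0.0
for rank, index in enumerate(order[:10]):
    if is_relevant[index]:
        mrr_at_k = 1 / (rank + 1)         # first relevant doc at position 2 -> 0.5
        break
ndcg_at_k = ndcg_score([is_relevant], [pred_scores], k=10)
print(mrr_at_k, ndcg_at_k)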
|
from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sklearn.metrics import ndcg_score
logger = logging.getLogger(__name__)
class CERerankingEvaluator:
"""
This class evaluates a CrossEncoder model for the task of re-ranking.
Given a query and a list of documents, it computes the score [query, doc_i] for all possible
documents and sorts them in decreasing order. Then, MRR@10 and NDCG@10 are computed to measure the quality of the ranking.
Args:
        samples (List[Dict[str, Union[str, List[str]]]]): Must be a list and each element is of the form:
{'query': '', 'positive': [], 'negative': []}. Query is the search query, positive is a list
of positive (relevant) documents, negative is a list of negative (irrelevant) documents.
"""
def __init__(self, samples, at_k: int = 10, name: str = "", write_csv: bool = True, mrr_at_k: int | None = None):
self.samples = samples
self.name = name
if mrr_at_k is not None:
logger.warning(f"The `mrr_at_k` parameter has been deprecated; please use `at_k={mrr_at_k}` instead.")
self.at_k = mrr_at_k
else:
self.at_k = at_k
if isinstance(self.samples, dict):
self.samples = list(self.samples.values())
self.csv_file = "CERerankingEvaluator" + ("_" + name if name else "") + f"_results_@{self.at_k}.csv"
self.csv_headers = [
"epoch",
"steps",
f"MRR@{self.at_k}",
f"NDCG@{self.at_k}",
]
self.write_csv = write_csv
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = f" after epoch {epoch}:"
else:
out_txt = f" in epoch {epoch} after {steps} steps:"
else:
out_txt = ":"
logger.info("CERerankingEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
all_mrr_scores = []
all_ndcg_scores = []
num_queries = 0
num_positives = []
num_negatives = []
for instance in self.samples:
query = instance["query"]
positive = list(instance["positive"])
negative = list(instance["negative"])
docs = positive + negative
is_relevant = [1] * len(positive) + [0] * len(negative)
if len(positive) == 0 or len(negative) == 0:
continue
num_queries += 1
num_positives.append(len(positive))
num_negatives.append(len(negative))
model_input = [[query, doc] for doc in docs]
pred_scores = model.predict(model_input, convert_to_numpy=True, show_progress_bar=False)
pred_scores_argsort = np.argsort(-pred_scores) # Sort in decreasing order
mrr_score = 0
for rank, index in enumerate(pred_scores_argsort[0 : self.at_k]):
if is_relevant[index]:
mrr_score = 1 / (rank + 1)
break
all_mrr_scores.append(mrr_score)
all_ndcg_scores.append(ndcg_score([is_relevant], [pred_scores], k=self.at_k))
mean_mrr = np.mean(all_mrr_scores)
mean_ndcg = np.mean(all_ndcg_scores)
logger.info(
f"Queries: {num_queries} \t Positives: Min {np.min(num_positives):.1f}, Mean {np.mean(num_positives):.1f}, Max {np.max(num_positives):.1f} \t Negatives: Min {np.min(num_negatives):.1f}, Mean {np.mean(num_negatives):.1f}, Max {np.max(num_negatives):.1f}"
)
logger.info(f"MRR@{self.at_k}: {mean_mrr * 100:.2f}")
logger.info(f"NDCG@{self.at_k}: {mean_ndcg * 100:.2f}")
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, mean_mrr, mean_ndcg])
return mean_mrr
|
from prisma.models import User
from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock
from backend.blocks.text import FillTextTemplateBlock
from backend.data import graph
from backend.data.graph import create_graph
from backend.data.user import get_or_create_user
from backend.util.test import SpinTestServer, wait_execution
async def create_test_user(alt_user: bool = False) -> User:
if alt_user:
test_user_data = {
"sub": "3e53486c-cf57-477e-ba2a-cb02dc828e1b",
"email": "testuser2#example.com",
"name": "Test User 2",
}
else:
test_user_data = {
"sub": "ef3b97d7-1161-4eb4-92b2-10c24fb154c1",
"email": "testuser#example.com",
"name": "Test User",
}
user = await get_or_create_user(test_user_data)
return user
def create_test_graph() -> graph.Graph:
"""
InputBlock
\
---- FillTextTemplateBlock ---- PrintToConsoleBlock
/
InputBlock
"""
nodes = [
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_1"},
),
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_2"},
),
graph.Node(
block_id=FillTextTemplateBlock().id,
input_default={
"format": "{{a}}, {{b}}{{c}}",
"values_#_c": "!!!",
},
),
graph.Node(block_id=PrintToConsoleBlock().id),
]
links = [
graph.Link(
source_id=nodes[0].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_a",
),
graph.Link(
source_id=nodes[1].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_b",
),
graph.Link(
source_id=nodes[2].id,
sink_id=nodes[3].id,
source_name="output",
sink_name="text",
),
]
return graph.Graph(
name="TestGraph",
description="Test graph",
nodes=nodes,
links=links,
)
async def sample_agent():
async with SpinTestServer() as server:
test_user = await create_test_user()
test_graph = await create_graph(create_test_graph(), test_user.id)
input_data = {"input_1": "Hello", "input_2": "World"}
response = await server.agent_server.test_execute_graph(
test_graph.id, test_graph.version, input_data, test_user.id
)
print(response)
result = await wait_execution(test_user.id, test_graph.id, response["id"], 10)
print(result)
if __name__ == "__main__":
import asyncio
asyncio.run(sample_agent())
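# Editor's note (hedged sketch, not part of the test above): the two AgentInputBlock
# results are wired into the template's `a` and `b` slots through the links, while
# `c` comes from the node's input_default, so with input_1="Hello" and
# input_2="World" the filled template should read "Hello, World!!!". The plain
# string replacement below only mimics that outcome; the real FillTextTemplateBlock
# may use a different templating engine.
template = "{{a}}, {{b}}{{c}}"
values = {"a": "Hello", "b": "World", "c": "!!!"}
rendered = template
for key, value in values.items():
    rendered = rendered.replace("{{" + key + "}}", value)
assert rendered == "Hello, World!!!"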
|
from prisma.models import User
from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock
from backend.blocks.text import FillTextTemplateBlock
from backend.data import graph
from backend.data.graph import create_graph
from backend.data.user import get_or_create_user
from backend.util.test import SpinTestServer, wait_execution
async def create_test_user() -> User:
test_user_data = {
"sub": "ef3b97d7-1161-4eb4-92b2-10c24fb154c1",
"email": "testuser#example.com",
"name": "Test User",
}
user = await get_or_create_user(test_user_data)
return user
def create_test_graph() -> graph.Graph:
"""
InputBlock
\
---- FillTextTemplateBlock ---- PrintToConsoleBlock
/
InputBlock
"""
nodes = [
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_1"},
),
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_2"},
),
graph.Node(
block_id=FillTextTemplateBlock().id,
input_default={
"format": "{{a}}, {{b}}{{c}}",
"values_#_c": "!!!",
},
),
graph.Node(block_id=PrintToConsoleBlock().id),
]
links = [
graph.Link(
source_id=nodes[0].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_a",
),
graph.Link(
source_id=nodes[1].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_b",
),
graph.Link(
source_id=nodes[2].id,
sink_id=nodes[3].id,
source_name="output",
sink_name="text",
),
]
return graph.Graph(
name="TestGraph",
description="Test graph",
nodes=nodes,
links=links,
)
async def sample_agent():
async with SpinTestServer() as server:
test_user = await create_test_user()
test_graph = await create_graph(create_test_graph(), test_user.id)
input_data = {"input_1": "Hello", "input_2": "World"}
response = await server.agent_server.test_execute_graph(
test_graph.id, input_data, test_user.id
)
print(response)
result = await wait_execution(test_user.id, test_graph.id, response["id"], 10)
print(result)
if __name__ == "__main__":
import asyncio
asyncio.run(sample_agent())
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import polar
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import sparse_plus
from keras.src.ops.nn import sparsemax
from keras.src.ops.nn import squareplus
from keras.src.ops.nn import tanh_shrink
from keras.src.ops.nn import threshold
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import sparse_plus
from keras.src.ops.nn import sparsemax
from keras.src.ops.nn import squareplus
from keras.src.ops.nn import tanh_shrink
from keras.src.ops.nn import threshold
|
from __future__ import annotations
import math
from pathlib import Path
import pytest
from packaging.version import Version, parse
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
from model2vec import __version__ as M2V_VERSION
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding_model: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding_model.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding_model: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding_model.tokenize(texts)
output = static_embedding_model(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding_model: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding_model.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding_model.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
expected_shape = (29525 if parse(M2V_VERSION) >= Version("0.5.0") else 29528, 32)
assert model.embedding.weight.shape == expected_shape
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from packaging.version import Version, parse
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
from model2vec import __version__ as M2V_VERSION
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
output = static_embedding(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
expected_shape = (29525 if parse(M2V_VERSION) >= Version("0.5.0") else 29528, 32)
assert model.embedding.weight.shape == expected_shape
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
from llama_index.readers.web import RssReader
def test_rss_reader_non_strict_sources():
default_reader = RssReader()
documents = default_reader.load_data(urls=["https://news.ycombinator.com/rss"])
assert len(documents) > 0
def test_rss_reader_user_agent():
reader = RssReader(user_agent="MyApp/1.0 +http://example.com/")
documents = reader.load_data(urls=["https://news.ycombinator.com/rss"])
assert len(documents) > 0
|
from llama_index.readers.web import RssReader
def test_rss_reader_non_strict_sources():
default_reader = RssReader()
documents = default_reader.load_data(urls=["https://news.ycombinator.com/rss"])
assert len(documents) > 0
def test_rss_reader_rsshub():
default_reader = RssReader()
documents = default_reader.load_data(urls=["https://rsshub.app/hackernews/newest"])
assert len(documents) == 0
def test_rss_reader_user_agent():
reader = RssReader(user_agent="MyApp/1.0 +http://example.com/")
documents = reader.load_data(urls=["https://rsshub.app/hackernews/newest"])
assert len(documents) > 0
|
import contextlib
import logging
import typing
import fastapi
import fastapi.responses
import starlette.middleware.cors
import uvicorn
import backend.data.block
import backend.data.db
import backend.data.graph
import backend.data.user
import backend.server.routers.v1
import backend.util.service
import backend.util.settings
settings = backend.util.settings.Settings()
logger = logging.getLogger(__name__)
@contextlib.asynccontextmanager
async def lifespan_context(app: fastapi.FastAPI):
await backend.data.db.connect()
await backend.data.block.initialize_blocks()
await backend.data.user.migrate_and_encrypt_user_integrations()
await backend.data.graph.fix_llm_provider_credentials()
yield
await backend.data.db.disconnect()
def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
def handler(request: fastapi.Request, exc: Exception):
if log_error:
logger.exception(f"{request.method} {request.url.path} failed: {exc}")
return fastapi.responses.JSONResponse(
content={
"message": f"{request.method} {request.url.path} failed",
"detail": str(exc),
},
status_code=status_code,
)
return handler
docs_url = (
"/docs"
if settings.config.app_env == backend.util.settings.AppEnvironment.LOCAL
else None
)
app = fastapi.FastAPI(
title="AutoGPT Agent Server",
description=(
"This server is used to execute agents that are created by the "
"AutoGPT system."
),
summary="AutoGPT Agent Server",
version="0.1",
lifespan=lifespan_context,
docs_url=docs_url,
)
app.add_exception_handler(ValueError, handle_internal_http_error(400))
app.add_exception_handler(500, handle_internal_http_error(500))
app.include_router(backend.server.routers.v1.v1_router, tags=["v1"])
@app.get(path="/health", tags=["health"], dependencies=[])
async def health():
return {"status": "healthy"}
class AgentServer(backend.util.service.AppProcess):
def run(self):
server_app = starlette.middleware.cors.CORSMiddleware(
app=app,
allow_origins=settings.config.backend_cors_allow_origins,
allow_credentials=True,
allow_methods=["*"], # Allows all methods
allow_headers=["*"], # Allows all headers
)
uvicorn.run(
server_app,
host=backend.util.settings.Config().agent_api_host,
port=backend.util.settings.Config().agent_api_port,
)
@staticmethod
async def test_execute_graph(
graph_id: str, node_input: dict[typing.Any, typing.Any], user_id: str
):
return await backend.server.routers.v1.execute_graph(
graph_id, node_input, user_id
)
@staticmethod
async def test_create_graph(
create_graph: backend.server.routers.v1.CreateGraph,
user_id: str,
is_template=False,
):
return await backend.server.routers.v1.create_new_graph(create_graph, user_id)
@staticmethod
async def test_get_graph_run_status(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_status(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_get_graph_run_node_execution_results(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_node_execution_results(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_delete_graph(graph_id: str, user_id: str):
return await backend.server.routers.v1.delete_graph(graph_id, user_id)
def set_test_dependency_overrides(self, overrides: dict):
app.dependency_overrides.update(overrides)
|
import contextlib
import logging
import typing
import fastapi
import fastapi.responses
import starlette.middleware.cors
import uvicorn
import backend.data.block
import backend.data.db
import backend.data.user
import backend.server.routers.v1
import backend.util.service
import backend.util.settings
settings = backend.util.settings.Settings()
logger = logging.getLogger(__name__)
@contextlib.asynccontextmanager
async def lifespan_context(app: fastapi.FastAPI):
await backend.data.db.connect()
await backend.data.block.initialize_blocks()
await backend.data.user.migrate_and_encrypt_user_integrations()
yield
await backend.data.db.disconnect()
def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
def handler(request: fastapi.Request, exc: Exception):
if log_error:
logger.exception(f"{request.method} {request.url.path} failed: {exc}")
return fastapi.responses.JSONResponse(
content={
"message": f"{request.method} {request.url.path} failed",
"detail": str(exc),
},
status_code=status_code,
)
return handler
docs_url = (
"/docs"
if settings.config.app_env == backend.util.settings.AppEnvironment.LOCAL
else None
)
app = fastapi.FastAPI(
title="AutoGPT Agent Server",
description=(
"This server is used to execute agents that are created by the "
"AutoGPT system."
),
summary="AutoGPT Agent Server",
version="0.1",
lifespan=lifespan_context,
docs_url=docs_url,
)
app.add_exception_handler(ValueError, handle_internal_http_error(400))
app.add_exception_handler(500, handle_internal_http_error(500))
app.include_router(backend.server.routers.v1.v1_router, tags=["v1"])
@app.get(path="/health", tags=["health"], dependencies=[])
async def health():
return {"status": "healthy"}
class AgentServer(backend.util.service.AppProcess):
def run(self):
server_app = starlette.middleware.cors.CORSMiddleware(
app=app,
allow_origins=settings.config.backend_cors_allow_origins,
allow_credentials=True,
allow_methods=["*"], # Allows all methods
allow_headers=["*"], # Allows all headers
)
uvicorn.run(
server_app,
host=backend.util.settings.Config().agent_api_host,
port=backend.util.settings.Config().agent_api_port,
)
@staticmethod
async def test_execute_graph(
graph_id: str, node_input: dict[typing.Any, typing.Any], user_id: str
):
return await backend.server.routers.v1.execute_graph(
graph_id, node_input, user_id
)
@staticmethod
async def test_create_graph(
create_graph: backend.server.routers.v1.CreateGraph,
user_id: str,
is_template=False,
):
return await backend.server.routers.v1.create_new_graph(create_graph, user_id)
@staticmethod
async def test_get_graph_run_status(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_status(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_get_graph_run_node_execution_results(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_node_execution_results(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_delete_graph(graph_id: str, user_id: str):
return await backend.server.routers.v1.delete_graph(graph_id, user_id)
def set_test_dependency_overrides(self, overrides: dict):
app.dependency_overrides.update(overrides)
|
import contextlib
import os
import shutil
import time
from jina import DocumentArray, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
@contextlib.contextmanager
def _update_file(input_file_path, output_file_path, temp_path):
backup_file = os.path.join(temp_path, 'backup.py')
try:
shutil.copy2(output_file_path, backup_file)
shutil.copy(input_file_path, output_file_path)
time.sleep(2.0)
yield
finally:
shutil.copy2(backup_file, output_file_path)
time.sleep(2.0)
def test_reload_simple_executor(tmpdir):
from tests.integration.hot_reload.exec1.my_executor1 import MyExecutorToReload1
f = Flow().add(uses=MyExecutorToReload1, reload=True)
with f:
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
with _update_file(
os.path.join(cur_dir, 'my_executor_1_new.py'),
os.path.join(cur_dir, 'exec1/my_executor1.py'),
str(tmpdir),
):
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorAfterReload'
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
def test_reload_helper(tmpdir):
from tests.integration.hot_reload.exec2.my_executor2 import MyExecutorToReload2
f = Flow().add(uses=MyExecutorToReload2, reload=True)
with f:
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
with _update_file(
os.path.join(cur_dir, 'helper2.py'),
os.path.join(cur_dir, 'exec2/helper.py'),
str(tmpdir),
):
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorAfterReload'
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
def test_reload_with_inheritance(tmpdir):
from tests.integration.hot_reload.exec3.my_executor3 import A, EnhancedExecutor
f = Flow().add(uses=A, reload=True)
with f:
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'ABeforeReload'
with _update_file(
os.path.join(cur_dir, 'my_executor_3_new.py'),
os.path.join(cur_dir, 'exec3/my_executor3.py'),
str(tmpdir),
):
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'AAfterReload'
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'ABeforeReload'
|
import os
import time
import shutil
import contextlib
from jina import Flow, DocumentArray
cur_dir = os.path.dirname(os.path.abspath(__file__))
@contextlib.contextmanager
def _update_file(input_file_path, output_file_path, temp_path):
backup_file = os.path.join(temp_path, 'backup.py')
try:
shutil.copy2(output_file_path, backup_file)
shutil.copy(input_file_path, output_file_path)
time.sleep(2.0)
yield
finally:
shutil.copy2(backup_file, output_file_path)
time.sleep(2.0)
def test_reload_simple_executor(tmpdir):
from tests.integration.hot_reload.exec1.my_executor1 import MyExecutorToReload1
f = Flow().add(uses=MyExecutorToReload1, reload=True)
with f:
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
with _update_file(os.path.join(cur_dir, 'my_executor_1_new.py'), os.path.join(cur_dir, 'exec1/my_executor1.py'),
str(tmpdir)):
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorAfterReload'
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
def test_reload_helper(tmpdir):
from tests.integration.hot_reload.exec2.my_executor2 import MyExecutorToReload2
f = Flow().add(uses=MyExecutorToReload2, reload=True)
with f:
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
with _update_file(os.path.join(cur_dir, 'helper2.py'), os.path.join(cur_dir, 'exec2/helper.py'),
str(tmpdir)):
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorAfterReload'
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
def test_reload_with_inheritance(tmpdir):
from tests.integration.hot_reload.exec3.my_executor3 import A, EnhancedExecutor
f = Flow().add(uses=A, reload=True)
with f:
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'ABeforeReload'
with _update_file(os.path.join(cur_dir, 'my_executor_3_new.py'), os.path.join(cur_dir, 'exec3/my_executor3.py'),
str(tmpdir)):
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'AAfterReload'
res = f.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'ABeforeReload'
|
_base_ = './htc_x101-32x4d_fpn_16xb1-20e_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
groups=64,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = './htc_x101_32x4d_fpn_16x1_20e_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
groups=64,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
import time
from functools import partial
from huggingface_hub import HfApi, hf_hub_url
from packaging import version
from requests import HTTPError
from .. import config
from . import logging
logger = logging.get_logger(__name__)
# Retry `preupload_lfs_files` in `huggingface_hub<0.20.0` on the "500 (Internal Server Error)" and "503 (Service Unavailable)" HTTP errors
if config.HF_HUB_VERSION < version.parse("0.20.0"):
def preupload_lfs_files(hf_api: HfApi, **kwargs):
max_retries = 5
base_wait_time = 1
max_wait_time = 8
status_codes = [500, 503]
retry = 0
while True:
try:
hf_api.preupload_lfs_files(**kwargs)
except (RuntimeError, HTTPError) as err:
if isinstance(err, RuntimeError) and isinstance(err.__cause__, HTTPError):
err = err.__cause__
if retry >= max_retries or err.response.status_code not in status_codes:
raise err
else:
sleep_time = min(max_wait_time, base_wait_time * 2**retry) # Exponential backoff
logger.info(
f"{hf_api.preupload_lfs_files} timed out, retrying in {sleep_time}s... [{retry/max_retries}]"
)
time.sleep(sleep_time)
retry += 1
else:
break
else:
def preupload_lfs_files(hf_api: HfApi, **kwargs):
hf_api.preupload_lfs_files(**kwargs)
# backward compatibility
hf_hub_url = partial(hf_hub_url, repo_type="dataset")
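# Editor's note (worked numbers, not part of the module above): with base_wait_time=1,
# max_wait_time=8 and max_retries=5, the retry loop sleeps min(8, 1 * 2**retry)
# seconds between attempts, i.e. a capped exponential backoff.
base_wait_time, max_wait_time, max_retries = 1, 8, 5
waits = [min(max_wait_time, base_wait_time * 2**retry) for retry in range(max_retries)]
assert waits == [1, 2, 4, 8, 8]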
|
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
# old versions of hfh don't url-encode the file path
path = quote(path)
return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
|
"""An internal script to process `new_failures_with_bad_commit.json` produced by `utils/check_bad_commit.py`.
This is used by `.github/workflows/check_failed_model_tests.yml` to produce a slack report of the following form
```
<{url}|New failed tests>
{
"GH_ydshieh": {
"vit": 1
}
}
```
"""
import json
import os
from collections import Counter
from copy import deepcopy
from get_previous_daily_ci import get_last_daily_ci_run
from huggingface_hub import HfApi
if __name__ == "__main__":
api = HfApi()
job_name = os.environ.get("JOB_NAME")
with open("new_failures_with_bad_commit.json") as fp:
data = json.load(fp)
with open(f"ci_results_{job_name}/job_links.json") as fp:
job_links = json.load(fp)
# TODO: extend
team_members = [
"ydshieh",
"zucchini-nlp",
"ArthurZucker",
"gante",
"LysandreJik",
"molbap",
"qubvel",
"Rocketknight1",
"muellerzr",
"SunMarc",
]
# Counting the number of failures grouped by authors
new_data = {}
for model, model_result in data.items():
for device, failed_tests in model_result.items():
for failed_test in failed_tests:
author = failed_test["author"]
if author not in team_members:
author = failed_test["merged_by"]
if author not in new_data:
new_data[author] = Counter()
new_data[author].update([model])
for author in new_data:
new_data[author] = dict(new_data[author])
# Group by author
new_data_full = {author: deepcopy(data) for author in new_data}
for author, _data in new_data_full.items():
for model, model_result in _data.items():
for device, failed_tests in model_result.items():
# prepare job_link and add it to each entry of new failed test information.
# need to change from `single-gpu` to `single` and same for `multi-gpu` to match `job_link`.
key = model
if list(job_links.keys()) == [job_name]:
key = job_name
job_link = job_links[key][device.replace("-gpu", "")]
failed_tests = [x for x in failed_tests if x["author"] == author or x["merged_by"] == author]
for x in failed_tests:
x.update({"job_link": job_link})
model_result[device] = failed_tests
_data[model] = {k: v for k, v in model_result.items() if len(v) > 0}
new_data_full[author] = {k: v for k, v in _data.items() if len(v) > 0}
# Upload to Hub and get the url
# if it is not a scheduled run, upload the reports to a subfolder under `report_repo_folder`
report_repo_subfolder = ""
if os.getenv("GITHUB_EVENT_NAME") != "schedule":
report_repo_subfolder = f"{os.getenv('GITHUB_RUN_NUMBER')}-{os.getenv('GITHUB_RUN_ID')}"
report_repo_subfolder = f"runs/{report_repo_subfolder}"
workflow_run = get_last_daily_ci_run(
token=os.environ["ACCESS_REPO_INFO_TOKEN"], workflow_run_id=os.getenv("GITHUB_RUN_ID")
)
workflow_run_created_time = workflow_run["created_at"]
report_repo_folder = workflow_run_created_time.split("T")[0]
if report_repo_subfolder:
report_repo_folder = f"{report_repo_folder}/{report_repo_subfolder}"
report_repo_id = os.getenv("REPORT_REPO_ID")
with open("new_failures_with_bad_commit_grouped_by_authors.json", "w") as fp:
json.dump(new_data_full, fp, ensure_ascii=False, indent=4)
commit_info = api.upload_file(
path_or_fileobj="new_failures_with_bad_commit_grouped_by_authors.json",
path_in_repo=f"{report_repo_folder}/ci_results_{job_name}/new_failures_with_bad_commit_grouped_by_authors.json",
repo_id=report_repo_id,
repo_type="dataset",
token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
)
url = f"https://huggingface.co/datasets/{report_repo_id}/raw/{commit_info.oid}/{report_repo_folder}/ci_results_{job_name}/new_failures_with_bad_commit_grouped_by_authors.json"
# Add `GH_` prefix as keyword mention
output = {}
for author, item in new_data.items():
author = f"GH_{author}"
output[author] = item
report = f"<{url}|New failed tests>\\n\\n"
report += json.dumps(output, indent=4).replace('"', '\\"').replace("\n", "\\n")
print(report)
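# Editor's note (minimal sketch, not part of the script above): the per-author counts
# are plain collections.Counter objects keyed by model name, so repeated failures for
# the same model collapse into a single count. The author and model names below are
# made up.
from collections import Counter

grouped = {}
for author, model in [("alice", "vit"), ("alice", "vit"), ("alice", "bert")]:
    grouped.setdefault(author, Counter()).update([model])
assert dict(grouped["alice"]) == {"vit": 2, "bert": 1}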
|
"""An internal script to process `new_model_failures_with_bad_commit.json` produced by `utils/check_bad_commit.py`.
This is used by `.github/workflows/check_failed_model_tests.yml` to produce a slack report of the following form
```
<{url}|New failed tests>
{
"GH_ydshieh": {
"vit": 1
}
}
```
"""
import json
import os
from collections import Counter
from copy import deepcopy
from get_previous_daily_ci import get_last_daily_ci_run
from huggingface_hub import HfApi
if __name__ == "__main__":
api = HfApi()
with open("new_model_failures_with_bad_commit.json") as fp:
data = json.load(fp)
with open("ci_results_run_models_gpu/model_job_links.json") as fp:
model_job_links = json.load(fp)
# TODO: extend
team_members = [
"ydshieh",
"zucchini-nlp",
"ArthurZucker",
"gante",
"LysandreJik",
"molbap",
"qubvel",
"Rocketknight1",
"muellerzr",
"SunMarc",
]
# Counting the number of failures grouped by authors
new_data = {}
for model, model_result in data.items():
for device, failed_tests in model_result.items():
for failed_test in failed_tests:
author = failed_test["author"]
if author not in team_members:
author = failed_test["merged_by"]
if author not in new_data:
new_data[author] = Counter()
new_data[author].update([model])
for author in new_data:
new_data[author] = dict(new_data[author])
# Group by author
new_data_full = {author: deepcopy(data) for author in new_data}
for author, _data in new_data_full.items():
for model, model_result in _data.items():
for device, failed_tests in model_result.items():
# prepare job_link and add it to each entry of new failed test information.
# need to change from `single-gpu` to `single` and same for `multi-gpu` to match `job_link`.
job_link = model_job_links[model][device.replace("-gpu", "")]
failed_tests = [x for x in failed_tests if x["author"] == author or x["merged_by"] == author]
for x in failed_tests:
x.update({"job_link": job_link})
model_result[device] = failed_tests
_data[model] = {k: v for k, v in model_result.items() if len(v) > 0}
new_data_full[author] = {k: v for k, v in _data.items() if len(v) > 0}
# Upload to Hub and get the url
# if it is not a scheduled run, upload the reports to a subfolder under `report_repo_folder`
report_repo_subfolder = ""
if os.getenv("GITHUB_EVENT_NAME") != "schedule":
report_repo_subfolder = f"{os.getenv('GITHUB_RUN_NUMBER')}-{os.getenv('GITHUB_RUN_ID')}"
report_repo_subfolder = f"runs/{report_repo_subfolder}"
workflow_run = get_last_daily_ci_run(
token=os.environ["ACCESS_REPO_INFO_TOKEN"], workflow_run_id=os.getenv("GITHUB_RUN_ID")
)
workflow_run_created_time = workflow_run["created_at"]
report_repo_folder = workflow_run_created_time.split("T")[0]
if report_repo_subfolder:
report_repo_folder = f"{report_repo_folder}/{report_repo_subfolder}"
with open("new_model_failures_with_bad_commit_grouped_by_authors.json", "w") as fp:
json.dump(new_data_full, fp, ensure_ascii=False, indent=4)
commit_info = api.upload_file(
path_or_fileobj="new_model_failures_with_bad_commit_grouped_by_authors.json",
path_in_repo=f"{report_repo_folder}/ci_results_run_models_gpu/new_model_failures_with_bad_commit_grouped_by_authors.json",
repo_id="hf-internal-testing/transformers_daily_ci",
repo_type="dataset",
token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
)
url = f"https://huggingface.co/datasets/hf-internal-testing/transformers_daily_ci/raw/{commit_info.oid}/{report_repo_folder}/ci_results_run_models_gpu/new_model_failures_with_bad_commit_grouped_by_authors.json"
# Add `GH_` prefix as keyword mention
output = {}
for author, item in new_data.items():
author = f"GH_{author}"
output[author] = item
report = f"<{url}|New failed tests>\\n\\n"
report += json.dumps(output, indent=4).replace('"', '\\"').replace("\n", "\\n")
print(report)
|
_base_ = [
'../_base_/models/cascade-mask-rcnn_r50_fpn.py',
'../_base_/datasets/lvis_v1_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
roi_head=dict(
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
train_cfg = dict(val_interval=24)
|
_base_ = [
'../_base_/models/cascade-mask-rcnn_r50_fpn.py',
'../_base_/datasets/lvis_v1_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
roi_head=dict(
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
train_cfg = dict(val_interval=24)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import ZepChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ZepChatMessageHistory": "langchain_community.chat_message_histories",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ZepChatMessageHistory",
]
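# Editor's note (generic sketch, not LangChain's actual implementation): the shim
# above relies on module-level __getattr__ (PEP 562), which lets a module resolve
# attributes lazily, e.g. forwarding a deprecated name to its new home and warning
# on first use. The module path and class names below are hypothetical.
import importlib
import warnings

_MOVED = {"OldThing": ("new_package.new_module", "NewThing")}  # hypothetical target

def __getattr__(name: str):
    if name in _MOVED:
        module_path, new_name = _MOVED[name]
        warnings.warn(f"{name} moved to {module_path}.{new_name}", DeprecationWarning)
        return getattr(importlib.import_module(module_path), new_name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")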
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import ZepChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ZepChatMessageHistory": "langchain_community.chat_message_histories"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ZepChatMessageHistory",
]
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import initializers
from keras.src import layers
from keras.src import models
from keras.src import testing
class SpectralNormalizationTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basic_spectralnorm(self):
self.run_layer_test(
layers.SpectralNormalization,
init_kwargs={"layer": layers.Dense(2)},
input_data=np.random.uniform(size=(10, 3, 4)),
expected_output_shape=(10, 3, 2),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=1,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
self.run_layer_test(
layers.SpectralNormalization,
init_kwargs={"layer": layers.Embedding(10, 4)},
input_data=np.random.randint(10, size=(10,)).astype("float32"),
expected_output_shape=(10, 4),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=1,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
run_training_check=False,
)
def test_invalid_power_iterations(self):
with self.assertRaisesRegex(
ValueError, "`power_iterations` should be greater than zero."
):
layers.SpectralNormalization(layers.Dense(2), power_iterations=0)
def test_invalid_layer(self):
layer = layers.SpectralNormalization(layers.ReLU())
inputs = np.ones(shape=(4, 2))
with self.assertRaisesRegex(
ValueError, "object has no attribute 'kernel' nor 'embeddings'"
):
layer(inputs)
def test_apply_layer(self):
if backend.config.image_data_format() == "channels_last":
images = np.ones((1, 2, 2, 1))
else:
images = np.ones((1, 1, 2, 2))
sn_wrapper = layers.SpectralNormalization(
layers.Conv2D(
1, (2, 2), kernel_initializer=initializers.Constant(value=1)
),
power_iterations=8,
)
result = sn_wrapper(images, training=False)
result_train = sn_wrapper(images, training=True)
expected_output = np.array([[[[4.0]]]], dtype=np.float32)
self.assertAllClose(result, expected_output)
# max eigen value of 2x2 matrix of ones is 2
self.assertAllClose(result_train, expected_output / 2)
@pytest.mark.requires_trainable_backend
def test_end_to_end(self):
sn_wrapper = layers.SpectralNormalization(
layers.Conv2D(
3, (2, 2), padding="same", data_format="channels_last"
),
power_iterations=2,
)
model = models.Sequential([sn_wrapper])
model.compile("rmsprop", loss="mse")
x = np.random.random((4, 8, 8, 3))
y = np.random.random((4, 8, 8, 3))
model.fit(x, y)
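# Editor's note (worked numbers for the assertion above, not part of the test file):
# the 2x2 all-ones matrix has eigenvalues 2 and 0, so its largest singular value
# (the spectral norm used for normalization) is 2. Convolving the all-ones 2x2 image
# with the raw all-ones kernel sums four ones (4.0); after spectral normalization the
# kernel is divided by 2, so the training-mode output is expected_output / 2 = 2.0.
import numpy as np

ones_2x2 = np.ones((2, 2))
sigma_max = np.linalg.svd(ones_2x2, compute_uv=False)[0]  # largest singular value
assert np.isclose(sigma_max, 2.0)
assert np.isclose(ones_2x2.sum() / sigma_max, 2.0)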
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import initializers
from keras.src import layers
from keras.src import models
from keras.src import testing
class SpectralNormalizationTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basic_spectralnorm(self):
self.run_layer_test(
layers.SpectralNormalization,
init_kwargs={"layer": layers.Dense(2)},
input_data=np.random.uniform(size=(10, 3, 4)),
expected_output_shape=(10, 3, 2),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=1,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
self.run_layer_test(
layers.SpectralNormalization,
init_kwargs={"layer": layers.Embedding(10, 4)},
input_data=np.random.randint(10, size=(10,)),
expected_output_shape=(10, 4),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=1,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
run_training_check=False,
)
def test_invalid_power_iterations(self):
with self.assertRaisesRegex(
ValueError, "`power_iterations` should be greater than zero."
):
layers.SpectralNormalization(layers.Dense(2), power_iterations=0)
def test_invalid_layer(self):
layer = layers.SpectralNormalization(layers.ReLU())
inputs = np.ones(shape=(4, 2))
with self.assertRaisesRegex(
ValueError, "object has no attribute 'kernel' nor 'embeddings'"
):
layer(inputs)
def test_apply_layer(self):
if backend.config.image_data_format() == "channels_last":
images = np.ones((1, 2, 2, 1))
else:
images = np.ones((1, 1, 2, 2))
sn_wrapper = layers.SpectralNormalization(
layers.Conv2D(
1, (2, 2), kernel_initializer=initializers.Constant(value=1)
),
power_iterations=8,
)
result = sn_wrapper(images, training=False)
result_train = sn_wrapper(images, training=True)
expected_output = np.array([[[[4.0]]]], dtype=np.float32)
self.assertAllClose(result, expected_output)
# max eigen value of 2x2 matrix of ones is 2
self.assertAllClose(result_train, expected_output / 2)
@pytest.mark.requires_trainable_backend
def test_end_to_end(self):
sn_wrapper = layers.SpectralNormalization(
layers.Conv2D(
3, (2, 2), padding="same", data_format="channels_last"
),
power_iterations=2,
)
model = models.Sequential([sn_wrapper])
model.compile("rmsprop", loss="mse")
x = np.random.random((4, 8, 8, 3))
y = np.random.random((4, 8, 8, 3))
model.fit(x, y)
|