input | output
---|---|
import argparse
from jina.enums import GatewayProtocolType
from jina.helper import parse_host_scheme
from jina.logging.predefined import default_logger
class NetworkChecker:
"""Check if a BaseDeployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :class:`NetworkChecker`.
:param args: args provided by the CLI.
"""
import time
from jina import Client
from jina.logging.profile import TimeContext
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
try:
total_time = 0
total_success = 0
for j in range(args.attempts):
with TimeContext(
f'ping {args.target} on {args.host} at {j} round', default_logger
) as tc:
if args.target == 'executor':
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = WorkerRuntime.is_ready(f'{hostname}:{port}')
elif args.target == 'gateway':
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = GatewayRuntime.is_ready(
f'{hostname}:{port}',
protocol=GatewayProtocolType.from_string(protocol),
)
elif args.target == 'flow':
r = Client(host=args.host).is_flow_ready(timeout=args.timeout)
if not r:
default_logger.warning(
'not responding, attempt (%d/%d) in 1s'
% (j + 1, args.attempts)
)
else:
total_success += 1
total_time += tc.duration
if args.attempts > 0:
time.sleep(1)
if total_success < args.attempts:
default_logger.warning(
'message lost %.0f%% (%d/%d) '
% (
(1 - total_success / args.attempts) * 100,
args.attempts - total_success,
args.attempts,
)
)
if total_success > 0:
default_logger.info(
'avg. latency: %.0f ms' % (total_time / total_success * 1000)
)
if total_success >= args.min_successful_attempts:
default_logger.info(
f'readiness check succeeded {total_success} times!!!'
)
exit(0)
else:
default_logger.info(
f'readiness check succeeded {total_success} times, less than {args.min_successful_attempts}'
)
except KeyboardInterrupt:
pass
# exit with code 1 (failure) if execution reaches this point
exit(1)
|
import argparse
import urllib
from http import HTTPStatus
from jina.enums import GatewayProtocolType
from jina.helper import parse_host_scheme
from jina.logging.predefined import default_logger
class NetworkChecker:
"""Check if a BaseDeployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :class:`NetworkChecker`.
:param args: args provided by the CLI.
"""
import time
from jina import Client
from jina.logging.profile import TimeContext
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
try:
total_time = 0
total_success = 0
for j in range(args.attempts):
with TimeContext(
f'ping {args.target} on {args.host} at {j} round', default_logger
) as tc:
if args.target == 'executor':
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = WorkerRuntime.is_ready(f'{hostname}:{port}')
elif args.target == 'gateway':
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = GatewayRuntime.is_ready(
f'{hostname}:{port}',
protocol=GatewayProtocolType.from_string(protocol),
)
elif args.target == 'flow':
r = Client(host=args.host).is_flow_ready(timeout=args.timeout)
if not r:
default_logger.warning(
'not responding, attempt (%d/%d) in 1s'
% (j + 1, args.attempts)
)
else:
total_success += 1
total_time += tc.duration
if args.attempts > 0:
time.sleep(1)
if total_success < args.attempts:
default_logger.warning(
'message lost %.0f%% (%d/%d) '
% (
(1 - total_success / args.attempts) * 100,
args.attempts - total_success,
args.attempts,
)
)
if total_success > 0:
default_logger.info(
'avg. latency: %.0f ms' % (total_time / total_success * 1000)
)
if total_success >= args.min_successful_attempts:
default_logger.info(
f'readiness check succeeded {total_success} times!!!'
)
exit(0)
else:
default_logger.info(
f'readiness check succeeded {total_success} times, less than {args.min_successful_attempts}'
)
except KeyboardInterrupt:
pass
# exit with code 1 (failure) if execution reaches this point
exit(1)
|
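Both columns implement the same readiness pattern: probe the target up to `attempts` times, sleep a second between rounds, then report the loss percentage, the average latency, and an exit code derived from `min_successful_attempts`. A minimal, framework-free sketch of that loop (plain Python; `check` is a hypothetical stand-in for the `is_ready`/`is_flow_ready` probes above):
import time
def readiness_loop(check, attempts: int = 5, min_successful: int = 1) -> int:
    """Retry ``check()`` and report loss and average latency, as NetworkChecker does."""
    total_time = 0.0
    total_success = 0
    for i in range(attempts):
        start = time.perf_counter()
        ok = check()  # stand-in for WorkerRuntime/GatewayRuntime.is_ready or Client.is_flow_ready
        duration = time.perf_counter() - start
        if ok:
            total_success += 1
            total_time += duration
        else:
            print(f'not responding, attempt ({i + 1}/{attempts}) in 1s')
        time.sleep(1)
    if total_success < attempts:
        lost = (1 - total_success / attempts) * 100
        print(f'message lost {lost:.0f}% ({attempts - total_success}/{attempts})')
    if total_success:
        print(f'avg. latency: {total_time / total_success * 1000:.0f} ms')
    return 0 if total_success >= min_successful else 1
# Always-successful probe, just to exercise the loop:
print('exit code:', readiness_loop(lambda: True, attempts=2))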
_base_ = './freeanchor_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './retinanet_free_anchor_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
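Both config files rely on the same mechanism: the child config lists its parent in `_base_` and overrides only the keys it changes (here `backbone.depth` and the pretrained checkpoint), with everything else merged from the base. A simplified, framework-free sketch of that recursive merge (the real logic lives in the mmcv/mmengine `Config` class; this is only an illustration):
def merge_cfg(base: dict, override: dict) -> dict:
    """Recursively merge `override` into `base`, as _base_ inheritance does."""
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_cfg(merged[key], value)
        else:
            merged[key] = value
    return merged
# Toy base config standing in for freeanchor_r50_fpn_1x_coco.py
base = {'backbone': {'type': 'ResNet', 'depth': 50,
                     'init_cfg': {'type': 'Pretrained', 'checkpoint': 'torchvision://resnet50'}}}
override = {'backbone': {'depth': 101,
                         'init_cfg': {'type': 'Pretrained', 'checkpoint': 'torchvision://resnet101'}}}
print(merge_cfg(base, override)['backbone']['depth'])  # 101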
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmcv.utils import print_log
from mmdet.core import eval_map, eval_recalls
from mmdet.registry import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class VOCDataset(XMLDataset):
CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor')
PALETTE = [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192),
(197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255),
(153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252),
(182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0),
(0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)]
def __init__(self, **kwargs):
super(VOCDataset, self).__init__(**kwargs)
if 'VOC2007' in self.img_prefix:
self.year = 2007
elif 'VOC2012' in self.img_prefix:
self.year = 2012
else:
raise ValueError('Cannot infer dataset year from img_prefix')
def evaluate(self,
results,
metric='mAP',
logger=None,
proposal_nums=(100, 300, 1000),
iou_thr=0.5,
scale_ranges=None):
"""Evaluate in VOC protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'mAP', 'recall'.
logger (logging.Logger | str, optional): Logger used for printing
related information during evaluation. Default: None.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thr (float | list[float]): IoU threshold. Default: 0.5.
scale_ranges (list[tuple], optional): Scale ranges for evaluating
mAP. If not specified, all bounding boxes would be included in
evaluation. Default: None.
Returns:
dict[str, float]: AP/recall metrics.
"""
if not isinstance(metric, str):
assert len(metric) == 1
metric = metric[0]
allowed_metrics = ['mAP', 'recall']
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
annotations = [self.get_ann_info(i) for i in range(len(self))]
eval_results = OrderedDict()
iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
if metric == 'mAP':
assert isinstance(iou_thrs, list)
if self.year == 2007:
ds_name = 'voc07'
else:
ds_name = self.CLASSES
mean_aps = []
for iou_thr in iou_thrs:
print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
# Follow the official implementation,
# http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar
# we should use the legacy coordinate system in mmdet 1.x,
# which means w, h should be computed as `x2 - x1 + 1` and
# `y2 - y1 + 1`
mean_ap, _ = eval_map(
results,
annotations,
scale_ranges=None,
iou_thr=iou_thr,
dataset=ds_name,
logger=logger,
use_legacy_coordinate=True)
mean_aps.append(mean_ap)
eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
eval_results.move_to_end('mAP', last=False)
elif metric == 'recall':
gt_bboxes = [ann['bboxes'] for ann in annotations]
recalls = eval_recalls(
gt_bboxes,
results,
proposal_nums,
iou_thrs,
logger=logger,
use_legacy_coordinate=True)
for i, num in enumerate(proposal_nums):
for j, iou_thr in enumerate(iou_thrs):
eval_results[f'recall@{num}@{iou_thr}'] = recalls[i, j]
if recalls.shape[1] > 1:
ar = recalls.mean(axis=1)
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
return eval_results
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmcv.utils import print_log
from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class VOCDataset(XMLDataset):
CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor')
PALETTE = [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192),
(197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255),
(153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252),
(182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0),
(0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)]
def __init__(self, **kwargs):
super(VOCDataset, self).__init__(**kwargs)
if 'VOC2007' in self.img_prefix:
self.year = 2007
elif 'VOC2012' in self.img_prefix:
self.year = 2012
else:
raise ValueError('Cannot infer dataset year from img_prefix')
def evaluate(self,
results,
metric='mAP',
logger=None,
proposal_nums=(100, 300, 1000),
iou_thr=0.5,
scale_ranges=None):
"""Evaluate in VOC protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'mAP', 'recall'.
logger (logging.Logger | str, optional): Logger used for printing
related information during evaluation. Default: None.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thr (float | list[float]): IoU threshold. Default: 0.5.
scale_ranges (list[tuple], optional): Scale ranges for evaluating
mAP. If not specified, all bounding boxes would be included in
evaluation. Default: None.
Returns:
dict[str, float]: AP/recall metrics.
"""
if not isinstance(metric, str):
assert len(metric) == 1
metric = metric[0]
allowed_metrics = ['mAP', 'recall']
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
annotations = [self.get_ann_info(i) for i in range(len(self))]
eval_results = OrderedDict()
iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
if metric == 'mAP':
assert isinstance(iou_thrs, list)
if self.year == 2007:
ds_name = 'voc07'
else:
ds_name = self.CLASSES
mean_aps = []
for iou_thr in iou_thrs:
print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
# Follow the official implementation,
# http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar
# we should use the legacy coordinate system in mmdet 1.x,
# which means w, h should be computed as `x2 - x1 + 1` and
# `y2 - y1 + 1`
mean_ap, _ = eval_map(
results,
annotations,
scale_ranges=None,
iou_thr=iou_thr,
dataset=ds_name,
logger=logger,
use_legacy_coordinate=True)
mean_aps.append(mean_ap)
eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
eval_results.move_to_end('mAP', last=False)
elif metric == 'recall':
gt_bboxes = [ann['bboxes'] for ann in annotations]
recalls = eval_recalls(
gt_bboxes,
results,
proposal_nums,
iou_thrs,
logger=logger,
use_legacy_coordinate=True)
for i, num in enumerate(proposal_nums):
for j, iou_thr in enumerate(iou_thrs):
eval_results[f'recall@{num}@{iou_thr}'] = recalls[i, j]
if recalls.shape[1] > 1:
ar = recalls.mean(axis=1)
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
return eval_results
|
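The comment above points out that VOC evaluation keeps the mmdet 1.x legacy coordinate convention, where width and height include the boundary pixel (`w = x2 - x1 + 1`), which is why `eval_map` and `eval_recalls` are called with `use_legacy_coordinate=True`. A small numpy check of how the two conventions differ for one box:
import numpy as np
boxes = np.array([[10.0, 20.0, 50.0, 80.0]])  # x1, y1, x2, y2
# Modern convention
w = boxes[:, 2] - boxes[:, 0]
h = boxes[:, 3] - boxes[:, 1]
# Legacy (VOC / mmdet 1.x) convention used with use_legacy_coordinate=True
w_legacy = boxes[:, 2] - boxes[:, 0] + 1
h_legacy = boxes[:, 3] - boxes[:, 1] + 1
print(w * h, w_legacy * h_legacy)  # [2400.] [2501.]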
# Copyright (c) OpenMMLab. All rights reserved.
from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
AmpOptimWrapper, ApexOptimWrapper, BaseOptimWrapper,
DefaultOptimWrapperConstructor, OptimWrapper,
OptimWrapperDict, ZeroRedundancyOptimizer,
build_optim_wrapper)
# yapf: disable
from .scheduler import (ConstantLR, ConstantMomentum, ConstantParamScheduler,
CosineAnnealingLR, CosineAnnealingMomentum,
CosineAnnealingParamScheduler, ExponentialLR,
ExponentialMomentum, ExponentialParamScheduler,
LinearLR, LinearMomentum, LinearParamScheduler,
MultiStepLR, MultiStepMomentum,
MultiStepParamScheduler, OneCycleLR,
OneCycleParamScheduler, PolyLR, PolyMomentum,
PolyParamScheduler, ReduceOnPlateauLR,
ReduceOnPlateauMomentum, ReduceOnPlateauParamScheduler,
StepLR, StepMomentum, StepParamScheduler,
_ParamScheduler)
# yapf: enable
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS', 'build_optim_wrapper',
'DefaultOptimWrapperConstructor', 'ConstantLR', 'CosineAnnealingLR',
'ExponentialLR', 'LinearLR', 'MultiStepLR', 'StepLR', 'ConstantMomentum',
'CosineAnnealingMomentum', 'ExponentialMomentum', 'LinearMomentum',
'MultiStepMomentum', 'StepMomentum', 'ConstantParamScheduler',
'CosineAnnealingParamScheduler', 'ExponentialParamScheduler',
'LinearParamScheduler', 'MultiStepParamScheduler', 'StepParamScheduler',
'_ParamScheduler', 'OptimWrapper', 'AmpOptimWrapper', 'ApexOptimWrapper',
'OptimWrapperDict', 'OneCycleParamScheduler', 'OneCycleLR', 'PolyLR',
'PolyMomentum', 'PolyParamScheduler', 'ReduceOnPlateauLR',
'ReduceOnPlateauMomentum', 'ReduceOnPlateauParamScheduler',
'ZeroRedundancyOptimizer', 'BaseOptimWrapper'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
AmpOptimWrapper, ApexOptimWrapper,
DefaultOptimWrapperConstructor, OptimWrapper,
OptimWrapperDict, ZeroRedundancyOptimizer,
build_optim_wrapper)
# yapf: disable
from .scheduler import (ConstantLR, ConstantMomentum, ConstantParamScheduler,
CosineAnnealingLR, CosineAnnealingMomentum,
CosineAnnealingParamScheduler, ExponentialLR,
ExponentialMomentum, ExponentialParamScheduler,
LinearLR, LinearMomentum, LinearParamScheduler,
MultiStepLR, MultiStepMomentum,
MultiStepParamScheduler, OneCycleLR,
OneCycleParamScheduler, PolyLR, PolyMomentum,
PolyParamScheduler, ReduceOnPlateauLR,
ReduceOnPlateauMomentum, ReduceOnPlateauParamScheduler,
StepLR, StepMomentum, StepParamScheduler,
_ParamScheduler)
# yapf: enable
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS', 'build_optim_wrapper',
'DefaultOptimWrapperConstructor', 'ConstantLR', 'CosineAnnealingLR',
'ExponentialLR', 'LinearLR', 'MultiStepLR', 'StepLR', 'ConstantMomentum',
'CosineAnnealingMomentum', 'ExponentialMomentum', 'LinearMomentum',
'MultiStepMomentum', 'StepMomentum', 'ConstantParamScheduler',
'CosineAnnealingParamScheduler', 'ExponentialParamScheduler',
'LinearParamScheduler', 'MultiStepParamScheduler', 'StepParamScheduler',
'_ParamScheduler', 'OptimWrapper', 'AmpOptimWrapper', 'ApexOptimWrapper',
'OptimWrapperDict', 'OneCycleParamScheduler', 'OneCycleLR', 'PolyLR',
'PolyMomentum', 'PolyParamScheduler', 'ReduceOnPlateauLR',
'ReduceOnPlateauMomentum', 'ReduceOnPlateauParamScheduler',
'ZeroRedundancyOptimizer'
]
|
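These two `__init__` variants only re-export symbols (the first additionally exposes `BaseOptimWrapper`); the classes themselves come from mmengine's optimizer and scheduler subpackages. As a rough usage sketch, assuming mmengine and torch are installed, an `OptimWrapper` wraps a plain torch optimizer and handles backward, step, and zero_grad in one call:
import torch
from mmengine.optim import OptimWrapper  # assumes mmengine is installed
model = torch.nn.Linear(4, 2)
optim_wrapper = OptimWrapper(optimizer=torch.optim.SGD(model.parameters(), lr=0.1))
x = torch.randn(8, 4)
loss = model(x).square().mean()
optim_wrapper.update_params(loss)  # loss.backward() + optimizer.step() + zero_grad()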
import warnings
from typing import List, Optional, Type
from jina.excepts import BadYAMLVersion
from jina.jaml import JAMLCompatible
from jina.jaml.parsers.base import VersionedYAMLParser
from jina.orchestrate.deployments import Deployment
from jina.serve.gateway import BaseGateway
def _get_all_parser(cls: Type['JAMLCompatible']):
"""Get all parsers and legacy parser of a class
:param cls: target class
:return: a tuple of two elements; first is a list of all parsers, second is the legacy parser for default fallback
"""
from jina.orchestrate.flow.base import Flow
from jina.serve.executors import BaseExecutor
if issubclass(cls, Flow):
return _get_flow_parser()
elif issubclass(cls, BaseExecutor):
return _get_exec_parser()
elif issubclass(cls, BaseGateway):
return _get_gateway_parser()
elif issubclass(cls, Deployment):
return _get_deployment_parser()
else:
raise NotImplementedError(f'No parser exists for cls {cls.__name__}')
def _get_flow_parser():
from jina.jaml.parsers.flow.v1 import V1Parser
return [V1Parser], V1Parser
def _get_exec_parser():
from jina.jaml.parsers.executor.legacy import ExecutorLegacyParser
return [ExecutorLegacyParser], ExecutorLegacyParser
def _get_deployment_parser():
from jina.jaml.parsers.deployment.legacy import DeploymentLegacyParser
return [DeploymentLegacyParser], DeploymentLegacyParser
def _get_gateway_parser():
from jina.jaml.parsers.gateway.legacy import GatewayLegacyParser
return [GatewayLegacyParser], GatewayLegacyParser
def get_parser(
cls: Type['JAMLCompatible'], version: Optional[str]
) -> 'VersionedYAMLParser':
"""
.. # noqa: DAR401
:param cls: the target class to parse
:param version: yaml version number in "MAJOR[.MINOR]" format
:return: parser given the YAML version
"""
all_parsers, legacy_parser = _get_all_parser(cls)
if version:
if isinstance(version, (float, int)):
version = str(version)
for p in all_parsers:
if p.version == version:
return p()
for p in all_parsers:
# fallback to major
if version.split('.')[0] == p.version:
warnings.warn(
f'can not find parser for version: {version}, '
f'fallback to parser for version: {p.version}',
UserWarning,
)
return p()
raise BadYAMLVersion(f'{version} is not a valid version number')
else:
if version is not None:
warnings.warn(
f'can not find parser for version: {version}, '
f'fallback to legacy parser. '
f'this usually means you are using a deprecated YAML format.',
DeprecationWarning,
)
# fallback to legacy parser
return legacy_parser()
def get_supported_versions(cls) -> List[str]:
"""List all supported versions
:param cls: the class to check
:return: supported versions sorted alphabetically
"""
all_parsers, _ = _get_all_parser(cls)
return list(sorted(p.version for p in all_parsers))
|
import warnings
from typing import List, Optional, Type
from jina.excepts import BadYAMLVersion
from jina.jaml import JAMLCompatible
from jina.jaml.parsers.base import VersionedYAMLParser
from jina.serve.gateway import BaseGateway
def _get_all_parser(cls: Type['JAMLCompatible']):
"""Get all parsers and legacy parser of a class
:param cls: target class
:return: a tuple of two elements; first is a list of all parsers, second is the legacy parser for default fallback
"""
from jina.orchestrate.flow.base import Flow
from jina.serve.executors import BaseExecutor
if issubclass(cls, Flow):
return _get_flow_parser()
elif issubclass(cls, BaseExecutor):
return _get_exec_parser()
elif issubclass(cls, BaseGateway):
return _get_gateway_parser()
else:
raise NotImplementedError(f'No parser exists for cls {cls.__name__}')
def _get_flow_parser():
from jina.jaml.parsers.flow.v1 import V1Parser
return [V1Parser], V1Parser
def _get_exec_parser():
from jina.jaml.parsers.executor.legacy import ExecutorLegacyParser
return [ExecutorLegacyParser], ExecutorLegacyParser
def _get_gateway_parser():
from jina.jaml.parsers.gateway.legacy import GatewayLegacyParser
return [GatewayLegacyParser], GatewayLegacyParser
def get_parser(
cls: Type['JAMLCompatible'], version: Optional[str]
) -> 'VersionedYAMLParser':
"""
.. # noqa: DAR401
:param cls: the target class to parse
:param version: yaml version number in "MAJOR[.MINOR]" format
:return: parser given the YAML version
"""
all_parsers, legacy_parser = _get_all_parser(cls)
if version:
if isinstance(version, (float, int)):
version = str(version)
for p in all_parsers:
if p.version == version:
return p()
for p in all_parsers:
# fallback to major
if version.split('.')[0] == p.version:
warnings.warn(
f'can not find parser for version: {version}, '
f'fallback to parser for version: {p.version}',
UserWarning,
)
return p()
raise BadYAMLVersion(f'{version} is not a valid version number')
else:
if version is not None:
warnings.warn(
f'can not find parser for version: {version}, '
f'fallback to legacy parser. '
f'this usually means you are using a deprecated YAML format.',
DeprecationWarning,
)
# fallback to legacy parser
return legacy_parser()
def get_supported_versions(cls) -> List[str]:
"""List all supported versions
:param cls: the class to check
:return: supported versions sorted alphabetically
"""
all_parsers, _ = _get_all_parser(cls)
return list(sorted(p.version for p in all_parsers))
|
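`get_parser` resolves a version string in three steps: exact match against each parser's `version`, then a major-version fallback with a `UserWarning`, and finally `BadYAMLVersion`. A stripped-down sketch of that lookup with two hypothetical parser classes:
import warnings
class ParserV1:
    version = '1'
class ParserV2:
    version = '2'
def pick_parser(version: str, parsers=(ParserV1, ParserV2)):
    for p in parsers:
        if p.version == version:
            return p()
    for p in parsers:
        if version.split('.')[0] == p.version:  # fallback to major version
            warnings.warn(f'no parser for {version}, falling back to {p.version}')
            return p()
    raise ValueError(f'{version} is not a valid version number')
print(type(pick_parser('1')).__name__)    # ParserV1
print(type(pick_parser('2.7')).__name__)  # ParserV2 (major-version fallback)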
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
|
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
pass
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
REPO_ROOT_DIR = Path(__file__).parent.parent.absolute()
TOYDATA_DIR = REPO_ROOT_DIR / 'tests' / 'toydata'
|
from pathlib import Path
REPO_ROOT_DIR = Path(__file__).parent.parent.absolute()
TOYDATA_DIR = REPO_ROOT_DIR / 'tests' / 'toydata'
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseMSEEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load any dataset with some texts
dataset = load_dataset("sentence-transformers/stsb", split="validation")
sentences = dataset["sentence1"] + dataset["sentence2"]
# Given source and target sentences and a teacher model, the SparseMSEEvaluator computes the MSE between teacher and student embeddings.
mse_evaluator = SparseMSEEvaluator(
source_sentences=sentences,
target_sentences=sentences,
teacher_model=teacher_model,
name="stsb-dev",
)
results = mse_evaluator(student_model)
"""
MSE evaluation (lower = better) on the stsb-dev dataset:
MSE (*100): 0.035540
Model Sparsity: Active Dimensions: 55.6, Sparsity Ratio: 0.9982
"""
# Print the results
print(f"Primary metric: {mse_evaluator.primary_metric}")
# => Primary metric: stsb-dev_negative_mse
print(f"Primary metric value: {results[mse_evaluator.primary_metric]:.4f}")
# => Primary metric value: -0.0355
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseMSEEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load any dataset with some texts
dataset = load_dataset("sentence-transformers/stsb", split="validation")
sentences = dataset["sentence1"] + dataset["sentence2"]
# Given source and target sentences and a teacher model, the SparseMSEEvaluator computes the MSE between teacher and student embeddings.
mse_evaluator = SparseMSEEvaluator(
source_sentences=sentences,
target_sentences=sentences,
teacher_model=teacher_model,
name="stsb-dev",
)
results = mse_evaluator(student_model)
"""
MSE evaluation (lower = better) on the stsb-dev dataset:
MSE (*100): 0.035540
Model Sparsity Stats: Row Non-Zero Mean: 55.60933303833008, Row Sparsity Mean: 0.9981780648231506
"""
# Print the results
print(f"Primary metric: {mse_evaluator.primary_metric}")
# => Primary metric: stsb-dev_negative_mse
print(f"Primary metric value: {results[mse_evaluator.primary_metric]:.4f}")
# => Primary metric value: -0.0355
|
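The `MSE (*100)` line in both logs is, at its core, the mean squared difference between teacher and student embeddings scaled by 100, and the evaluator's primary metric is the negated value (hence `-0.0355`). A hedged numpy sketch of the metric itself, with random vectors standing in for real embeddings:
import numpy as np
rng = np.random.default_rng(0)
teacher_emb = rng.normal(size=(4, 8))                            # stand-in for teacher embeddings
student_emb = teacher_emb + rng.normal(scale=0.05, size=(4, 8))  # imperfect student
mse_times_100 = ((teacher_emb - student_emb) ** 2).mean() * 100
print(f'MSE (*100): {mse_times_100:.6f}')
# The evaluator reports the negated value as its primary metric, so higher is better.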
from pathlib import Path
default_exec_file = Path(__file__).absolute().parents[2] / "lightgbm"
def pytest_addoption(parser):
parser.addoption("--execfile", action="store", default=str(default_exec_file))
|
from pathlib import Path
default_exec_file = Path(__file__).absolute().parents[2] / 'lightgbm'
def pytest_addoption(parser):
parser.addoption('--execfile', action='store', default=str(default_exec_file))
|
import logging
from pathlib import Path
from typing import Optional, Sequence
from llama_index.core.base.llms.types import ImageBlock
from llama_index.core.multi_modal_llms.base import ChatMessage, ImageNode
DEFAULT_OPENAI_API_TYPE = "open_ai"
DEFAULT_OPENAI_API_BASE = "https://api.openai.com/v1"
GPT4V_MODELS = {
"gpt-4-vision-preview": 128000,
"gpt-4-turbo-2024-04-09": 128000,
"gpt-4-turbo": 128000,
"gpt-4o": 128000,
"gpt-4o-2024-05-13": 128000,
"gpt-4o-2024-08-06": 128000,
"gpt-4o-2024-11-20": 128000,
"gpt-4o-mini": 128000,
"gpt-4o-mini-2024-07-18": 128000,
"o1": 200000,
"o1-2024-12-17": 200000,
"o3-mini": 200000,
"o3-mini-2025-01-31": 200000,
}
MISSING_API_KEY_ERROR_MESSAGE = """No API key found for OpenAI.
Please set either the OPENAI_API_KEY environment variable or \
openai.api_key prior to initialization.
API keys can be found or created at \
https://platform.openai.com/account/api-keys
"""
logger = logging.getLogger(__name__)
def generate_openai_multi_modal_chat_message(
prompt: str,
role: str,
image_documents: Optional[Sequence[ImageNode]] = None,
image_detail: Optional[str] = "low",
) -> ChatMessage:
"""Create a ChatMessage to be used in a multimodal query."""
chat_msg = ChatMessage(role=role, content=prompt)
if image_documents is None:
# if no image documents are provided, return a text-only chat message
return chat_msg
for image_document in image_documents:
# Create the appropriate ContentBlock depending on the document content
if image_document.image:
chat_msg.blocks.append(
ImageBlock(
image=bytes(image_document.image, encoding="utf-8"),
detail=image_detail,
)
)
elif image_document.image_url:
chat_msg.blocks.append(
ImageBlock(url=image_document.image_url, detail=image_detail)
)
elif image_document.image_path:
chat_msg.blocks.append(
ImageBlock(
path=Path(image_document.image_path),
detail=image_detail,
image_mimetype=image_document.image_mimetype
or image_document.metadata.get("file_type"),
)
)
elif f_path := image_document.metadata.get("file_path"):
chat_msg.blocks.append(
ImageBlock(
path=Path(f_path),
detail=image_detail,
image_mimetype=image_document.metadata.get("file_type"),
)
)
return chat_msg
|
import logging
from pathlib import Path
from typing import Optional, Sequence
from llama_index.core.base.llms.types import ImageBlock
from llama_index.core.multi_modal_llms.base import ChatMessage, ImageNode
DEFAULT_OPENAI_API_TYPE = "open_ai"
DEFAULT_OPENAI_API_BASE = "https://api.openai.com/v1"
GPT4V_MODELS = {
"gpt-4-vision-preview": 128000,
"gpt-4-turbo-2024-04-09": 128000,
"gpt-4-turbo": 128000,
"gpt-4o": 128000,
"gpt-4o-2024-05-13": 128000,
"gpt-4o-2024-08-06": 128000,
"gpt-4o-2024-11-20": 128000,
"gpt-4o-mini": 128000,
"gpt-4o-mini-2024-07-18": 128000,
"o1": 200000,
"o1-2024-12-17": 200000,
}
MISSING_API_KEY_ERROR_MESSAGE = """No API key found for OpenAI.
Please set either the OPENAI_API_KEY environment variable or \
openai.api_key prior to initialization.
API keys can be found or created at \
https://platform.openai.com/account/api-keys
"""
logger = logging.getLogger(__name__)
def generate_openai_multi_modal_chat_message(
prompt: str,
role: str,
image_documents: Optional[Sequence[ImageNode]] = None,
image_detail: Optional[str] = "low",
) -> ChatMessage:
"""Create a ChatMessage to be used in a multimodal query."""
chat_msg = ChatMessage(role=role, content=prompt)
if image_documents is None:
# if no image documents are provided, return a text-only chat message
return chat_msg
for image_document in image_documents:
# Create the appropriate ContentBlock depending on the document content
if image_document.image:
chat_msg.blocks.append(
ImageBlock(
image=bytes(image_document.image, encoding="utf-8"),
detail=image_detail,
)
)
elif image_document.image_url:
chat_msg.blocks.append(
ImageBlock(url=image_document.image_url, detail=image_detail)
)
elif image_document.image_path:
chat_msg.blocks.append(
ImageBlock(
path=Path(image_document.image_path),
detail=image_detail,
image_mimetype=image_document.image_mimetype
or image_document.metadata.get("file_type"),
)
)
elif f_path := image_document.metadata.get("file_path"):
chat_msg.blocks.append(
ImageBlock(
path=Path(f_path),
detail=image_detail,
image_mimetype=image_document.metadata.get("file_type"),
)
)
return chat_msg
|
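As a rough usage sketch of the helper above (assuming `llama-index-core` is installed and using a hypothetical image URL): pass a prompt, a role, and a list of `ImageNode`s, and each image source is appended to the returned `ChatMessage` as an `ImageBlock`:
from llama_index.core.multi_modal_llms.base import ImageNode
image_doc = ImageNode(image_url="https://example.com/cat.png")  # hypothetical URL
msg = generate_openai_multi_modal_chat_message(
    prompt="What is in this image?",
    role="user",
    image_documents=[image_doc],
    image_detail="low",
)
print(msg.role, msg.blocks)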
_base_ = ['../common/ms_3x_coco.py', '../_base_/models/faster-rcnn_r50_fpn.py']
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
_base_ = [
'../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
_base_ = './fast-rcnn_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(type='BN', requires_grad=False),
style='caffe',
norm_eval=True,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = './fast-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='BN', requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=2000),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=None),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['proposals']),
dict(
type='ToDataContainer',
fields=[dict(key='proposals', stack=False)]),
dict(type='Collect', keys=['img', 'proposals']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
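A key difference between these two configs is where the Caffe-style normalization lives: the newer file moves `mean`, `std`, and `bgr_to_rgb` into `DetDataPreprocessor`, while the older one applies the same numbers through a `Normalize` transform in the pipeline. The operation itself is just per-channel mean subtraction on BGR images with unit std, e.g.:
import numpy as np
mean = np.array([103.530, 116.280, 123.675])  # BGR channel means from the config
std = np.array([1.0, 1.0, 1.0])
img_bgr = np.random.randint(0, 256, size=(4, 4, 3)).astype(np.float32)  # toy BGR image
normalized = (img_bgr - mean) / std  # to_rgb=False / bgr_to_rgb=False: stay in BGR
print(normalized.mean(axis=(0, 1)))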
import glob
import os
from datetime import datetime
import pytest
from jina import Document, Flow, __uptime__, __windows__
from jina.enums import LogVerbosity
from jina.helper import colored
from jina.logging.logger import JinaLogger
cur_dir = os.path.dirname(os.path.abspath(__file__))
def log(logger: JinaLogger):
logger.debug('this is test debug message')
logger.info('this is test info message')
logger.success('this is test success message')
logger.warning('this is test warning message')
logger.error('this is test error message')
logger.critical('this is test critical message')
def test_color_log():
with JinaLogger('test_logger') as logger:
logger.debug('this is test debug message')
logger.info('this is test info message')
logger.info(f'this is test {colored("color", "red")} message')
logger.success('this is test success message')
logger.warning('this is test warning message')
logger.error('this is test error message')
logger.critical('this is test critical message')
def test_logging_syslog():
with JinaLogger(
'test_logger', log_config=os.path.join(cur_dir, 'yaml/syslog.yml')
) as logger:
log(logger)
assert len(logger.handlers) == (0 if __windows__ else 1)
def test_logging_default():
with JinaLogger('test_logger') as logger:
log(logger)
assert len(logger.handlers) == 1
def test_logging_level_yaml(monkeypatch):
monkeypatch.delenv('JINA_LOG_LEVEL', raising=True) # ignore global env
fn = os.path.join(cur_dir, f'jina-{__uptime__}.log')
with JinaLogger(
'test_file_logger', log_config=os.path.join(cur_dir, 'yaml/file.yml')
) as file_logger:
if os.path.exists(fn):
os.remove(fn)
log(file_logger)
assert file_logger.logger.level == LogVerbosity.from_string('INFO')
for f in glob.glob(cur_dir + '/*.log'):
os.remove(f)
def test_logging_file(monkeypatch):
monkeypatch.delenv('JINA_LOG_LEVEL', raising=True) # ignore global env
uptime = __uptime__.replace(':', '.') if __windows__ else __uptime__
fn = os.path.join(cur_dir, f'jina-{uptime}.log')
with JinaLogger(
'test_file_logger', log_config=os.path.join(cur_dir, 'yaml/file.yml')
) as file_logger:
log(file_logger)
assert os.path.exists(fn)
with open(fn) as fp:
assert len(fp.readlines()) == 5
for f in glob.glob(cur_dir + '/*.log'):
os.remove(f)
@pytest.mark.slow
def test_logging_quiet(caplog):
# no way to capture logs in multiprocessing
# see discussion here: https://github.com/pytest-dev/pytest/issues/3037#issuecomment-745050393
f = Flow().add(quiet=True).add()
with f:
f.index(Document())
|
import glob
import os
from datetime import datetime
import pytest
from jina import Document, Flow, __uptime__, __windows__
from jina.enums import LogVerbosity
from jina.helper import colored
from jina.logging.logger import JinaLogger
cur_dir = os.path.dirname(os.path.abspath(__file__))
def log(logger: JinaLogger):
logger.debug('this is test debug message')
logger.info('this is test info message')
logger.success('this is test success message')
logger.warning('this is test warning message')
logger.error('this is test error message')
logger.critical('this is test critical message')
def test_color_log():
with JinaLogger('test_logger') as logger:
logger.debug('this is test debug message')
logger.info('this is test info message')
logger.info(f'this is test {colored("color", "red")} message')
logger.success('this is test success message')
logger.warning('this is test warning message')
logger.error('this is test error message')
logger.critical('this is test critical message')
def test_logging_syslog():
with JinaLogger(
'test_logger', log_config=os.path.join(cur_dir, 'yaml/syslog.yml')
) as logger:
log(logger)
assert len(logger.handlers) == (0 if __windows__ else 1)
def test_logging_default():
with JinaLogger('test_logger') as logger:
log(logger)
assert len(logger.handlers) == 1
def test_logging_level_yaml(monkeypatch):
monkeypatch.delenv('JINA_LOG_LEVEL', raising=True) # ignore global env
fn = os.path.join(cur_dir, f'jina-{__uptime__}.log')
with JinaLogger(
'test_file_logger', log_config=os.path.join(cur_dir, 'yaml/file.yml')
) as file_logger:
if os.path.exists(fn):
os.remove(fn)
log(file_logger)
assert file_logger.logger.level == LogVerbosity.from_string('INFO')
for f in glob.glob(cur_dir + '/*.log'):
os.remove(f)
def test_logging_file(monkeypatch):
monkeypatch.delenv('JINA_LOG_LEVEL', raising=True) # ignore global env
uptime = __uptime__.replace(':', '.') if __windows__ else __uptime__
fn = os.path.join(cur_dir, f'jina-{uptime}.log')
with JinaLogger(
'test_file_logger', log_config=os.path.join(cur_dir, 'yaml/file.yml')
) as file_logger:
log(file_logger)
assert os.path.exists(fn)
with open(fn) as fp:
assert len(fp.readlines()) == 5
for f in glob.glob(cur_dir + '/*.log'):
os.remove(f)
@pytest.mark.slow
def test_logging_quiet(caplog):
# no way to capture logs in multiprocessing
# see discussion here: https://github.com/pytest-dev/pytest/issues/3037#issuecomment-745050393
f = Flow().add().add()
with f:
f.index(Document())
f = Flow().add(quiet=True).add()
with f:
f.index(Document())
f = Flow().add(quiet=True).add(quiet=True)
with f:
f.index(Document())
f = Flow(quiet=True).add().add()
with f:
f.index(Document())
|
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
import transformers
from PIL import Image
from sentence_transformers.models.Router import InputModule
class CLIPModel(InputModule):
save_in_root: bool = True
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None:
super().__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self) -> str:
return "CLIPModel()"
@property
def max_seq_length(self) -> int:
return self.processor.tokenizer.model_max_length
@max_seq_length.setter
def max_seq_length(self, value: int) -> None:
self.processor.tokenizer.model_max_length = value
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: str | bool = True) -> dict[str, torch.Tensor]:
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
encoding = {}
if len(texts_values):
encoding = self.processor.tokenizer(texts_values, padding=padding, truncation=True, return_tensors="pt")
if len(images):
image_features = self.processor.image_processor(images, return_tensors="pt")
encoding["pixel_values"] = image_features.pixel_values
encoding["image_text_info"] = image_text_info
return dict(encoding)
@property
def tokenizer(self) -> transformers.CLIPProcessor:
return self.processor
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.model.save_pretrained(output_path, safe_serialization=safe_serialization)
self.processor.save_pretrained(output_path)
@classmethod
def load(
cls,
model_name_or_path: str,
subfolder: str = "",
token: bool | str | None = None,
cache_folder: str | None = None,
revision: str | None = None,
local_files_only: bool = False,
**kwargs,
) -> Self:
local_path = cls.load_dir_path(
model_name_or_path=model_name_or_path,
subfolder=subfolder,
token=token,
cache_folder=cache_folder,
revision=revision,
local_files_only=local_files_only,
)
return cls(local_path)
|
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
import transformers
from PIL import Image
from sentence_transformers.models.Asym import InputModule
class CLIPModel(InputModule):
save_in_root: bool = True
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None:
super().__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self) -> str:
return "CLIPModel()"
@property
def max_seq_length(self) -> int:
return self.processor.tokenizer.model_max_length
@max_seq_length.setter
def max_seq_length(self, value: int) -> None:
self.processor.tokenizer.model_max_length = value
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: str | bool = True) -> dict[str, torch.Tensor]:
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
encoding = {}
if len(texts_values):
encoding = self.processor.tokenizer(texts_values, padding=padding, truncation=True, return_tensors="pt")
if len(images):
image_features = self.processor.image_processor(images, return_tensors="pt")
encoding["pixel_values"] = image_features.pixel_values
encoding["image_text_info"] = image_text_info
return dict(encoding)
@property
def tokenizer(self) -> transformers.CLIPProcessor:
return self.processor
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.model.save_pretrained(output_path, safe_serialization=safe_serialization)
self.processor.save_pretrained(output_path)
@classmethod
def load(
cls,
model_name_or_path: str,
subfolder: str = "",
token: bool | str | None = None,
cache_folder: str | None = None,
revision: str | None = None,
local_files_only: bool = False,
**kwargs,
) -> Self:
local_path = cls.load_dir_path(
model_name_or_path=model_name_or_path,
subfolder=subfolder,
token=token,
cache_folder=cache_folder,
revision=revision,
local_files_only=local_files_only,
)
return cls(local_path)
|
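In both variants, `tokenize` splits PIL images and strings into separate batches and `forward` re-interleaves the two embedding streams via `image_text_info`, so one `encode` call can mix texts and images. A hedged usage sketch through sentence-transformers (assumes the pretrained `clip-ViT-B-32` checkpoint can be downloaded and that `two_dogs.jpg` is a local image file):
from PIL import Image
from sentence_transformers import SentenceTransformer, util
model = SentenceTransformer("clip-ViT-B-32")
# Mixed batch: a local image (hypothetical path) and two captions
inputs = [Image.open("two_dogs.jpg"), "a photo of two dogs", "a photo of a cat"]
embeddings = model.encode(inputs)
print(util.cos_sim(embeddings[0], embeddings[1:]))  # image vs. each caption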
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
class JinaEmbeddingBlock(Block):
class Input(BlockSchema):
texts: list = SchemaField(description="List of texts to embed")
credentials: JinaCredentialsInput = JinaCredentialsField()
model: str = SchemaField(
description="Jina embedding model to use",
default="jina-embeddings-v2-base-en",
)
class Output(BlockSchema):
embeddings: list = SchemaField(description="List of embeddings")
def __init__(self):
super().__init__(
id="7c56b3ab-62e7-43a2-a2dc-4ec4245660b6",
description="Generates embeddings using Jina AI",
categories={BlockCategory.AI},
input_schema=JinaEmbeddingBlock.Input,
output_schema=JinaEmbeddingBlock.Output,
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.jina.ai/v1/embeddings"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
data = {"input": input_data.texts, "model": input_data.model}
response = Requests().post(url, headers=headers, json=data)
embeddings = [e["embedding"] for e in response.json()["data"]]
yield "embeddings", embeddings
|
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class JinaEmbeddingBlock(Block):
class Input(BlockSchema):
texts: list = SchemaField(description="List of texts to embed")
credentials: JinaCredentialsInput = JinaCredentialsField()
model: str = SchemaField(
description="Jina embedding model to use",
default="jina-embeddings-v2-base-en",
)
class Output(BlockSchema):
embeddings: list = SchemaField(description="List of embeddings")
def __init__(self):
super().__init__(
id="7c56b3ab-62e7-43a2-a2dc-4ec4245660b6",
description="Generates embeddings using Jina AI",
categories={BlockCategory.AI},
input_schema=JinaEmbeddingBlock.Input,
output_schema=JinaEmbeddingBlock.Output,
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.jina.ai/v1/embeddings"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
data = {"input": input_data.texts, "model": input_data.model}
response = requests.post(url, headers=headers, json=data)
embeddings = [e["embedding"] for e in response.json()["data"]]
yield "embeddings", embeddings
|
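Outside the block framework, the request that `run` issues can be reproduced with plain `requests`: POST the texts and model name to the embeddings endpoint with a bearer token, then read each `embedding` from the JSON response. A sketch, assuming a valid key is stored in a hypothetical `JINA_API_KEY` environment variable:
import os
import requests
url = "https://api.jina.ai/v1/embeddings"
headers = {
    "Content-Type": "application/json",
    "Authorization": f"Bearer {os.environ['JINA_API_KEY']}",  # hypothetical env var
}
data = {"input": ["hello world"], "model": "jina-embeddings-v2-base-en"}
response = requests.post(url, headers=headers, json=data)
embeddings = [e["embedding"] for e in response.json()["data"]]
print(len(embeddings), len(embeddings[0]))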
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='AudioTensorFlowTensor')
@_register_proto(proto_type_name='audio_tensorflow_tensor')
class AudioTensorFlowTensor(
AbstractAudioTensor, TensorFlowTensor, metaclass=metaTensorFlow
):
"""
Subclass of TensorFlowTensor, to represent an audio tensor.
Adds audio-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import tensorflow as tf
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import AudioTensorFlowTensor, AudioUrl
class MyAudioDoc(BaseDoc):
title: str
audio_tensor: Optional[AudioTensorFlowTensor]
url: Optional[AudioUrl]
bytes_: Optional[bytes]
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=tf.random.normal((1000, 2)),
)
doc_1.audio_tensor.save(file_path='path/to/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = doc_2.url.load()
doc_2.audio_tensor.save(file_path='path/to/file_2.wav')
doc_2.bytes_ = doc_1.audio_tensor.to_bytes()
"""
...
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='AudioTensorFlowTensor')
@_register_proto(proto_type_name='audio_tensorflow_tensor')
class AudioTensorFlowTensor(
AbstractAudioTensor, TensorFlowTensor, metaclass=metaTensorFlow
):
"""
Subclass of TensorFlowTensor, to represent an audio tensor.
Adds audio-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import tensorflow as tf
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.typing import AudioTensorFlowTensor, AudioUrl
class MyAudioDoc(BaseDocument):
title: str
audio_tensor: Optional[AudioTensorFlowTensor]
url: Optional[AudioUrl]
bytes_: Optional[bytes]
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=tf.random.normal((1000, 2)),
)
doc_1.audio_tensor.save(file_path='path/to/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = doc_2.url.load()
doc_2.audio_tensor.save(file_path='path/to/file_2.wav')
doc_2.bytes_ = doc_1.audio_tensor.to_bytes()
"""
...
|
"""
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`~sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding atoms of different widths matters, which
motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = (
(2 / (np.sqrt(3 * width) * np.pi**0.25))
* (1 - (x - center) ** 2 / width**2)
* np.exp(-((x - center) ** 2) / (2 * width**2))
)
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D**2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution, n_components=n_components)
D_multi = np.r_[
tuple(
ricker_matrix(width=w, resolution=resolution, n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000)
)
]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.0
y[np.logical_not(first_quarter)] = -1.0
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha,
# transform_n_nonzero_coefs, color)
estimators = [
("OMP", "omp", None, 15, "navy"),
("Lasso", "lasso_lars", 2, None, "turquoise"),
]
lw = 2
plt.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(
zip((D_fixed, D_multi), ("fixed width", "multiple widths"))
):
plt.subplot(1, 2, subplot + 1)
plt.title("Sparse coding against %s dictionary" % title)
plt.plot(y, lw=lw, linestyle="--", label="Original signal")
# Do a wavelet approximation
for title, algo, alpha, n_nonzero, color in estimators:
coder = SparseCoder(
dictionary=D,
transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha,
transform_algorithm=algo,
)
x = coder.transform(y.reshape(1, -1))
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(
x,
color=color,
lw=lw,
label="%s: %s nonzero coefs,\n%.2f error" % (title, density, squared_error),
)
# Soft thresholding debiasing
coder = SparseCoder(
dictionary=D, transform_algorithm="threshold", transform_alpha=20
)
x = coder.transform(y.reshape(1, -1))
_, idx = (x != 0).nonzero()
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y, rcond=None)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(
x,
color="darkorange",
lw=lw,
label="Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error"
% (len(idx), squared_error),
)
plt.axis("tight")
plt.legend(shadow=False, loc="best")
plt.subplots_adjust(0.04, 0.07, 0.97, 0.90, 0.09, 0.2)
plt.show()
|
"""
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`~sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding atoms of different widths matters, which
motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = (
(2 / (np.sqrt(3 * width) * np.pi**0.25))
* (1 - (x - center) ** 2 / width**2)
* np.exp(-((x - center) ** 2) / (2 * width**2))
)
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D**2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution, n_components=n_components)
D_multi = np.r_[
tuple(
ricker_matrix(width=w, resolution=resolution, n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000)
)
]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.0
y[np.logical_not(first_quarter)] = -1.0
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha,
# transform_n_nonzero_coefs, color)
estimators = [
("OMP", "omp", None, 15, "navy"),
("Lasso", "lasso_lars", 2, None, "turquoise"),
]
lw = 2
plt.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(
zip((D_fixed, D_multi), ("fixed width", "multiple widths"))
):
plt.subplot(1, 2, subplot + 1)
plt.title("Sparse coding against %s dictionary" % title)
plt.plot(y, lw=lw, linestyle="--", label="Original signal")
# Do a wavelet approximation
for title, algo, alpha, n_nonzero, color in estimators:
coder = SparseCoder(
dictionary=D,
transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha,
transform_algorithm=algo,
)
x = coder.transform(y.reshape(1, -1))
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(
x,
color=color,
lw=lw,
label="%s: %s nonzero coefs,\n%.2f error" % (title, density, squared_error),
)
# Soft thresholding debiasing
coder = SparseCoder(
dictionary=D, transform_algorithm="threshold", transform_alpha=20
)
x = coder.transform(y.reshape(1, -1))
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y, rcond=None)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(
x,
color="darkorange",
lw=lw,
label="Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error"
% (len(idx), squared_error),
)
plt.axis("tight")
plt.legend(shadow=False, loc="best")
plt.subplots_adjust(0.04, 0.07, 0.97, 0.90, 0.09, 0.2)
plt.show()
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
"num_proc",
],
defaults=[None, None, None, False, False, False, False, False, None],
)
def is_1percent_close(source, target):
return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
test_command = TestCommand(*args)
test_command.run()
dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
assert os.path.exists(dataset_readme_path)
dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
expected_dataset_infos = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string")),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
),
"langs": Sequence(Value("string")),
"spans": Sequence(Value("string")),
}
),
splits=[
{
"name": "train",
"num_bytes": 2351563,
"num_examples": 10000,
},
{
"name": "validation",
"num_bytes": 238418,
"num_examples": 1000,
},
],
download_size=3940680,
dataset_size=2589981,
)
}
)
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
if key == "num_bytes":
assert is_1percent_close(result, expected)
elif key == "splits":
assert list(result) == list(expected)
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
else:
            assert result == expected
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
test_command = TestCommand(*args)
test_command.run()
dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
assert os.path.exists(dataset_readme_path)
dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
expected_dataset_infos = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string")),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
),
"langs": Sequence(Value("string")),
"spans": Sequence(Value("string")),
}
),
splits=[
{
"name": "train",
"num_bytes": 2351563,
"num_examples": 10000,
},
{
"name": "validation",
"num_bytes": 238418,
"num_examples": 1000,
},
],
download_size=3940680,
dataset_size=2589981,
)
}
)
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
if key == "num_bytes":
assert is_1percent_close(result, expected)
elif key == "splits":
assert list(result) == list(expected)
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
else:
            assert result == expected
|
import os
import numpy as np
import keras
from keras.src import testing
from keras.src.saving.file_editor import KerasFileEditor
def get_source_model():
inputs = keras.Input((2,))
x = keras.layers.Dense(3, name="mydense")(inputs)
outputs = keras.layers.Dense(3, name="output_layer")(x)
model = keras.Model(inputs, outputs)
return model
def get_target_model():
inputs = keras.Input((2,))
x = keras.layers.Dense(3, name="mydense")(inputs)
x = keras.layers.Dense(3, name="myotherdense")(x)
outputs = keras.layers.Dense(3, name="output_layer")(x)
model = keras.Model(inputs, outputs)
return model
class SavingTest(testing.TestCase):
def test_basics(self):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
model = get_source_model()
model.save(temp_filepath)
editor = KerasFileEditor(temp_filepath)
editor.summary()
target_model = get_target_model()
out = editor.compare(model) # Succeeds
self.assertEqual(out["status"], "success")
out = editor.compare(target_model) # Fails
editor.add_object(
"layers/dense_3", weights={"0": np.random.random((3, 3))}
)
out = editor.compare(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 2)
editor.rename_object("dense_3", "dense_4")
editor.rename_object("layers/dense_4", "dense_2")
editor.add_weights("dense_2", weights={"1": np.random.random((3,))})
out = editor.compare(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.add_object(
"layers/dense_3", weights={"0": np.random.random((3, 3))}
)
out = editor.compare(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.delete_object("layers/dense_3")
out = editor.compare(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.summary()
temp_filepath = os.path.join(self.get_temp_dir(), "resaved.weights.h5")
editor.save(temp_filepath)
target_model.load_weights(temp_filepath)
editor = KerasFileEditor(temp_filepath)
editor.summary()
out = editor.compare(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.delete_weight("dense_2", "1")
out = editor.compare(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.add_weights("dense_2", {"1": np.zeros((7,))})
out = editor.compare(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.delete_weight("dense_2", "1")
editor.add_weights("dense_2", {"1": np.zeros((3,))})
out = editor.compare(target_model) # Succeeds
self.assertEqual(out["status"], "success")
|
import os
import numpy as np
import keras
from keras.src import testing
from keras.src.saving.file_editor import KerasFileEditor
def get_source_model():
inputs = keras.Input((2,))
x = keras.layers.Dense(3, name="mydense")(inputs)
outputs = keras.layers.Dense(3, name="output_layer")(x)
model = keras.Model(inputs, outputs)
return model
def get_target_model():
inputs = keras.Input((2,))
x = keras.layers.Dense(3, name="mydense")(inputs)
x = keras.layers.Dense(3, name="myotherdense")(x)
outputs = keras.layers.Dense(3, name="output_layer")(x)
model = keras.Model(inputs, outputs)
return model
class SavingTest(testing.TestCase):
def test_basics(self):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
model = get_source_model()
model.save(temp_filepath)
editor = KerasFileEditor(temp_filepath)
editor.summary()
target_model = get_target_model()
out = editor.compare_to(model) # Succeeds
self.assertEqual(out["status"], "success")
out = editor.compare_to(target_model) # Fails
editor.add_object(
"layers/dense_3", weights={"0": np.random.random((3, 3))}
)
out = editor.compare_to(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 2)
editor.rename_object("dense_3", "dense_4")
editor.rename_object("layers/dense_4", "dense_2")
editor.add_weights("dense_2", weights={"1": np.random.random((3,))})
out = editor.compare_to(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.add_object(
"layers/dense_3", weights={"0": np.random.random((3, 3))}
)
out = editor.compare_to(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.delete_object("layers/dense_3")
out = editor.compare_to(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.summary()
temp_filepath = os.path.join(self.get_temp_dir(), "resaved.weights.h5")
editor.resave_weights(temp_filepath)
target_model.load_weights(temp_filepath)
editor = KerasFileEditor(temp_filepath)
editor.summary()
out = editor.compare_to(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.delete_weight("dense_2", "1")
out = editor.compare_to(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.add_weights("dense_2", {"1": np.zeros((7,))})
out = editor.compare_to(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.delete_weight("dense_2", "1")
editor.add_weights("dense_2", {"1": np.zeros((3,))})
out = editor.compare_to(target_model) # Succeeds
self.assertEqual(out["status"], "success")
|
import pytest
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.workflow.workflow import Context, Workflow
class DummyEvent(Event):
pass
class IntermediateEvent1(Event):
value: int
class IntermediateEvent2(Event):
value: int
class StepWorkflow(Workflow):
probe: str = ""
@step
async def step1(self, ctx: Context, ev: StartEvent) -> None:
ctx.send_event(IntermediateEvent1(value=21))
ctx.send_event(IntermediateEvent2(value=23))
@step
async def step2a(self, ev: IntermediateEvent1) -> StopEvent:
return StopEvent(result=ev.value * 2)
@step
async def step2b(self, ev: IntermediateEvent2) -> None:
self.probe = "test"
@pytest.mark.asyncio
async def test_simple_stepwise():
workflow = StepWorkflow(disable_validation=True)
handler = workflow.run(stepwise=True)
while produced_events := await handler.run_step():
for ev in produced_events:
handler.ctx.send_event(ev) # type: ignore
result = await handler
assert result == 42
# Ensure step2b was executed before exiting the workflow
assert workflow.probe == "test"
|
import pytest
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.workflow.workflow import Context, Workflow
class DummyEvent(Event):
pass
class IntermediateEvent1(Event):
value: int
class IntermediateEvent2(Event):
value: int
class StepWorkflow(Workflow):
probe: str = ""
@step
async def step1(self, ctx: Context, ev: StartEvent) -> None:
ctx.send_event(IntermediateEvent1(value=21))
ctx.send_event(IntermediateEvent2(value=23))
@step
async def step2a(self, ev: IntermediateEvent1) -> StopEvent:
return StopEvent(result=ev.value * 2)
@step
async def step2b(self, ev: IntermediateEvent2) -> None:
self.probe = "test"
@pytest.mark.asyncio()
async def test_simple_stepwise():
workflow = StepWorkflow(disable_validation=True)
handler = workflow.run(stepwise=True)
while produced_events := await handler.run_step():
for ev in produced_events:
handler.ctx.send_event(ev) # type: ignore
result = await handler
assert result == 42
# Ensure step2b was executed before exiting the workflow
assert workflow.probe == "test"
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
model.similarity_fn_name = "cosine"  # the model is trained with dot-product similarity, but the gold scores in this dataset are cosine similarities, so evaluate with cosine
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
"""
EmbeddingSimilarityEvaluator: Evaluating the model on the sts_dev dataset:
Cosine-Similarity: Pearson: 0.8430 Spearman: 0.8368
Model Sparsity: Active Dimensions: 81.1, Sparsity Ratio: 0.9973
"""
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
# => Primary metric: sts_dev_spearman_cosine
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8368
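# A small follow-up sketch (the sentence pair below is arbitrary): with
# similarity_fn_name set to "cosine", model.similarity() scores pairs with
# cosine similarity, which is how the STSB gold scores are defined.
sentences = ["A plane is taking off.", "An air plane is taking off."]
embeddings = model.encode(sentences)
print(model.similarity(embeddings, embeddings))  # 2x2 matrix of cosine similarities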
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
model.similarity_fn_name = "cosine"  # the model is trained with dot-product similarity, but the gold scores in this dataset are cosine similarities, so evaluate with cosine
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
"""
EmbeddingSimilarityEvaluator: Evaluating the model on the sts_dev dataset:
Cosine-Similarity : Pearson: 0.8430 Spearman: 0.8368
Model Sparsity Stats: Row Non-Zero Mean: 81.0629997253418, Row Sparsity Mean: 0.997344046831131
"""
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
# => Primary metric: sts_dev_spearman_cosine
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8368
|
from typing import Dict
from jina.helper import TYPE_CHECKING, T, deprecate_by, typename
if TYPE_CHECKING: # pragma: no cover
from jina.proto import jina_pb2
class ProtoTypeMixin:
"""The base mixin class of all Jina types.
.. note::
- All Jina types should inherit from this class.
        - All subclasses should have ``self._pb_body``
        - All subclasses should implement ``__init__`` with the possibility of initializing from ``None``, e.g.:
.. highlight:: python
.. code-block:: python
class MyJinaType(ProtoTypeMixin):
def __init__(self, proto: Optional[jina_pb2.SomePbMsg] = None):
self._pb_body = proto or jina_pb2.SomePbMsg()
"""
def to_json(self) -> str:
"""Return the object in JSON string
:return: JSON string of the object
"""
from google.protobuf.json_format import MessageToJson
return MessageToJson(
self.proto, preserving_proto_field_name=True, sort_keys=True
)
def to_dict(self, **kwargs) -> Dict:
"""Return the object in Python dictionary.
.. note::
Array like object such as :class:`numpy.ndarray` (i.e. anything described as :class:`jina_pb2.NdArrayProto`)
will be converted to Python list.
:param kwargs: Extra kwargs to be passed to MessageToDict, like use_integers_for_enums
:return: dict representation of the object
"""
from google.protobuf.json_format import MessageToDict
return MessageToDict(self.proto, preserving_proto_field_name=True, **kwargs)
@property
def proto(self) -> 'jina_pb2._reflection.GeneratedProtocolMessageType':
"""Return the underlying Protobuf object
:return: Protobuf representation of the object
"""
return self._pb_body
def to_bytes(self) -> bytes:
"""Return the serialized the message to a string.
For more Pythonic code, please use ``bytes(...)``.
:return: binary string representation of the object
"""
return self.proto.SerializePartialToString()
def __getstate__(self):
return self._pb_body.__getstate__()
def __setstate__(self, state):
self.__init__()
self._pb_body.__setstate__(state)
@property
def nbytes(self) -> int:
"""Return total bytes consumed by protobuf.
:return: number of bytes
"""
return len(bytes(self))
def __getattr__(self, name: str):
return getattr(self._pb_body, name)
def __repr__(self):
content = str(tuple(field[0].name for field in self.proto.ListFields()))
content += f' at {id(self)}'
return f'<{typename(self)} {content.strip()}>'
def MergeFrom(self: T, other: T) -> None:
"""Merge the content of target
:param other: the document to merge from
"""
self._pb_body.MergeFrom(other._pb_body)
def CopyFrom(self: T, other: T) -> None:
"""Copy the content of target
:param other: the document to copy from
"""
self._pb_body.CopyFrom(other._pb_body)
def clear(self) -> None:
"""Remove all values from all fields of this Document."""
self._pb_body.Clear()
def pop(self, *fields) -> None:
"""Remove the values from the given fields of this Document.
:param fields: field names
"""
for k in fields:
self._pb_body.ClearField(k)
def __eq__(self, other):
if other is None:
return False
return self.proto == other.proto
def __bytes__(self):
return self.to_bytes()
dict = deprecate_by(to_dict)
json = deprecate_by(to_json)
binary_str = deprecate_by(to_bytes)
|
from typing import Dict
from jina.helper import TYPE_CHECKING, T, deprecate_by, typename
if TYPE_CHECKING:
from jina.proto import jina_pb2
class ProtoTypeMixin:
"""The base mixin class of all Jina types.
.. note::
- All Jina types should inherit from this class.
        - All subclasses should have ``self._pb_body``
        - All subclasses should implement ``__init__`` with the possibility of initializing from ``None``, e.g.:
.. highlight:: python
.. code-block:: python
class MyJinaType(ProtoTypeMixin):
def __init__(self, proto: Optional[jina_pb2.SomePbMsg] = None):
self._pb_body = proto or jina_pb2.SomePbMsg()
"""
def to_json(self) -> str:
"""Return the object in JSON string
:return: JSON string of the object
"""
from google.protobuf.json_format import MessageToJson
return MessageToJson(
self.proto, preserving_proto_field_name=True, sort_keys=True
)
def to_dict(self, **kwargs) -> Dict:
"""Return the object in Python dictionary.
.. note::
Array like object such as :class:`numpy.ndarray` (i.e. anything described as :class:`jina_pb2.NdArrayProto`)
will be converted to Python list.
:param kwargs: Extra kwargs to be passed to MessageToDict, like use_integers_for_enums
:return: dict representation of the object
"""
from google.protobuf.json_format import MessageToDict
return MessageToDict(self.proto, preserving_proto_field_name=True, **kwargs)
@property
def proto(self) -> 'jina_pb2._reflection.GeneratedProtocolMessageType':
"""Return the underlying Protobuf object
:return: Protobuf representation of the object
"""
return self._pb_body
def to_bytes(self) -> bytes:
"""Return the serialized the message to a string.
For more Pythonic code, please use ``bytes(...)``.
:return: binary string representation of the object
"""
return self.proto.SerializePartialToString()
def __getstate__(self):
return self._pb_body.__getstate__()
def __setstate__(self, state):
self.__init__()
self._pb_body.__setstate__(state)
@property
def nbytes(self) -> int:
"""Return total bytes consumed by protobuf.
:return: number of bytes
"""
return len(bytes(self))
def __getattr__(self, name: str):
return getattr(self._pb_body, name)
def __repr__(self):
content = str(tuple(field[0].name for field in self.proto.ListFields()))
content += f' at {id(self)}'
return f'<{typename(self)} {content.strip()}>'
def MergeFrom(self: T, other: T) -> None:
"""Merge the content of target
:param other: the document to merge from
"""
self._pb_body.MergeFrom(other._pb_body)
def CopyFrom(self: T, other: T) -> None:
"""Copy the content of target
:param other: the document to copy from
"""
self._pb_body.CopyFrom(other._pb_body)
def clear(self) -> None:
"""Remove all values from all fields of this Document."""
self._pb_body.Clear()
def pop(self, *fields) -> None:
"""Remove the values from the given fields of this Document.
:param fields: field names
"""
for k in fields:
self._pb_body.ClearField(k)
def __eq__(self, other):
if other is None:
return False
return self.proto == other.proto
def __bytes__(self):
return self.to_bytes()
dict = deprecate_by(to_dict)
json = deprecate_by(to_json)
binary_str = deprecate_by(to_bytes)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import numpy as np
import pytest
import torch
import torchvision.models.video as models
from jina import Document, DocumentArray, Executor
from torchvision import transforms
from video_torch_encoder import ConvertFCHWtoCFHW, ConvertFHWCtoFCHW, VideoTorchEncoder
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder(model_name):
ex = VideoTorchEncoder(
model_name=model_name, use_default_preprocessing=False, download_progress=False
)
da = DocumentArray(
[Document(blob=np.random.random((3, 2, 224, 224))) for _ in range(10)]
)
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
@pytest.mark.parametrize('batch_size', [1, 3, 10])
def test_video_torch_encoder_traversal_paths(batch_size):
ex = VideoTorchEncoder(use_default_preprocessing=False, download_progress=False)
def _create_doc_with_video_chunks():
d = Document(blob=np.random.random((3, 2, 112, 112)))
d.chunks = [Document(blob=np.random.random((3, 2, 112, 112))) for _ in range(5)]
return d
da = DocumentArray([_create_doc_with_video_chunks() for _ in range(10)])
ex.encode(da, {'traversal_paths': ['r', 'c'], 'batch_size': batch_size})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
assert len(doc.chunks) == 5
for chunk in doc.chunks:
assert chunk.embedding.shape == (512,)
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder_use_default_preprocessing(model_name):
ex = VideoTorchEncoder(
model_name=model_name, use_default_preprocessing=True, download_progress=False
)
da = DocumentArray(
[Document(blob=np.random.random((10, 270, 480, 3))) for _ in range(10)]
)
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
@pytest.fixture()
def kinects_videos():
from torchvision.datasets import Kinetics400
dataset = Kinetics400(
root=Path(__file__).parents[1] / 'data/kinetics400', frames_per_clip=20
)
return [dataset[0][0], dataset[0][0]]
@pytest.mark.parametrize('model_name', ['mc3_18', 'r2plus1d_18', 'r3d_18'])
def test_with_dataset_video(model_name, kinects_videos):
da = DocumentArray(
[Document(blob=video.detach().numpy()) for video in kinects_videos]
)
ex = VideoTorchEncoder(
use_default_preprocessing=True,
model_name=model_name,
download_progress=False,
)
ex.encode(da, {})
assert len(da) == 2
for doc in da:
assert doc.embedding.shape == (512,)
model = getattr(models, model_name)(pretrained=True, progress=False).eval()
mean = (0.43216, 0.394666, 0.37645)
std = (0.22803, 0.22145, 0.216989)
resize_size = (128, 171)
crop_size = (112, 112)
t = transforms.Compose(
[
ConvertFHWCtoFCHW(),
transforms.ConvertImageDtype(torch.float32),
transforms.Resize(resize_size),
transforms.Normalize(mean=mean, std=std),
transforms.CenterCrop(crop_size),
ConvertFCHWtoCFHW(),
]
)
tensor = torch.stack([t(video) for video in kinects_videos])
def _get_embeddings(x) -> torch.Tensor:
embeddings = torch.Tensor()
def get_activation(model, model_input, output):
nonlocal embeddings
embeddings = output
handle = model.avgpool.register_forward_hook(get_activation)
model(x)
handle.remove()
return embeddings.flatten(1)
embedding_batch = _get_embeddings(tensor)
for doc, expected_torch_embedding in zip(da, embedding_batch):
np.testing.assert_almost_equal(
doc.embedding, expected_torch_embedding.detach().numpy()
)
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
@pytest.mark.gpu
def test_video_torch_encoder_use_default_preprocessing_gpu(model_name):
ex = VideoTorchEncoder(
model_name=model_name,
use_default_preprocessing=True,
device='cuda',
download_progress=False,
)
da = DocumentArray(
[Document(blob=np.random.random((10, 270, 480, 3))) for _ in range(10)]
)
assert ex.device == 'cuda'
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import numpy as np
import pytest
import torch
import torchvision.models.video as models
from jina import Document, DocumentArray, Executor
from torchvision import transforms
from ...video_torch_encoder import (
ConvertFCHWtoCFHW,
ConvertFHWCtoFCHW,
VideoTorchEncoder,
)
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder(model_name):
ex = VideoTorchEncoder(
model_name=model_name, use_default_preprocessing=False, download_progress=False
)
da = DocumentArray(
[Document(blob=np.random.random((3, 2, 224, 224))) for _ in range(10)]
)
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
@pytest.mark.parametrize('batch_size', [1, 3, 10])
def test_video_torch_encoder_traversal_paths(batch_size):
ex = VideoTorchEncoder(use_default_preprocessing=False, download_progress=False)
def _create_doc_with_video_chunks():
d = Document(blob=np.random.random((3, 2, 112, 112)))
d.chunks = [Document(blob=np.random.random((3, 2, 112, 112))) for _ in range(5)]
return d
da = DocumentArray([_create_doc_with_video_chunks() for _ in range(10)])
ex.encode(da, {'traversal_paths': ['r', 'c'], 'batch_size': batch_size})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
assert len(doc.chunks) == 5
for chunk in doc.chunks:
assert chunk.embedding.shape == (512,)
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder_use_default_preprocessing(model_name):
ex = VideoTorchEncoder(
model_name=model_name, use_default_preprocessing=True, download_progress=False
)
da = DocumentArray(
[Document(blob=np.random.random((10, 270, 480, 3))) for _ in range(10)]
)
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
@pytest.fixture()
def kinects_videos():
from torchvision.datasets import Kinetics400
dataset = Kinetics400(
root=Path(__file__).parents[1] / 'data/kinetics400', frames_per_clip=20
)
return [dataset[0][0], dataset[0][0]]
@pytest.mark.parametrize('model_name', ['mc3_18', 'r2plus1d_18', 'r3d_18'])
def test_with_dataset_video(model_name, kinects_videos):
da = DocumentArray(
[Document(blob=video.detach().numpy()) for video in kinects_videos]
)
ex = VideoTorchEncoder(
use_default_preprocessing=True,
model_name=model_name,
download_progress=False,
)
ex.encode(da, {})
assert len(da) == 2
for doc in da:
assert doc.embedding.shape == (512,)
model = getattr(models, model_name)(pretrained=True, progress=False).eval()
mean = (0.43216, 0.394666, 0.37645)
std = (0.22803, 0.22145, 0.216989)
resize_size = (128, 171)
crop_size = (112, 112)
t = transforms.Compose(
[
ConvertFHWCtoFCHW(),
transforms.ConvertImageDtype(torch.float32),
transforms.Resize(resize_size),
transforms.Normalize(mean=mean, std=std),
transforms.CenterCrop(crop_size),
ConvertFCHWtoCFHW(),
]
)
tensor = torch.stack([t(video) for video in kinects_videos])
def _get_embeddings(x) -> torch.Tensor:
embeddings = torch.Tensor()
def get_activation(model, model_input, output):
nonlocal embeddings
embeddings = output
handle = model.avgpool.register_forward_hook(get_activation)
model(x)
handle.remove()
return embeddings.flatten(1)
embedding_batch = _get_embeddings(tensor)
for doc, expected_torch_embedding in zip(da, embedding_batch):
np.testing.assert_almost_equal(
doc.embedding, expected_torch_embedding.detach().numpy()
)
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
@pytest.mark.gpu
def test_video_torch_encoder_use_default_preprocessing_gpu(model_name):
ex = VideoTorchEncoder(
model_name=model_name,
use_default_preprocessing=True,
device='cuda',
download_progress=False,
)
da = DocumentArray(
[Document(blob=np.random.random((10, 270, 480, 3))) for _ in range(10)]
)
assert ex.device == 'cuda'
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
|
"""
This example runs a BiLSTM after the word embedding lookup. The output of the BiLSTM is then pooled,
for example with max-pooling (which gives a system like InferSent) or with mean-pooling.
Note, you can also pass BERT embeddings to the BiLSTM.
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses, models
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_bilstm-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to traditional word embeddings like GloVe
word_embedding_model = models.WordEmbeddings.from_text_file("glove.6B.300d.txt.gz")
lstm = models.LSTM(word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(), hidden_dim=1024)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
lstm.get_word_embedding_dimension(),
pooling_mode="mean",
)
model = SentenceTransformer(modules=[word_embedding_model, lstm, pooling_model])
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="glove-bilstm-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 8. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = "glove-bilstm-sts"
try:
model.push_to_hub(model_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}')`."
)
|
"""
This example runs a BiLSTM after the word embedding lookup. The output of the BiLSTM is then pooled,
for example with max-pooling (which gives a system like InferSent) or with mean-pooling.
Note, you can also pass BERT embeddings to the BiLSTM.
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses, util
from sentence_transformers import LoggingHandler, SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import os
import csv
import gzip
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Read the dataset
batch_size = 32
model_save_path = "output/training_stsbenchmark_bilstm-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
# Map tokens to traditional word embeddings like GloVe
word_embedding_model = models.WordEmbeddings.from_text_file("glove.6B.300d.txt.gz")
lstm = models.LSTM(word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(), hidden_dim=1024)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
lstm.get_word_embedding_dimension(),
pooling_mode_mean_tokens=False,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=True,
)
model = SentenceTransformer(modules=[word_embedding_model, lstm, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
num_epochs = 10
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
model.evaluate(test_evaluator)
|
import logging
import random
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load the NFcorpus IR dataset (https://huggingface.co/datasets/BeIR/nfcorpus, https://huggingface.co/datasets/BeIR/nfcorpus-qrels)
corpus = load_dataset("BeIR/nfcorpus", "corpus", split="corpus")
queries = load_dataset("BeIR/nfcorpus", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/nfcorpus-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 1,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=1000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {}  # Query ID to relevant documents (qid => set([relevant_cids]))
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-nfcorpus-subset-test",
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = ir_evaluator(model)
"""
Queries: 323
Corpus: 3269
Score-Function: dot
Accuracy@1: 50.46%
Accuracy@3: 64.40%
Accuracy@5: 67.49%
Accuracy@10: 72.14%
Precision@1: 50.46%
Precision@3: 40.87%
Precision@5: 34.12%
Precision@10: 26.10%
Recall@1: 6.11%
Recall@3: 11.73%
Recall@5: 13.64%
Recall@10: 17.24%
MRR@10: 0.5801
NDCG@10: 0.3626
MAP@100: 0.1832
Model Sparsity Stats Query : Row Non-Zero Mean: 43.08049392700195, Row Sparsity Mean: 0.9985886216163635
Model Sparsity Stats Corpus : Row Non-Zero Mean: 206.8623504638672, Row Sparsity Mean: 0.9932224750518799
"""
# Print the results
print(f"Primary metric: {ir_evaluator.primary_metric}")
# => Primary metric: BeIR-nfcorpus-subset-test_dot_ndcg@10
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.3626
|
import logging
import random
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load the NFcorpus IR dataset (https://huggingface.co/datasets/BeIR/nfcorpus, https://huggingface.co/datasets/BeIR/nfcorpus-qrels)
corpus = load_dataset("BeIR/nfcorpus", "corpus", split="corpus")
queries = load_dataset("BeIR/nfcorpus", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/nfcorpus-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 1,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=1000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {}  # Query ID to relevant documents (qid => set([relevant_cids]))
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-nfcorpus-subset-test",
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = ir_evaluator(model)
"""
Query info: num_rows: 323, num_cols: 30522, row_non_zero_mean: 42.891639709472656, row_sparsity_mean: 0.9985947012901306
Corpus info: num_rows: 3270, num_cols: 30522, row_non_zero_mean: 206.98899841308594, row_sparsity_mean: 0.9932184219360352
Score-Function: dot
Accuracy@1: 50.46%
Accuracy@3: 64.09%
Accuracy@5: 67.49%
Accuracy@10: 72.14%
Precision@1: 50.46%
Precision@3: 40.76%
Precision@5: 34.06%
Precision@10: 25.98%
Recall@1: 6.09%
Recall@3: 11.73%
Recall@5: 13.64%
Recall@10: 17.21%
MRR@10: 0.5796
NDCG@10: 0.3613
MAP@100: 0.1827
Primary metric value: 0.3613
"""
# Print the results
print(f"Primary metric: {ir_evaluator.primary_metric}")
# => Primary metric: BeIR-nfcorpus-subset-test_dot_ndcg@10
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.3613
|
from __future__ import annotations
from typing import Any, Callable, List, Tuple, Type, Union
import PIL.Image
from torchvision import datapoints
from torchvision._utils import sequence_to_str
from torchvision.transforms.v2.functional import get_dimensions, get_size, is_simple_tensor
def get_bounding_boxes(flat_inputs: List[Any]) -> datapoints.BoundingBoxes:
# This assumes there is only one bbox per sample as per the general convention
try:
return next(inpt for inpt in flat_inputs if isinstance(inpt, datapoints.BoundingBoxes))
except StopIteration:
raise ValueError("No bounding boxes were found in the sample")
def query_chw(flat_inputs: List[Any]) -> Tuple[int, int, int]:
chws = {
tuple(get_dimensions(inpt))
for inpt in flat_inputs
if check_type(inpt, (is_simple_tensor, datapoints.Image, PIL.Image.Image, datapoints.Video))
}
if not chws:
raise TypeError("No image or video was found in the sample")
elif len(chws) > 1:
raise ValueError(f"Found multiple CxHxW dimensions in the sample: {sequence_to_str(sorted(chws))}")
c, h, w = chws.pop()
return c, h, w
def query_size(flat_inputs: List[Any]) -> Tuple[int, int]:
sizes = {
tuple(get_size(inpt))
for inpt in flat_inputs
if check_type(
inpt,
(
is_simple_tensor,
datapoints.Image,
PIL.Image.Image,
datapoints.Video,
datapoints.Mask,
datapoints.BoundingBoxes,
),
)
}
if not sizes:
raise TypeError("No image, video, mask or bounding box was found in the sample")
elif len(sizes) > 1:
raise ValueError(f"Found multiple HxW dimensions in the sample: {sequence_to_str(sorted(sizes))}")
h, w = sizes.pop()
return h, w
def check_type(obj: Any, types_or_checks: Tuple[Union[Type, Callable[[Any], bool]], ...]) -> bool:
for type_or_check in types_or_checks:
if isinstance(obj, type_or_check) if isinstance(type_or_check, type) else type_or_check(obj):
return True
return False
def has_any(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for inpt in flat_inputs:
if check_type(inpt, types_or_checks):
return True
return False
def has_all(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for type_or_check in types_or_checks:
for inpt in flat_inputs:
if isinstance(inpt, type_or_check) if isinstance(type_or_check, type) else type_or_check(inpt):
break
else:
return False
return True
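# A minimal usage sketch of the helpers above (the sample below is made up for
# illustration): check_type/has_any/has_all accept both concrete types and
# boolean predicates such as is_simple_tensor.
if __name__ == "__main__":
    import torch

    sample = [torch.rand(3, 32, 32), PIL.Image.new("RGB", (32, 32))]
    print(has_any(sample, datapoints.Image, PIL.Image.Image))  # True: a PIL image is present
    print(has_all(sample, PIL.Image.Image, is_simple_tensor))  # True: both kinds are present
    print(has_all(sample, datapoints.Mask))  # False: no segmentation mask in the sample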
|
from __future__ import annotations
from typing import Any, Callable, List, Tuple, Type, Union
import PIL.Image
from torchvision import datapoints
from torchvision._utils import sequence_to_str
from torchvision.transforms.v2.functional import get_dimensions, get_size, is_simple_tensor
def query_bounding_boxes(flat_inputs: List[Any]) -> datapoints.BoundingBoxes:
bounding_boxes = [inpt for inpt in flat_inputs if isinstance(inpt, datapoints.BoundingBoxes)]
if not bounding_boxes:
raise TypeError("No bounding boxes were found in the sample")
elif len(bounding_boxes) > 1:
raise ValueError("Found multiple bounding boxes instances in the sample")
return bounding_boxes.pop()
def query_chw(flat_inputs: List[Any]) -> Tuple[int, int, int]:
chws = {
tuple(get_dimensions(inpt))
for inpt in flat_inputs
if check_type(inpt, (is_simple_tensor, datapoints.Image, PIL.Image.Image, datapoints.Video))
}
if not chws:
raise TypeError("No image or video was found in the sample")
elif len(chws) > 1:
raise ValueError(f"Found multiple CxHxW dimensions in the sample: {sequence_to_str(sorted(chws))}")
c, h, w = chws.pop()
return c, h, w
def query_size(flat_inputs: List[Any]) -> Tuple[int, int]:
sizes = {
tuple(get_size(inpt))
for inpt in flat_inputs
if check_type(
inpt,
(
is_simple_tensor,
datapoints.Image,
PIL.Image.Image,
datapoints.Video,
datapoints.Mask,
datapoints.BoundingBoxes,
),
)
}
if not sizes:
raise TypeError("No image, video, mask or bounding box was found in the sample")
elif len(sizes) > 1:
raise ValueError(f"Found multiple HxW dimensions in the sample: {sequence_to_str(sorted(sizes))}")
h, w = sizes.pop()
return h, w
def check_type(obj: Any, types_or_checks: Tuple[Union[Type, Callable[[Any], bool]], ...]) -> bool:
for type_or_check in types_or_checks:
if isinstance(obj, type_or_check) if isinstance(type_or_check, type) else type_or_check(obj):
return True
return False
def has_any(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for inpt in flat_inputs:
if check_type(inpt, types_or_checks):
return True
return False
def has_all(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for type_or_check in types_or_checks:
for inpt in flat_inputs:
if isinstance(inpt, type_or_check) if isinstance(type_or_check, type) else type_or_check(inpt):
break
else:
return False
return True
|
import os
from enum import Enum
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class SearchDepth(Enum):
"""Search depth as enumerator."""
BASIC = "basic"
ADVANCED = "advanced"
class TavilySearchAPIRetriever(BaseRetriever):
"""Tavily Search API retriever.
Setup:
Install ``langchain-community`` and set environment variable ``TAVILY_API_KEY``.
.. code-block:: bash
pip install -U langchain-community
export TAVILY_API_KEY="your-api-key"
Key init args:
k: int
Number of results to include.
include_generated_answer: bool
Include a generated answer with results
include_raw_content: bool
Include raw content with results.
include_images: bool
Return images in addition to text.
Instantiate:
.. code-block:: python
from langchain_community.retrievers import TavilySearchAPIRetriever
retriever = TavilySearchAPIRetriever(k=3)
Usage:
.. code-block:: python
query = "what year was breath of the wild released?"
retriever.invoke(query)
Use within a chain:
.. code-block:: python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_template(
\"\"\"Answer the question based only on the context provided.
Context: {context}
Question: {question}\"\"\"
)
llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
def format_docs(docs):
return "\n\n".join(doc.page_content for doc in docs)
chain = (
{"context": retriever | format_docs, "question": RunnablePassthrough()}
| prompt
| llm
| StrOutputParser()
)
            chain.invoke("how many units did breath of the wild sell in 2020")
""" # noqa: E501
k: int = 10
include_generated_answer: bool = False
include_raw_content: bool = False
include_images: bool = False
search_depth: SearchDepth = SearchDepth.BASIC
include_domains: Optional[List[str]] = None
exclude_domains: Optional[List[str]] = None
kwargs: Optional[Dict[str, Any]] = {}
api_key: Optional[str] = None
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
try:
try:
from tavily import TavilyClient
except ImportError:
                # Older versions of tavily used Client
from tavily import Client as TavilyClient
except ImportError:
raise ImportError(
"Tavily python package not found. "
"Please install it with `pip install tavily-python`."
)
tavily = TavilyClient(api_key=self.api_key or os.environ["TAVILY_API_KEY"])
max_results = self.k if not self.include_generated_answer else self.k - 1
response = tavily.search(
query=query,
max_results=max_results,
search_depth=self.search_depth.value,
include_answer=self.include_generated_answer,
include_domains=self.include_domains,
exclude_domains=self.exclude_domains,
include_raw_content=self.include_raw_content,
include_images=self.include_images,
**self.kwargs,
)
docs = [
Document(
page_content=result.get("content", "")
if not self.include_raw_content
else (result.get("raw_content") or ""),
metadata={
"title": result.get("title", ""),
"source": result.get("url", ""),
**{
k: v
for k, v in result.items()
if k not in ("content", "title", "url", "raw_content")
},
"images": response.get("images"),
},
)
for result in response.get("results")
]
if self.include_generated_answer:
docs = [
Document(
page_content=response.get("answer", ""),
metadata={
"title": "Suggested Answer",
"source": "https://tavily.com/",
},
),
*docs,
]
return docs
|
import os
from enum import Enum
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class SearchDepth(Enum):
"""Search depth as enumerator."""
BASIC = "basic"
ADVANCED = "advanced"
class TavilySearchAPIRetriever(BaseRetriever):
"""Tavily Search API retriever.
Setup:
Install ``langchain-community`` and set environment variable ``TAVILY_API_KEY``.
.. code-block:: bash
pip install -U langchain-community
export TAVILY_API_KEY="your-api-key"
Key init args:
k: int
Number of results to include.
include_generated_answer: bool
Include a generated answer with results
include_raw_content: bool
Include raw content with results.
include_images: bool
Return images in addition to text.
Instantiate:
.. code-block:: python
from langchain_community.retrievers import TavilySearchAPIRetriever
retriever = TavilySearchAPIRetriever(k=3)
Usage:
.. code-block:: python
query = "what year was breath of the wild released?"
retriever.invoke(query)
Use within a chain:
.. code-block:: python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_template(
\"\"\"Answer the question based only on the context provided.
Context: {context}
Question: {question}\"\"\"
)
llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
def format_docs(docs):
return "\n\n".join(doc.page_content for doc in docs)
chain = (
{"context": retriever | format_docs, "question": RunnablePassthrough()}
| prompt
| llm
| StrOutputParser()
)
            chain.invoke("how many units did breath of the wild sell in 2020")
""" # noqa: E501
k: int = 10
include_generated_answer: bool = False
include_raw_content: bool = False
include_images: bool = False
search_depth: SearchDepth = SearchDepth.BASIC
include_domains: Optional[List[str]] = None
exclude_domains: Optional[List[str]] = None
kwargs: Optional[Dict[str, Any]] = {}
api_key: Optional[str] = None
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
try:
try:
from tavily import TavilyClient
except ImportError:
                # Older versions of tavily used Client
from tavily import Client as TavilyClient
except ImportError:
raise ImportError(
"Tavily python package not found. "
"Please install it with `pip install tavily-python`."
)
tavily = TavilyClient(api_key=self.api_key or os.environ["TAVILY_API_KEY"])
max_results = self.k if not self.include_generated_answer else self.k - 1
response = tavily.search(
query=query,
max_results=max_results,
search_depth=self.search_depth.value,
include_answer=self.include_generated_answer,
include_domains=self.include_domains,
exclude_domains=self.exclude_domains,
include_raw_content=self.include_raw_content,
include_images=self.include_images,
**self.kwargs,
)
docs = [
Document(
page_content=result.get("content", "")
if not self.include_raw_content
else result.get("raw_content", ""),
metadata={
"title": result.get("title", ""),
"source": result.get("url", ""),
**{
k: v
for k, v in result.items()
if k not in ("content", "title", "url", "raw_content")
},
"images": response.get("images"),
},
)
for result in response.get("results")
]
if self.include_generated_answer:
docs = [
Document(
page_content=response.get("answer", ""),
metadata={
"title": "Suggested Answer",
"source": "https://tavily.com/",
},
),
*docs,
]
return docs
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
class LayerNorm(nn.Module):
def __init__(self, dimension: int):
super().__init__()
self.dimension = dimension
self.norm = nn.LayerNorm(dimension)
def forward(self, features: dict[str, Tensor]):
features["sentence_embedding"] = self.norm(features["sentence_embedding"])
return features
def get_sentence_embedding_dimension(self):
return self.dimension
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump({"dimension": self.dimension}, fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = LayerNorm(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
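# A minimal round-trip sketch (the directory name below is made up): exercising
# the save()/load() pair defined above with safetensors serialization.
if __name__ == "__main__":
    output_dir = "tmp_layer_norm"
    os.makedirs(output_dir, exist_ok=True)
    layer = LayerNorm(dimension=8)
    layer.save(output_dir)  # writes config.json and model.safetensors
    restored = LayerNorm.load(output_dir)
    assert restored.get_sentence_embedding_dimension() == 8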
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
class LayerNorm(nn.Module):
def __init__(self, dimension: int):
super(LayerNorm, self).__init__()
self.dimension = dimension
self.norm = nn.LayerNorm(dimension)
def forward(self, features: dict[str, Tensor]):
features["sentence_embedding"] = self.norm(features["sentence_embedding"])
return features
def get_sentence_embedding_dimension(self):
return self.dimension
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump({"dimension": self.dimension}, fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = LayerNorm(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type
if TYPE_CHECKING:
from docarray import BaseDocument
def _is_access_path_valid(doc_type: Type['BaseDocument'], access_path: str) -> bool:
"""
Check if a given access path ("__"-separated) is a valid path for a given Document class.
"""
field_type = _get_field_type_by_access_path(doc_type, access_path)
return field_type is not None
def _all_access_paths_valid(
doc_type: Type['BaseDocument'], access_paths: List[str]
) -> List[bool]:
"""
Check if all access paths ("__"-separated) are valid for a given Document class.
"""
return [_is_access_path_valid(doc_type, path) for path in access_paths]
def _access_path_to_dict(access_path: str, value) -> Dict[str, Any]:
"""
Convert an access path ("__"-separated) and its value to a (potentially) nested dict.
EXAMPLE USAGE
.. code-block:: python
assert access_path_to_dict('image__url', 'img.png') == {'image': {'url': 'img.png'}}
"""
fields = access_path.split('__')
for field in reversed(fields):
result = {field: value}
value = result
return result
def _access_path_dict_to_nested_dict(access_path2val: Dict[str, Any]) -> Dict[Any, Any]:
"""
    Convert a dict, where the keys are access paths ("__"-separated), to a nested dictionary.
EXAMPLE USAGE
.. code-block:: python
access_path2val = {'image__url': 'some.png'}
assert access_path_dict_to_nested_dict(access_path2val) == {
'image': {'url': 'some.png'}
}
:param access_path2val: dict with access_paths as keys
:return: nested dict where the access path keys are split into separate field names and nested keys
"""
nested_dict: Dict[Any, Any] = {}
for access_path, value in access_path2val.items():
field2val = _access_path_to_dict(
access_path=access_path,
value=value if value not in ['', 'None'] else None,
)
_update_nested_dicts(to_update=nested_dict, update_with=field2val)
return nested_dict
def _dict_to_access_paths(d: dict) -> Dict[str, Any]:
"""
Convert a (nested) dict to a Dict[access_path, value].
Access paths are defined as a path of field(s) separated by "__".
EXAMPLE USAGE
.. code-block:: python
assert dict_to_access_paths({'image': {'url': 'img.png'}}) == {'image__url': 'img.png'}
"""
result = {}
for k, v in d.items():
if isinstance(v, dict):
v = _dict_to_access_paths(v)
for nested_k, nested_v in v.items():
new_key = '__'.join([k, nested_k])
result[new_key] = nested_v
else:
result[k] = v
return result
def _update_nested_dicts(
to_update: Dict[Any, Any], update_with: Dict[Any, Any]
) -> None:
"""
Update a dict with another one, while considering shared nested keys.
EXAMPLE USAGE:
.. code-block:: python
d1 = {'image': {'tensor': None}, 'title': 'hello'}
d2 = {'image': {'url': 'some.png'}}
update_nested_dicts(d1, d2)
assert d1 == {'image': {'tensor': None, 'url': 'some.png'}, 'title': 'hello'}
:param to_update: dict that should be updated
:param update_with: dict to update with
:return: merged dict
"""
for k, v in update_with.items():
if k not in to_update.keys():
to_update[k] = v
else:
_update_nested_dicts(to_update[k], update_with[k])
def _get_field_type_by_access_path(
doc_type: Type['BaseDocument'], access_path: str
) -> Optional[Type]:
"""
Get field type by "__"-separated access path.
:param doc_type: type of document
:param access_path: "__"-separated access path
:return: field type of accessed attribute. If access path is invalid, return None.
"""
from docarray import BaseDocument, DocumentArray
field, _, remaining = access_path.partition('__')
field_valid = field in doc_type.__fields__.keys()
if field_valid:
if len(remaining) == 0:
return doc_type._get_field_type(field)
else:
d = doc_type._get_field_type(field)
if issubclass(d, DocumentArray):
return _get_field_type_by_access_path(d.document_type, remaining)
elif issubclass(d, BaseDocument):
return _get_field_type_by_access_path(d, remaining)
else:
return None
else:
return None
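# --- Hedged usage sketch (illustration only, not part of the original module):
# how the "__"-separated access-path helpers above compose.
if __name__ == "__main__":
    nested = _access_path_dict_to_nested_dict({'image__url': 'some.png', 'title': 'hello'})
    assert nested == {'image': {'url': 'some.png'}, 'title': 'hello'}
    flat = _dict_to_access_paths({'image': {'url': 'img.png'}, 'title': 'hello'})
    assert flat == {'image__url': 'img.png', 'title': 'hello'}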
|
from typing import TYPE_CHECKING, Any, Dict, List, Type
if TYPE_CHECKING:
from docarray import BaseDocument
def _is_access_path_valid(doc_type: Type['BaseDocument'], access_path: str) -> bool:
"""
Check if a given access path ("__"-separated) is a valid path for a given Document class.
"""
from docarray import BaseDocument
field, _, remaining = access_path.partition('__')
if len(remaining) == 0:
return access_path in doc_type.__fields__.keys()
else:
valid_field = field in doc_type.__fields__.keys()
if not valid_field:
return False
else:
d = doc_type._get_field_type(field)
if not issubclass(d, BaseDocument):
return False
else:
return _is_access_path_valid(d, remaining)
def _all_access_paths_valid(
doc_type: Type['BaseDocument'], access_paths: List[str]
) -> List[bool]:
"""
Check if all access paths ("__"-separated) are valid for a given Document class.
"""
return [_is_access_path_valid(doc_type, path) for path in access_paths]
def _access_path_to_dict(access_path: str, value) -> Dict[str, Any]:
"""
Convert an access path ("__"-separated) and its value to a (potentially) nested dict.
EXAMPLE USAGE
.. code-block:: python
assert access_path_to_dict('image__url', 'img.png') == {'image': {'url': 'img.png'}}
"""
fields = access_path.split('__')
for field in reversed(fields):
result = {field: value}
value = result
return result
def _access_path_dict_to_nested_dict(access_path2val: Dict[str, Any]) -> Dict[Any, Any]:
"""
Convert a dict, where the keys are access paths ("__"-separated) to a nested dictionary.
EXAMPLE USAGE
.. code-block:: python
access_path2val = {'image__url': 'some.png'}
assert access_path_dict_to_nested_dict(access_path2val) == {
'image': {'url': 'some.png'}
}
:param access_path2val: dict with access_paths as keys
:return: nested dict where the access path keys are split into separate field names and nested keys
"""
nested_dict: Dict[Any, Any] = {}
for access_path, value in access_path2val.items():
field2val = _access_path_to_dict(
access_path=access_path,
value=value if value not in ['', 'None'] else None,
)
_update_nested_dicts(to_update=nested_dict, update_with=field2val)
return nested_dict
def _dict_to_access_paths(d: dict) -> Dict[str, Any]:
"""
Convert a (nested) dict to a Dict[access_path, value].
Access paths are defined as a path of field(s) separated by "__".
EXAMPLE USAGE
.. code-block:: python
assert dict_to_access_paths({'image': {'url': 'img.png'}}) == {'image__url': 'img.png'}
"""
result = {}
for k, v in d.items():
if isinstance(v, dict):
v = _dict_to_access_paths(v)
for nested_k, nested_v in v.items():
new_key = '__'.join([k, nested_k])
result[new_key] = nested_v
else:
result[k] = v
return result
def _update_nested_dicts(
to_update: Dict[Any, Any], update_with: Dict[Any, Any]
) -> None:
"""
Update a dict with another one, while considering shared nested keys.
EXAMPLE USAGE:
.. code-block:: python
d1 = {'image': {'tensor': None}, 'title': 'hello'}
d2 = {'image': {'url': 'some.png'}}
update_nested_dicts(d1, d2)
assert d1 == {'image': {'tensor': None, 'url': 'some.png'}, 'title': 'hello'}
:param to_update: dict that should be updated
:param update_with: dict to update with
:return: merged dict
"""
for k, v in update_with.items():
if k not in to_update.keys():
to_update[k] = v
else:
_update_nested_dicts(to_update[k], update_with[k])
|
_base_ = [
'../_base_/models/faster_rcnn_r50_caffe_c4.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
|
_base_ = [
'../_base_/models/faster_rcnn_r50_caffe_c4.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
from __future__ import annotations
from collections.abc import Iterable
from enum import Enum
from typing import Any
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
between anchor and negative. It computes the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
Margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SentenceTransformerModel
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
})
loss = losses.TripletLoss(model=model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
def get_config_dict(self) -> dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = f"TripletDistanceMetric.{name}"
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
@property
def citation(self) -> str:
return """
@misc{hermans2017defense,
title={In Defense of the Triplet Loss for Person Re-Identification},
author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
year={2017},
eprint={1703.07737},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
|
from __future__ import annotations
from enum import Enum
from typing import Any, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
between anchor and negative. It computes the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
Margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SentenceTransformerModel
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
})
loss = losses.TripletLoss(model=model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
def get_config_dict(self) -> dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = f"TripletDistanceMetric.{name}"
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
@property
def citation(self) -> str:
return """
@misc{hermans2017defense,
title={In Defense of the Triplet Loss for Person Re-Identification},
author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
year={2017},
eprint={1703.07737},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
from mmdet.models.dense_heads import PAAHead, paa_head
from mmdet.models.dense_heads.paa_head import levels_to_images
def test_paa_head_loss():
"""Tests paa head loss when truth is empty and non-empty."""
class mock_skm:
def GaussianMixture(self, *args, **kwargs):
return self
def fit(self, loss):
pass
def predict(self, loss):
components = np.zeros_like(loss, dtype=np.long)
return components.reshape(-1)
def score_samples(self, loss):
scores = np.random.random(len(loss))
return scores
paa_head.skm = mock_skm()
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# since Focal Loss is not supported on CPU
self = PAAHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
self.init_weights()
cls_scores, bbox_preds, iou_preds = self(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
empty_iou_loss = empty_gt_losses['loss_iou']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_iou_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
onegt_iou_loss = one_gt_losses['loss_iou']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_iou_loss.item() > 0, 'box loss should be non-zero'
n, c, h, w = 10, 4, 20, 20
mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
results = levels_to_images(mlvl_tensor)
assert len(results) == n
assert results[0].size() == (h * w * 5, c)
assert self.with_score_voting
cls_scores = [torch.ones(2, 4, 5, 5)]
bbox_preds = [torch.ones(2, 4, 5, 5)]
iou_preds = [torch.ones(2, 1, 5, 5)]
cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
rescale = False
self.get_bboxes(
cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
from mmdet.models.dense_heads import PAAHead, paa_head
from mmdet.models.dense_heads.paa_head import levels_to_images
def test_paa_head_loss():
"""Tests paa head loss when truth is empty and non-empty."""
class mock_skm:
def GaussianMixture(self, *args, **kwargs):
return self
def fit(self, loss):
pass
def predict(self, loss):
components = np.zeros_like(loss, dtype=np.long)
return components.reshape(-1)
def score_samples(self, loss):
scores = np.random.random(len(loss))
return scores
paa_head.skm = mock_skm()
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# since Focal Loss is not supported on CPU
self = PAAHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
self.init_weights()
cls_scores, bbox_preds, iou_preds = self(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
empty_iou_loss = empty_gt_losses['loss_iou']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_iou_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
onegt_iou_loss = one_gt_losses['loss_iou']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_iou_loss.item() > 0, 'box loss should be non-zero'
n, c, h, w = 10, 4, 20, 20
mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
results = levels_to_images(mlvl_tensor)
assert len(results) == n
assert results[0].size() == (h * w * 5, c)
assert self.with_score_voting
cls_scores = [torch.ones(2, 4, 5, 5)]
bbox_preds = [torch.ones(2, 4, 5, 5)]
iou_preds = [torch.ones(2, 1, 5, 5)]
mlvl_anchors = [torch.ones(2, 5 * 5, 4)]
img_shape = None
scale_factor = [0.5, 0.5]
cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
rescale = False
self._get_bboxes(
cls_scores,
bbox_preds,
iou_preds,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=rescale)
|
import re
from typing import Dict
MISTRALAI_MODELS: Dict[str, int] = {
"mistral-tiny": 32000,
"mistral-small": 32000,
"mistral-medium": 32000,
"mistral-large": 131000,
"mistral-saba-latest": 32000,
"open-mixtral-8x7b": 32000,
"open-mistral-7b": 32000,
"open-mixtral-8x22b": 64000,
"mistral-small-latest": 32000,
"mistral-medium-latest": 32000,
"mistral-large-latest": 32000,
"codestral-latest": 256000,
"open-mistral-nemo-latest": 131000,
"ministral-8b-latest": 131000,
"ministral-3b-latest": 131000,
"pixtral-large-latest": 131000,
"pixtral-12b-2409": 131000,
"magistral-medium-2506": 40000,
"magistral-small-2506": 40000,
"magistral-medium-latest": 40000,
"magistral-small-latest": 40000,
}
MISTRALAI_FUNCTION_CALLING_MODELS = (
"mistral-large-latest",
"open-mixtral-8x22b",
"ministral-8b-latest",
"ministral-3b-latest",
"mistral-small-latest",
"codestral-latest",
"open-mistral-nemo-latest",
"pixtral-large-latest",
"pixtral-12b-2409",
"magistral-medium-2506",
"magistral-small-2506",
"magistral-medium-latest",
"magistral-small-latest",
)
MISTRAL_AI_REASONING_MODELS = (
"magistral-medium-2506",
"magistral-small-2506",
"magistral-medium-latest",
"magistral-small-latest",
)
MISTRALAI_CODE_MODELS = "codestral-latest"
THINKING_REGEX = re.compile(r"^<think>\n(.*?)\n</think>\n")
THINKING_START_REGEX = re.compile(r"^<think>\n")
def mistralai_modelname_to_contextsize(modelname: str) -> int:
# handling finetuned models
if modelname.startswith("ft:"):
modelname = modelname.split(":")[1]
if modelname not in MISTRALAI_MODELS:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid MistralAI model name."
"Known models are: " + ", ".join(MISTRALAI_MODELS.keys())
)
return MISTRALAI_MODELS[modelname]
def is_mistralai_function_calling_model(modelname: str) -> bool:
return modelname in MISTRALAI_FUNCTION_CALLING_MODELS
def is_mistralai_code_model(modelname: str) -> bool:
return modelname in MISTRALAI_CODE_MODELS
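# --- Hedged usage sketch (illustration only): context-size lookup, including the
# "ft:" fine-tuned prefix handling above (names assumed to look like "ft:<base>:<suffix>").
if __name__ == "__main__":
    assert mistralai_modelname_to_contextsize("codestral-latest") == 256000
    assert mistralai_modelname_to_contextsize("ft:open-mistral-7b:my-run") == 32000
    assert is_mistralai_function_calling_model("mistral-large-latest")
    assert not is_mistralai_function_calling_model("mistral-tiny")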
|
from typing import Dict
MISTRALAI_MODELS: Dict[str, int] = {
"mistral-tiny": 32000,
"mistral-small": 32000,
"mistral-medium": 32000,
"mistral-large": 131000,
"mistral-saba-latest": 32000,
"open-mixtral-8x7b": 32000,
"open-mistral-7b": 32000,
"open-mixtral-8x22b": 64000,
"mistral-small-latest": 32000,
"mistral-medium-latest": 32000,
"mistral-large-latest": 32000,
"codestral-latest": 256000,
"open-mistral-nemo-latest": 131000,
"ministral-8b-latest": 131000,
"ministral-3b-latest": 131000,
"pixtral-large-latest": 131000,
"pixtral-12b-2409": 131000,
}
MISTRALAI_FUNCTION_CALLING_MODELS = (
"mistral-large-latest",
"open-mixtral-8x22b",
"ministral-8b-latest",
"ministral-3b-latest",
"mistral-small-latest",
"codestral-latest",
"open-mistral-nemo-latest",
"pixtral-large-latest",
"pixtral-12b-2409",
)
MISTRALAI_CODE_MODELS = "codestral-latest"
def mistralai_modelname_to_contextsize(modelname: str) -> int:
# handling finetuned models
if modelname.startswith("ft:"):
modelname = modelname.split(":")[1]
if modelname not in MISTRALAI_MODELS:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid MistralAI model name."
"Known models are: " + ", ".join(MISTRALAI_MODELS.keys())
)
return MISTRALAI_MODELS[modelname]
def is_mistralai_function_calling_model(modelname: str) -> bool:
return modelname in MISTRALAI_FUNCTION_CALLING_MODELS
def is_mistralai_code_model(modelname: str) -> bool:
return modelname in MISTRALAI_CODE_MODELS
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import AnchorHead
class TestAnchorHead(TestCase):
def test_anchor_head_loss(self):
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}]
cfg = Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False))
anchor_head = AnchorHead(num_classes=4, in_channels=1, train_cfg=cfg)
# Anchor head expects multiple levels of features per image
feats = (
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(anchor_head.prior_generator.strides)))
cls_scores, bbox_preds = anchor_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but
# there should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import AnchorHead
class TestAnchorHead(TestCase):
def test_anchor_head_loss(self):
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
cfg = Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False))
anchor_head = AnchorHead(num_classes=4, in_channels=1, train_cfg=cfg)
# Anchor head expects multiple levels of features per image
feats = (
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(anchor_head.prior_generator.strides)))
cls_scores, bbox_preds = anchor_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = anchor_head.loss(cls_scores, bbox_preds,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but
# there should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = anchor_head.loss(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import ElasticsearchStore
from langchain_community.vectorstores.elasticsearch import (
ApproxRetrievalStrategy,
BaseRetrievalStrategy,
ExactRetrievalStrategy,
SparseRetrievalStrategy,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BaseRetrievalStrategy": "langchain_community.vectorstores.elasticsearch",
"ApproxRetrievalStrategy": "langchain_community.vectorstores.elasticsearch",
"ExactRetrievalStrategy": "langchain_community.vectorstores.elasticsearch",
"SparseRetrievalStrategy": "langchain_community.vectorstores.elasticsearch",
"ElasticsearchStore": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ApproxRetrievalStrategy",
"BaseRetrievalStrategy",
"ElasticsearchStore",
"ExactRetrievalStrategy",
"SparseRetrievalStrategy",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import ElasticsearchStore
from langchain_community.vectorstores.elasticsearch import (
ApproxRetrievalStrategy,
BaseRetrievalStrategy,
ExactRetrievalStrategy,
SparseRetrievalStrategy,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BaseRetrievalStrategy": "langchain_community.vectorstores.elasticsearch",
"ApproxRetrievalStrategy": "langchain_community.vectorstores.elasticsearch",
"ExactRetrievalStrategy": "langchain_community.vectorstores.elasticsearch",
"SparseRetrievalStrategy": "langchain_community.vectorstores.elasticsearch",
"ElasticsearchStore": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BaseRetrievalStrategy",
"ApproxRetrievalStrategy",
"ExactRetrievalStrategy",
"SparseRetrievalStrategy",
"ElasticsearchStore",
]
|
from abc import ABC, abstractmethod
from typing import Dict, Iterator, List, Optional, Type
from typing_extensions import TYPE_CHECKING
if TYPE_CHECKING:
from docarray import BaseDoc, DocList
class AbstractDocStore(ABC):
@staticmethod
@abstractmethod
def list(namespace: str, show_table: bool) -> List[str]:
"""List all DocLists in the specified backend at the namespace.
:param namespace: The namespace to list
:param show_table: If true, a table is printed to the console
:return: A list of DocList names
"""
...
@staticmethod
@abstractmethod
def delete(name: str, missing_ok: bool) -> bool:
"""Delete the DocList object at the specified name
:param name: The name of the DocList to delete
:param missing_ok: If true, no error will be raised if the DocList does not exist.
:return: True if the DocList was deleted, False if it did not exist.
"""
...
@staticmethod
@abstractmethod
def push(
docs: 'DocList',
name: str,
public: bool,
show_progress: bool,
branding: Optional[Dict],
) -> Dict:
"""Push this DocList to the specified name.
:param docs: The DocList to push
:param name: The name to push to
:param public: Whether the DocList should be publicly accessible
:param show_progress: If true, a progress bar will be displayed.
:param branding: Branding information to be stored with the DocList
"""
...
@staticmethod
@abstractmethod
def push_stream(
docs: Iterator['BaseDoc'],
url: str,
public: bool = True,
show_progress: bool = False,
branding: Optional[Dict] = None,
) -> Dict:
"""Push a stream of documents to the specified name.
:param docs: a stream of documents
:param url: The name to push to
:param public: Whether the DocList should be publicly accessible
:param show_progress: If true, a progress bar will be displayed.
:param branding: Branding information to be stored with the DocList
"""
...
@staticmethod
@abstractmethod
def pull(
docs_cls: Type['DocList'],
name: str,
show_progress: bool,
local_cache: bool,
) -> 'DocList':
"""Pull a DocList from the specified name.
:param docs_cls: The DocList class to instantiate
:param name: The name to pull from
:param show_progress: If true, a progress bar will be displayed.
:param local_cache: If true, the DocList will be cached locally
:return: A DocList
"""
...
@staticmethod
@abstractmethod
def pull_stream(
docs_cls: Type['DocList'],
name: str,
show_progress: bool,
local_cache: bool,
) -> Iterator['BaseDoc']:
"""Pull a stream of documents from the specified name.
:param docs_cls: The DocList class to instantiate
:param name: The name to pull from
:param show_progress: If true, a progress bar will be displayed.
:param local_cache: If true, the DocList will be cached locally
:return: An iterator of documents"""
...
|
from abc import ABC, abstractmethod
from typing import Dict, Iterator, List, Optional, Type
from typing_extensions import TYPE_CHECKING
if TYPE_CHECKING:
from docarray import BaseDoc, DocList
class AbstractDocStore(ABC):
@staticmethod
@abstractmethod
def list(namespace: str, show_table: bool) -> List[str]:
"""List all DocArrays in the specified backend at the namespace.
:param namespace: The namespace to list
:param show_table: If true, a table is printed to the console
:return: A list of DocList names
"""
...
@staticmethod
@abstractmethod
def delete(name: str, missing_ok: bool) -> bool:
"""Delete the DocList object at the specified name
:param name: The name of the DocList to delete
:param missing_ok: If true, no error will be raised if the DocList does not exist.
:return: True if the DocList was deleted, False if it did not exist.
"""
...
@staticmethod
@abstractmethod
def push(
docs: 'DocList',
name: str,
public: bool,
show_progress: bool,
branding: Optional[Dict],
) -> Dict:
"""Push this DocList to the specified name.
:param docs: The DocList to push
:param name: The name to push to
:param public: Whether the DocList should be publicly accessible
:param show_progress: If true, a progress bar will be displayed.
:param branding: Branding information to be stored with the DocList
"""
...
@staticmethod
@abstractmethod
def push_stream(
docs: Iterator['BaseDoc'],
url: str,
public: bool = True,
show_progress: bool = False,
branding: Optional[Dict] = None,
) -> Dict:
"""Push a stream of documents to the specified name.
:param docs: a stream of documents
:param url: The name to push to
:param public: Whether the DocList should be publicly accessible
:param show_progress: If true, a progress bar will be displayed.
:param branding: Branding information to be stored with the DocList
"""
...
@staticmethod
@abstractmethod
def pull(
docs_cls: Type['DocList'],
name: str,
show_progress: bool,
local_cache: bool,
) -> 'DocList':
"""Pull a DocList from the specified name.
:param docs_cls: The DocList class to instantiate
:param name: The name to pull from
:param show_progress: If true, a progress bar will be displayed.
:param local_cache: If true, the DocList will be cached locally
:return: A DocList
"""
...
@staticmethod
@abstractmethod
def pull_stream(
docs_cls: Type['DocList'],
name: str,
show_progress: bool,
local_cache: bool,
) -> Iterator['BaseDoc']:
"""Pull a stream of documents from the specified name.
:param docs_cls: The DocList class to instantiate
:param name: The name to pull from
:param show_progress: If true, a progress bar will be displayed.
:param local_cache: If true, the DocList will be cached locally
:return: An iterator of documents"""
...
|
_base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py'
train_cfg = dict(max_epochs=36)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
|
_base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py'
# learning policy
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
|
from typing import TYPE_CHECKING
from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate
from ..utils.import_utils import is_peft_available, is_torch_available, is_transformers_available
def text_encoder_lora_state_dict(text_encoder):
deprecate(
"text_encoder_load_state_dict in `models`",
"0.27.0",
"`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
)
state_dict = {}
for name, module in text_encoder_attn_modules(text_encoder):
for k, v in module.q_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v
for k, v in module.k_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v
for k, v in module.v_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v
for k, v in module.out_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v
return state_dict
if is_transformers_available():
def text_encoder_attn_modules(text_encoder):
deprecate(
"text_encoder_attn_modules in `models`",
"0.27.0",
"`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
)
from transformers import CLIPTextModel, CLIPTextModelWithProjection
attn_modules = []
if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
for i, layer in enumerate(text_encoder.text_model.encoder.layers):
name = f"text_model.encoder.layers.{i}.self_attn"
mod = layer.self_attn
attn_modules.append((name, mod))
else:
raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}")
return attn_modules
_import_structure = {}
if is_torch_available():
_import_structure["single_file_model"] = ["FromOriginalModelMixin"]
_import_structure["unet"] = ["UNet2DConditionLoadersMixin"]
_import_structure["utils"] = ["AttnProcsLayers"]
if is_transformers_available():
_import_structure["single_file"] = ["FromSingleFileMixin"]
_import_structure["lora_pipeline"] = [
"AmusedLoraLoaderMixin",
"StableDiffusionLoraLoaderMixin",
"SD3LoraLoaderMixin",
"StableDiffusionXLLoraLoaderMixin",
"LTXVideoLoraLoaderMixin",
"LoraLoaderMixin",
"FluxLoraLoaderMixin",
"CogVideoXLoraLoaderMixin",
"Mochi1LoraLoaderMixin",
"HunyuanVideoLoraLoaderMixin",
"SanaLoraLoaderMixin",
]
_import_structure["textual_inversion"] = ["TextualInversionLoaderMixin"]
_import_structure["ip_adapter"] = ["IPAdapterMixin"]
_import_structure["peft"] = ["PeftAdapterMixin"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
if is_torch_available():
from .single_file_model import FromOriginalModelMixin
from .unet import UNet2DConditionLoadersMixin
from .utils import AttnProcsLayers
if is_transformers_available():
from .ip_adapter import IPAdapterMixin
from .lora_pipeline import (
AmusedLoraLoaderMixin,
CogVideoXLoraLoaderMixin,
FluxLoraLoaderMixin,
HunyuanVideoLoraLoaderMixin,
LoraLoaderMixin,
LTXVideoLoraLoaderMixin,
Mochi1LoraLoaderMixin,
SanaLoraLoaderMixin,
SD3LoraLoaderMixin,
StableDiffusionLoraLoaderMixin,
StableDiffusionXLLoraLoaderMixin,
)
from .single_file import FromSingleFileMixin
from .textual_inversion import TextualInversionLoaderMixin
from .peft import PeftAdapterMixin
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
from typing import TYPE_CHECKING
from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate
from ..utils.import_utils import is_peft_available, is_torch_available, is_transformers_available
def text_encoder_lora_state_dict(text_encoder):
deprecate(
"text_encoder_load_state_dict in `models`",
"0.27.0",
"`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
)
state_dict = {}
for name, module in text_encoder_attn_modules(text_encoder):
for k, v in module.q_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v
for k, v in module.k_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v
for k, v in module.v_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v
for k, v in module.out_proj.lora_linear_layer.state_dict().items():
state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v
return state_dict
if is_transformers_available():
def text_encoder_attn_modules(text_encoder):
deprecate(
"text_encoder_attn_modules in `models`",
"0.27.0",
"`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.",
)
from transformers import CLIPTextModel, CLIPTextModelWithProjection
attn_modules = []
if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
for i, layer in enumerate(text_encoder.text_model.encoder.layers):
name = f"text_model.encoder.layers.{i}.self_attn"
mod = layer.self_attn
attn_modules.append((name, mod))
else:
raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}")
return attn_modules
_import_structure = {}
if is_torch_available():
_import_structure["single_file_model"] = ["FromOriginalModelMixin"]
_import_structure["unet"] = ["UNet2DConditionLoadersMixin"]
_import_structure["utils"] = ["AttnProcsLayers"]
if is_transformers_available():
_import_structure["single_file"] = ["FromSingleFileMixin"]
_import_structure["lora_pipeline"] = [
"AmusedLoraLoaderMixin",
"StableDiffusionLoraLoaderMixin",
"SD3LoraLoaderMixin",
"StableDiffusionXLLoraLoaderMixin",
"LTXVideoLoraLoaderMixin",
"LoraLoaderMixin",
"FluxLoraLoaderMixin",
"CogVideoXLoraLoaderMixin",
"Mochi1LoraLoaderMixin",
"SanaLoraLoaderMixin",
]
_import_structure["textual_inversion"] = ["TextualInversionLoaderMixin"]
_import_structure["ip_adapter"] = ["IPAdapterMixin"]
_import_structure["peft"] = ["PeftAdapterMixin"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
if is_torch_available():
from .single_file_model import FromOriginalModelMixin
from .unet import UNet2DConditionLoadersMixin
from .utils import AttnProcsLayers
if is_transformers_available():
from .ip_adapter import IPAdapterMixin
from .lora_pipeline import (
AmusedLoraLoaderMixin,
CogVideoXLoraLoaderMixin,
FluxLoraLoaderMixin,
LoraLoaderMixin,
LTXVideoLoraLoaderMixin,
Mochi1LoraLoaderMixin,
SanaLoraLoaderMixin,
SD3LoraLoaderMixin,
StableDiffusionLoraLoaderMixin,
StableDiffusionXLLoraLoaderMixin,
)
from .single_file import FromSingleFileMixin
from .textual_inversion import TextualInversionLoaderMixin
from .peft import PeftAdapterMixin
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.runner.hooks import HOOKS
from mmcv.runner.hooks.lr_updater import (CosineAnnealingLrUpdaterHook,
annealing_cos)
@HOOKS.register_module()
class YOLOXLrUpdaterHook(CosineAnnealingLrUpdaterHook):
"""YOLOX learning rate scheme.
There are two main differences between YOLOXLrUpdaterHook
and CosineAnnealingLrUpdaterHook.
1. When the current running epoch is greater than
`max_epoch-last_epoch`, a fixed learning rate will be used
2. The exp warmup scheme is different from LrUpdaterHook in MMCV
Args:
num_last_epochs (int): The number of epochs with a fixed learning rate
before the end of the training.
"""
def __init__(self, num_last_epochs, **kwargs):
self.num_last_epochs = num_last_epochs
super(YOLOXLrUpdaterHook, self).__init__(**kwargs)
def get_warmup_lr(self, cur_iters):
def _get_warmup_lr(cur_iters, regular_lr):
# exp warmup scheme
k = self.warmup_ratio * pow(
(cur_iters + 1) / float(self.warmup_iters), 2)
warmup_lr = [_lr * k for _lr in regular_lr]
return warmup_lr
if isinstance(self.base_lr, dict):
lr_groups = {}
for key, base_lr in self.base_lr.items():
lr_groups[key] = _get_warmup_lr(cur_iters, base_lr)
return lr_groups
else:
return _get_warmup_lr(cur_iters, self.base_lr)
def get_lr(self, runner, base_lr):
last_iter = len(runner.data_loader) * self.num_last_epochs
if self.by_epoch:
progress = runner.epoch
max_progress = runner.max_epochs
else:
progress = runner.iter
max_progress = runner.max_iters
progress += 1
if self.min_lr_ratio is not None:
target_lr = base_lr * self.min_lr_ratio
else:
target_lr = self.min_lr
if progress >= max_progress - last_iter:
# fixed learning rate
return target_lr
else:
return annealing_cos(
base_lr, target_lr, (progress - self.warmup_iters) /
(max_progress - self.warmup_iters - last_iter))
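# --- Hedged numeric sketch (illustration only): the exp warmup factor computed in
# _get_warmup_lr above, for assumed values warmup_ratio=0.001, warmup_iters=1000.
if __name__ == "__main__":
    warmup_ratio, warmup_iters, cur_iters, base_lr = 0.001, 1000.0, 499, 0.01
    k = warmup_ratio * pow((cur_iters + 1) / warmup_iters, 2)  # 0.001 * 0.5 ** 2
    assert abs(k - 0.00025) < 1e-9
    assert abs(base_lr * k - 2.5e-6) < 1e-9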
|
from mmcv.runner.hooks import HOOKS
from mmcv.runner.hooks.lr_updater import (CosineAnnealingLrUpdaterHook,
annealing_cos)
@HOOKS.register_module()
class YOLOXLrUpdaterHook(CosineAnnealingLrUpdaterHook):
"""YOLOX learning rate scheme.
There are two main differences between YOLOXLrUpdaterHook
and CosineAnnealingLrUpdaterHook.
1. When the current running epoch is greater than
`max_epoch-last_epoch`, a fixed learning rate will be used
2. The exp warmup scheme is different from LrUpdaterHook in MMCV
Args:
num_last_epochs (int): The number of epochs with a fixed learning rate
before the end of the training.
"""
def __init__(self, num_last_epochs, **kwargs):
self.num_last_epochs = num_last_epochs
super(YOLOXLrUpdaterHook, self).__init__(**kwargs)
def get_warmup_lr(self, cur_iters):
def _get_warmup_lr(cur_iters, regular_lr):
# exp warmup scheme
k = self.warmup_ratio * pow(
(cur_iters + 1) / float(self.warmup_iters), 2)
warmup_lr = [_lr * k for _lr in regular_lr]
return warmup_lr
if isinstance(self.base_lr, dict):
lr_groups = {}
for key, base_lr in self.base_lr.items():
lr_groups[key] = _get_warmup_lr(cur_iters, base_lr)
return lr_groups
else:
return _get_warmup_lr(cur_iters, self.base_lr)
def get_lr(self, runner, base_lr):
last_iter = len(runner.data_loader) * self.num_last_epochs
if self.by_epoch:
progress = runner.epoch
max_progress = runner.max_epochs
else:
progress = runner.iter
max_progress = runner.max_iters
progress += 1
if self.min_lr_ratio is not None:
target_lr = base_lr * self.min_lr_ratio
else:
target_lr = self.min_lr
if progress >= max_progress - last_iter:
# fixed learning rate
return target_lr
else:
return annealing_cos(
base_lr, target_lr, (progress - self.warmup_iters) /
(max_progress - self.warmup_iters - last_iter))
|
from __future__ import annotations
from collections.abc import Iterable
import torch.nn as nn
from torch import Tensor
from sentence_transformers.losses.CosineSimilarityLoss import CosineSimilarityLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCosineSimilarityLoss(CosineSimilarityLoss):
def __init__(
self,
model: SparseEncoder,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
SparseCosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SparseEncoder model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
default, the identity function is used (i.e. no change).
Requirements:
- Need to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range `[0, 1]`
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is :class:`SparseCoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(
model=model,
loss=losses.SparseCosineSimilarityLoss(model),
document_regularizer_weight=5e-5,
use_document_regularizer_only=True,
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
model.similarity_fn_name = "cosine"
return super().__init__(model, loss_fct=loss_fct, cos_score_transformation=cos_score_transformation)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseCosineSimilarityLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from __future__ import annotations
from collections.abc import Iterable
import torch.nn as nn
from torch import Tensor
from sentence_transformers.losses.CosineSimilarityLoss import CosineSimilarityLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCosineSimilarityLoss(CosineSimilarityLoss):
def __init__(
self,
model: SparseEncoder,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
SparseCosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SparseEncoder model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
default, the identity function is used (i.e. no change).
Requirements:
- Need to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range `[0, 1]`
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is :class:`SparseCoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(
model=model,
loss=losses.SparseCosineSimilarityLoss(model),
corpus_regularizer_weight=5e-5,
use_corpus_regularizer_only=True,
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
model.similarity_fn_name = "cosine"
return super().__init__(model, loss_fct=loss_fct, cos_score_transformation=cos_score_transformation)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseCosineSimilarityLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
METAINFO = {
'CLASSES':
('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'),
# PALETTE is a list of color tuples, which is used for visualization.
'PALETTE': [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192),
(197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255),
(153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252),
(182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0),
(0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)]
}
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/VOCdevkit/'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
_delete_=True,
type=dataset_type,
data_root=data_root,
ann_file='annotations/voc0712_trainval.json',
data_prefix=dict(img=''),
metainfo=METAINFO,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/voc07_test.json',
data_prefix=dict(img=''),
metainfo=METAINFO,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/voc07_test.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
# training schedule, the dataset is repeated 3 times, so the
# actual epoch = 4 * 3 = 12
max_epochs = 4
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[3],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
METAINFO = {
'CLASSES':
('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'),
# PALETTE is a list of color tuples, which is used for visualization.
'PALETTE': [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192),
(197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255),
(153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252),
(182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0),
(0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)]
}
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/VOCdevkit/'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
_delete_=True,
type=dataset_type,
data_root=data_root,
ann_file='annotations/voc0712_trainval.json',
data_prefix=dict(img=''),
metainfo=METAINFO,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/voc07_test.json',
data_prefix=dict(img=''),
metainfo=METAINFO,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/voc07_test.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
# training schedule: the dataset is repeated 3 times, so the
# actual number of training epochs is 4 * 3 = 12
max_epochs = 4
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[3],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.16.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
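# Hedged examples of the parsing behaviour (illustrative version strings, not shipped releases):
#   parse_version_info('2.16.0')    -> (2, 16, 0)
#   parse_version_info('2.16.0rc1') -> (2, 16, 0, 'rc1')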
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.15.1'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
import warnings
from sys import platform
from typing import Optional
import torch
import torchaudio
dict_format = {
torch.uint8: "u8",
torch.int16: "s16",
torch.int32: "s32",
torch.int64: "s64",
torch.float32: "flt",
torch.float64: "dbl",
}
def play_audio(
waveform: torch.Tensor,
sample_rate: Optional[float],
device: Optional[str] = None,
) -> None:
"""Plays audio through specified or available output device.
.. warning::
This function is currently only supported on MacOS, and requires
libavdevice (FFmpeg) with ``audiotoolbox`` output device.
.. note::
This function can play up to two audio channels.
Args:
waveform: Tensor containing the audio to play.
Expected shape: `(time, num_channels)`.
sample_rate: Sample rate of the audio to play.
device: Output device to use. If None, the default device is used.
"""
if platform == "darwin":
device = device or "audiotoolbox"
path = "-"
else:
raise ValueError(f"This function only supports MacOS, but current OS is {platform}")
available_devices = list(torchaudio.utils.ffmpeg_utils.get_output_devices().keys())
if device not in available_devices:
raise ValueError(f"Device {device} is not available. Available devices are: {available_devices}")
if waveform.dtype not in dict_format:
raise ValueError(f"Unsupported type {waveform.dtype}. The list of supported types is: {dict_format.keys()}")
format = dict_format[waveform.dtype]
if waveform.ndim != 2:
raise ValueError(f"Expected 2D tensor with shape `(time, num_channels)`, got {waveform.ndim}D tensor instead")
time, num_channels = waveform.size()
if num_channels > 2:
warnings.warn(
f"Expected up to 2 channels, got {num_channels} channels instead. "
"Only the first 2 channels will be played.",
stacklevel=2,
)
# Write to speaker device
s = torchaudio.io.StreamWriter(dst=path, format=device)
s.add_audio_stream(sample_rate, num_channels, format=format)
# write audio to the device
block_size = 256
with s.open():
for i in range(0, time, block_size):
s.write_audio_chunk(0, waveform[i : i + block_size, :])
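# Hedged usage sketch (macOS only; "example.wav" is an illustrative path, not a bundled asset):
# waveform, sample_rate = torchaudio.load("example.wav")  # torchaudio.load returns (channels, time)
# play_audio(waveform.t(), sample_rate)  # transpose to the expected (time, num_channels) layout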
|
import warnings
from sys import platform
from typing import Optional
import torch
import torchaudio
dict_format = {
torch.uint8: "u8",
torch.int16: "s16",
torch.int32: "s32",
torch.int64: "s64",
torch.float32: "flt",
torch.float64: "dbl",
}
@torchaudio._extension.fail_if_no_ffmpeg
def play_audio(
waveform: torch.Tensor,
sample_rate: Optional[float],
device: Optional[str] = None,
) -> None:
"""Plays audio through specified or available output device.
.. warning::
This function is currently only supported on MacOS, and requires
libavdevice (FFmpeg) with ``audiotoolbox`` output device.
.. note::
This function can play up to two audio channels.
Args:
waveform: Tensor containing the audio to play.
Expected shape: `(time, num_channels)`.
sample_rate: Sample rate of the audio to play.
device: Output device to use. If None, the default device is used.
"""
if platform == "darwin":
device = device or "audiotoolbox"
path = "-"
else:
raise ValueError(f"This function only supports MacOS, but current OS is {platform}")
available_devices = list(torchaudio.utils.ffmpeg_utils.get_output_devices().keys())
if device not in available_devices:
raise ValueError(f"Device {device} is not available. Available devices are: {available_devices}")
if waveform.dtype not in dict_format:
raise ValueError(f"Unsupported type {waveform.dtype}. The list of supported types is: {dict_format.keys()}")
format = dict_format[waveform.dtype]
if waveform.ndim != 2:
raise ValueError(f"Expected 2D tensor with shape `(time, num_channels)`, got {waveform.ndim}D tensor instead")
time, num_channels = waveform.size()
if num_channels > 2:
warnings.warn(
f"Expected up to 2 channels, got {num_channels} channels instead. Only the first 2 channels will be played."
)
# Write to speaker device
s = torchaudio.io.StreamWriter(dst=path, format=device)
s.add_audio_stream(sample_rate, num_channels, format=format)
# write audio to the device
block_size = 256
with s.open():
for i in range(0, time, block_size):
s.write_audio_chunk(0, waveform[i : i + block_size, :])
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.saving.file_editor import KerasFileEditor
from keras.src.saving.object_registration import CustomObjectScope
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import get_custom_objects
from keras.src.saving.object_registration import get_registered_name
from keras.src.saving.object_registration import get_registered_object
from keras.src.saving.object_registration import register_keras_serializable
from keras.src.saving.saving_api import load_model
from keras.src.saving.saving_api import load_weights
from keras.src.saving.saving_api import save_model
from keras.src.saving.saving_api import save_weights
from keras.src.saving.serialization_lib import deserialize_keras_object
from keras.src.saving.serialization_lib import serialize_keras_object
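# Hedged usage sketch of the re-exported saving API (model and file names are illustrative):
# import keras
# model = keras.Sequential([keras.Input(shape=(4,)), keras.layers.Dense(1)])
# keras.saving.save_model(model, "my_model.keras")
# restored = keras.saving.load_model("my_model.keras")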
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.saving.object_registration import CustomObjectScope
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import get_custom_objects
from keras.src.saving.object_registration import get_registered_name
from keras.src.saving.object_registration import get_registered_object
from keras.src.saving.object_registration import register_keras_serializable
from keras.src.saving.saving_api import load_model
from keras.src.saving.saving_api import load_weights
from keras.src.saving.saving_api import save_model
from keras.src.saving.saving_api import save_weights
from keras.src.saving.serialization_lib import deserialize_keras_object
from keras.src.saving.serialization_lib import serialize_keras_object
|
"""Interface for tools."""
from typing import Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool, tool
class InvalidTool(BaseTool):
"""Tool that is run when invalid tool name is encountered by agent."""
name: str = "invalid_tool"
"""Name of the tool."""
description: str = "Called when tool name is invalid. Suggests valid tool names."
"""Description of the tool."""
def _run(
self,
requested_tool_name: str,
available_tool_names: list[str],
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
available_tool_names_str = ", ".join(list(available_tool_names))
return (
f"{requested_tool_name} is not a valid tool, "
f"try one of [{available_tool_names_str}]."
)
async def _arun(
self,
requested_tool_name: str,
available_tool_names: list[str],
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
available_tool_names_str = ", ".join(list(available_tool_names))
return (
f"{requested_tool_name} is not a valid tool, "
f"try one of [{available_tool_names_str}]."
)
__all__ = ["InvalidTool", "tool"]
|
"""Interface for tools."""
from typing import Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool, tool
class InvalidTool(BaseTool):
"""Tool that is run when invalid tool name is encountered by agent."""
name: str = "invalid_tool"
"""Name of the tool."""
description: str = "Called when tool name is invalid. Suggests valid tool names."
"""Description of the tool."""
def _run(
self,
requested_tool_name: str,
available_tool_names: list[str],
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
available_tool_names_str = ", ".join([tool for tool in available_tool_names])
return (
f"{requested_tool_name} is not a valid tool, "
f"try one of [{available_tool_names_str}]."
)
async def _arun(
self,
requested_tool_name: str,
available_tool_names: list[str],
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
available_tool_names_str = ", ".join([tool for tool in available_tool_names])
return (
f"{requested_tool_name} is not a valid tool, "
f"try one of [{available_tool_names_str}]."
)
__all__ = ["InvalidTool", "tool"]
|
"""Analytics API"""
import logging
from typing import Annotated
import fastapi
import pydantic
import backend.data.analytics
from backend.server.utils import get_user_id
router = fastapi.APIRouter()
logger = logging.getLogger(__name__)
class LogRawMetricRequest(pydantic.BaseModel):
metric_name: str = pydantic.Field(..., min_length=1)
metric_value: float = pydantic.Field(..., allow_inf_nan=False)
data_string: str = pydantic.Field(..., min_length=1)
@router.post(path="/log_raw_metric")
async def log_raw_metric(
user_id: Annotated[str, fastapi.Depends(get_user_id)],
request: LogRawMetricRequest,
):
try:
result = await backend.data.analytics.log_raw_metric(
user_id=user_id,
metric_name=request.metric_name,
metric_value=request.metric_value,
data_string=request.data_string,
)
return result.id
except Exception as e:
logger.exception(
"Failed to log metric %s for user %s: %s", request.metric_name, user_id, e
)
raise fastapi.HTTPException(
status_code=500,
detail={
"message": str(e),
"hint": "Check analytics service connection and retry.",
},
)
@router.post("/log_raw_analytics")
async def log_raw_analytics(
user_id: Annotated[str, fastapi.Depends(get_user_id)],
type: Annotated[str, fastapi.Body(..., embed=True)],
data: Annotated[
dict,
fastapi.Body(..., embed=True, description="The data to log"),
],
data_index: Annotated[
str,
fastapi.Body(
...,
embed=True,
description="Indexable field for any count based analytical measures like page order clicking, tutorial step completion, etc.",
),
],
):
try:
result = await backend.data.analytics.log_raw_analytics(
user_id, type, data, data_index
)
return result.id
except Exception as e:
logger.exception("Failed to log analytics for user %s: %s", user_id, e)
raise fastapi.HTTPException(
status_code=500,
detail={"message": str(e), "hint": "Ensure analytics DB is reachable."},
)
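# Hedged request sketch for POST /log_raw_metric (field values are illustrative):
# {
#     "metric_name": "page_load_time",
#     "metric_value": 1.23,
#     "data_string": "landing_page"
# }
# On success the endpoint returns the id of the stored analytics row.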
|
"""Analytics API"""
import logging
from typing import Annotated
import fastapi
import backend.data.analytics
from backend.server.utils import get_user_id
router = fastapi.APIRouter()
logger = logging.getLogger(__name__)
@router.post(path="/log_raw_metric")
async def log_raw_metric(
user_id: Annotated[str, fastapi.Depends(get_user_id)],
metric_name: Annotated[str, fastapi.Body(..., embed=True)],
metric_value: Annotated[float, fastapi.Body(..., embed=True)],
data_string: Annotated[str, fastapi.Body(..., embed=True)],
):
try:
result = await backend.data.analytics.log_raw_metric(
user_id=user_id,
metric_name=metric_name,
metric_value=metric_value,
data_string=data_string,
)
return result.id
except Exception as e:
logger.exception(
"Failed to log metric %s for user %s: %s", metric_name, user_id, e
)
raise fastapi.HTTPException(
status_code=500,
detail={
"message": str(e),
"hint": "Check analytics service connection and retry.",
},
)
@router.post("/log_raw_analytics")
async def log_raw_analytics(
user_id: Annotated[str, fastapi.Depends(get_user_id)],
type: Annotated[str, fastapi.Body(..., embed=True)],
data: Annotated[
dict,
fastapi.Body(..., embed=True, description="The data to log"),
],
data_index: Annotated[
str,
fastapi.Body(
...,
embed=True,
description="Indexable field for any count based analytical measures like page order clicking, tutorial step completion, etc.",
),
],
):
try:
result = await backend.data.analytics.log_raw_analytics(
user_id, type, data, data_index
)
return result.id
except Exception as e:
logger.exception("Failed to log analytics for user %s: %s", user_id, e)
raise fastapi.HTTPException(
status_code=500,
detail={"message": str(e), "hint": "Ensure analytics DB is reachable."},
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import SyncBuffersHook
class TestSyncBuffersHook:
def test_sync_buffers_hook(self):
runner = Mock()
runner.model = Mock()
hook = SyncBuffersHook()
hook._after_epoch(runner)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import SyncBuffersHook
class TestSyncBuffersHook:
def test_sync_buffers_hook(self):
Runner = Mock()
Runner.model = Mock()
Hook = SyncBuffersHook()
Hook._after_epoch(Runner)
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
torch = import_library('torch', raise_error=False)
tf = import_library('tensorflow', raise_error=False)
T = TypeVar('T', bound='PointsAndColors')
class PointsAndColors(BaseDoc):
"""
Document for handling the tensor data of a [`PointCloud3D`][docarray.documents.point_cloud.PointCloud3D] object.
A PointsAndColors Document can contain:
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor)
containing the points in 3D space information (`PointsAndColors.points`)
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor)
containing the points' color information (`PointsAndColors.colors`)
"""
points: AnyTensor
colors: Optional[AnyTensor]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(points=value)
return super().validate(value)
def display(self) -> None:
"""
Plot point cloud consisting of points in 3D space and optionally colors.
"""
if TYPE_CHECKING:
import trimesh
else:
trimesh = import_library('trimesh', raise_error=True)
from IPython.display import display
colors = (
self.colors
if self.colors is not None
else np.tile(
np.array([0, 0, 0]),
(self.points.get_comp_backend().shape(self.points)[0], 1),
)
)
pc = trimesh.points.PointCloud(vertices=self.points, colors=colors)
s = trimesh.Scene(geometry=pc)
display(s.show())
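# Hedged usage sketch (random points and colors are illustrative; display() additionally needs trimesh and IPython):
# points = np.random.random((100, 3))
# colors = np.random.randint(0, 255, size=(100, 3))
# pac = PointsAndColors(points=points, colors=colors)
# pac.display()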
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
torch = import_library('torch', raise_error=False)
tf = import_library('tensorflow', raise_error=False)
T = TypeVar('T', bound='PointsAndColors')
class PointsAndColors(BaseDoc):
"""
Document for handling point clouds tensor data.
A PointsAndColors Document can contain an AnyTensor containing the points in
3D space information (`PointsAndColors.points`), and an AnyTensor containing
the points' color information (`PointsAndColors.colors`).
"""
points: AnyTensor
colors: Optional[AnyTensor]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(points=value)
return super().validate(value)
def display(self) -> None:
"""
Plot point cloud consisting of points in 3D space and optionally colors.
"""
if TYPE_CHECKING:
import trimesh
else:
trimesh = import_library('trimesh', raise_error=True)
from IPython.display import display
colors = (
self.colors
if self.colors is not None
else np.tile(
np.array([0, 0, 0]),
(self.points.get_comp_backend().shape(self.points)[0], 1),
)
)
pc = trimesh.points.PointCloud(vertices=self.points, colors=colors)
s = trimesh.Scene(geometry=pc)
display(s.show())
|
# Copyright (c) OpenMMLab. All rights reserved.
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_method, import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist,
scandir, symlink)
from .version_utils import digit_version, get_git_hash
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_method, import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist,
scandir, symlink)
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available'
]
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.RandomGrayscale")
class RandomGrayscale(BaseImagePreprocessingLayer):
"""Preprocessing layer for random conversion of RGB images to grayscale.
This layer randomly converts input images to grayscale with a specified
factor. When applied, it maintains the original number of channels
but sets all channels to the same grayscale value. This can be useful
for data augmentation and training models to be robust to color
variations.
The conversion preserves the perceived luminance of the original color
image using standard RGB to grayscale conversion coefficients. Images
that are not selected for conversion remain unchanged.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
        factor: Float between 0 and 1, specifying the probability of
            converting each image to grayscale. Defaults to 0.5. A value of
1.0 means all images will be converted, while 0.0 means no images
will be converted.
data_format: String, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch, channels, height, width)`.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format,
or `(..., channels, height, width)`, in `"channels_first"` format.
Output shape:
Same as input shape. The output maintains the same number of channels
as the input, even for grayscale-converted images where all channels
will have the same value.
"""
def __init__(self, factor=0.5, data_format=None, seed=None, **kwargs):
super().__init__(**kwargs)
if factor < 0 or factor > 1:
raise ValueError(
"`factor` should be between 0 and 1. "
f"Received: factor={factor}"
)
self.factor = factor
self.data_format = backend.standardize_data_format(data_format)
self.seed = seed
self.generator = self.backend.random.SeedGenerator(seed)
def get_random_transformation(self, images, training=True, seed=None):
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
random_values = self.backend.random.uniform(
shape=(self.backend.core.shape(images)[0],),
minval=0,
maxval=1,
seed=seed,
)
should_apply = self.backend.numpy.expand_dims(
random_values < self.factor, axis=[1, 2, 3]
)
return should_apply
def transform_images(self, images, transformations=None, **kwargs):
should_apply = (
transformations
if transformations is not None
else self.get_random_transformation(images)
)
grayscale_images = self.backend.image.rgb_to_grayscale(
images, data_format=self.data_format
)
return self.backend.numpy.where(should_apply, grayscale_images, images)
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs, **kwargs):
return inputs
def transform_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def transform_labels(self, labels, transformations=None, **kwargs):
return labels
def transform_segmentation_masks(
self, segmentation_masks, transformations=None, **kwargs
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"factor": self.factor})
return config
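# Hedged usage sketch (assumes the public keras.layers.RandomGrayscale export and a channels_last batch):
# import numpy as np
# import keras
# images = np.random.uniform(0.0, 255.0, size=(4, 32, 32, 3)).astype("float32")
# layer = keras.layers.RandomGrayscale(factor=0.5, seed=42)
# augmented = layer(images, training=True)  # same shape; roughly half the samples become grayscale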
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.RandomGrayscale")
class RandomGrayscale(BaseImagePreprocessingLayer):
"""Preprocessing layer for random conversion of RGB images to grayscale.
This layer randomly converts input images to grayscale with a specified
factor. When applied, it maintains the original number of channels
but sets all channels to the same grayscale value. This can be useful
for data augmentation and training models to be robust to color
variations.
The conversion preserves the perceived luminance of the original color
image using standard RGB to grayscale conversion coefficients. Images
that are not selected for conversion remain unchanged.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
        factor: Float between 0 and 1, specifying the probability of
            converting each image to grayscale. Defaults to 0.5. A value of
1.0 means all images will be converted, while 0.0 means no images
will be converted.
data_format: String, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch, channels, height, width)`.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format,
or `(..., channels, height, width)`, in `"channels_first"` format.
Output shape:
Same as input shape. The output maintains the same number of channels
as the input, even for grayscale-converted images where all channels
will have the same value.
"""
def __init__(self, factor=0.5, data_format=None, **kwargs):
super().__init__(**kwargs)
if factor < 0 or factor > 1:
raise ValueError(
"`factor` should be between 0 and 1. "
f"Received: factor={factor}"
)
self.factor = factor
self.data_format = backend.standardize_data_format(data_format)
self.generator = self.backend.random.SeedGenerator()
def get_random_transformation(self, images, training=True, seed=None):
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
random_values = self.backend.random.uniform(
shape=(self.backend.core.shape(images)[0],),
minval=0,
maxval=1,
seed=seed,
)
should_apply = self.backend.numpy.expand_dims(
random_values < self.factor, axis=[1, 2, 3]
)
return should_apply
def transform_images(self, images, transformations=None, **kwargs):
should_apply = (
transformations
if transformations is not None
else self.get_random_transformation(images)
)
grayscale_images = self.backend.image.rgb_to_grayscale(
images, data_format=self.data_format
)
return self.backend.numpy.where(should_apply, grayscale_images, images)
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs, **kwargs):
return inputs
def transform_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def transform_labels(self, labels, transformations=None, **kwargs):
return labels
def transform_segmentation_masks(
self, segmentation_masks, transformations=None, **kwargs
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"factor": self.factor})
return config
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union
import numpy as np
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AudioUrl')
AUDIO_FILE_FORMATS = ['wav']
@_register_proto(proto_type_name='audio_url')
class AudioUrl(AnyUrl):
"""
URL to a .wav file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config) # basic url validation
has_audio_extension = any(ext in url for ext in AUDIO_FILE_FORMATS)
if not has_audio_extension:
raise ValueError(
f'Audio URL must have one of the following extensions:'
f'{AUDIO_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(self: T) -> np.ndarray:
"""
Load the data from the url into an AudioNdArray.
:return: AudioNdArray representing the audio file content.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
import numpy as np
            from docarray.typing import AudioNdArray, AudioUrl
            class MyDoc(BaseDocument):
audio_url: AudioUrl
audio_tensor: AudioNdArray
doc = MyDoc(audio_url="toydata/hello.wav")
doc.audio_tensor = doc.audio_url.load()
assert isinstance(doc.audio_tensor, np.ndarray)
"""
bytes_ = AudioBytes(self.load_bytes())
return bytes_.load()
|
import wave
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.audio_ndarray import MAX_INT_16, AudioNdArray
from docarray.typing.url.any_url import AnyUrl
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AudioUrl')
AUDIO_FILE_FORMATS = ['wav']
@_register_proto(proto_type_name='audio_url')
class AudioUrl(AnyUrl):
"""
URL to a .wav file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config) # basic url validation
has_audio_extension = any(ext in url for ext in AUDIO_FILE_FORMATS)
if not has_audio_extension:
raise ValueError(
f'Audio URL must have one of the following extensions:'
f'{AUDIO_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(self: T, dtype: str = 'float32') -> AudioNdArray:
"""
Load the data from the url into an AudioNdArray.
:param dtype: Data-type of the returned array; default: float32.
:return: AudioNdArray representing the audio file content.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
import numpy as np
            from docarray.typing import AudioNdArray, AudioUrl
            class MyDoc(BaseDocument):
audio_url: AudioUrl
audio_tensor: AudioNdArray
doc = MyDoc(audio_url="toydata/hello.wav")
doc.audio_tensor = doc.audio_url.load()
assert isinstance(doc.audio_tensor, np.ndarray)
"""
import io
file: Union[io.BytesIO, T]
if self.startswith('http'):
import requests
resp = requests.get(self)
resp.raise_for_status()
file = io.BytesIO()
file.write(resp.content)
file.seek(0)
else:
file = self
# note wave is Python built-in mod. https://docs.python.org/3/library/wave.html
with wave.open(file) as ifile:
samples = ifile.getnframes()
audio = ifile.readframes(samples)
# Convert buffer to float32 using NumPy
audio_as_np_int16 = np.frombuffer(audio, dtype=np.int16)
audio_as_np_float32 = audio_as_np_int16.astype(dtype=dtype)
# Normalise float32 array so that values are between -1.0 and +1.0
audio_norm = audio_as_np_float32 / MAX_INT_16
channels = ifile.getnchannels()
if channels == 2:
# 1 for mono, 2 for stereo
audio_stereo = np.empty((int(len(audio_norm) / channels), channels))
audio_stereo[:, 0] = audio_norm[range(0, len(audio_norm), 2)]
audio_stereo[:, 1] = audio_norm[range(1, len(audio_norm), 2)]
return parse_obj_as(AudioNdArray, audio_stereo)
else:
return parse_obj_as(AudioNdArray, audio_norm)
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'myst_parser',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
myst_enable_extensions = ['colon_fence']
myst_heading_anchors = 3
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
# Specify the language of shared menu
'menu_lang':
'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
language = 'zh_CN'
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'myst_parser',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
# Specify the language of shared menu
'menu_lang':
'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
language = 'zh_CN'
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
|
from typing import Annotated
from fastapi import FastAPI, Query
app = FastAPI()
@app.get("/items/")
async def read_items(q: Annotated[str | None, Query(min_length=3)]):
results = {"items": [{"item_id": "Foo"}, {"item_id": "Bar"}]}
if q:
results.update({"q": q})
return results
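# Hedged request sketches (illustrative URLs and responses):
# GET /items/?q=foo -> {"items": [...], "q": "foo"}
# GET /items/?q=ab  -> 422 validation error (q is shorter than min_length=3)
# GET /items/       -> 422 validation error (q has no default, so it is required despite allowing None)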
|
from typing import Annotated
from fastapi import FastAPI, Query
app = FastAPI()
@app.get("/items/")
async def read_items(q: Annotated[str | None, Query(min_length=3)] = ...):
results = {"items": [{"item_id": "Foo"}, {"item_id": "Bar"}]}
if q:
results.update({"q": q})
return results
|
import random
import pytest
from jina import Document, DocumentArray
@pytest.fixture
def documents_chunk():
document_array = DocumentArray()
document = Document(tags={'query_size': 35, 'query_price': 31, 'query_brand': 1})
for i in range(0, 10):
chunk = Document()
for j in range(0, 10):
match = Document(
tags={
'level': 'chunk',
}
)
match.scores['cosine'] = random.random()
match.parent_id = i
chunk.matches.append(match)
document.chunks.append(chunk)
document_array.extend([document])
return document_array
@pytest.fixture
def documents_chunk_chunk():
document_array = DocumentArray()
document = Document(tags={'query_size': 35, 'query_price': 31, 'query_brand': 1})
for i in range(0, 10):
chunk = Document()
for j in range(0, 10):
chunk_chunk = Document()
for k in range(0, 10):
match = Document(
tags={
'level': 'chunk',
}
)
match.scores['cosine'] = random.random()
match.parent_id = j
chunk_chunk.matches.append(match)
chunk.chunks.append(chunk_chunk)
document.chunks.append(chunk)
document_array.extend([document])
return document_array
|
import random
import pytest
from jina import DocumentArray, Document
@pytest.fixture
def documents_chunk():
document_array = DocumentArray()
document = Document(tags={'query_size': 35, 'query_price': 31, 'query_brand': 1})
for i in range(0, 10):
chunk = Document()
for j in range(0, 10):
match = Document(
tags={
'level': 'chunk',
}
)
match.scores['cosine'] = random.random()
match.parent_id = i
chunk.matches.append(match)
document.chunks.append(chunk)
document_array.extend([document])
return document_array
@pytest.fixture
def documents_chunk_chunk():
document_array = DocumentArray()
document = Document(tags={'query_size': 35, 'query_price': 31, 'query_brand': 1})
for i in range(0, 10):
chunk = Document()
for j in range(0, 10):
chunk_chunk = Document()
for k in range(0, 10):
match = Document(
tags={
'level': 'chunk',
}
)
match.scores['cosine'] = random.random()
match.parent_id = j
chunk_chunk.matches.append(match)
chunk.chunks.append(chunk_chunk)
document.chunks.append(chunk)
document_array.extend([document])
return document_array
|
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import (get_device, get_max_cuda_memory, is_cuda_available,
is_mlu_available, is_mps_available)
__all__ = [
'get_max_cuda_memory', 'get_device', 'is_cuda_available',
'is_mlu_available', 'is_mps_available'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import (get_device, get_max_cuda_memory, is_cuda_available,
is_mlu_available)
__all__ = [
'get_max_cuda_memory', 'get_device', 'is_cuda_available',
'is_mlu_available'
]
|
"""Class for a VectorStore-backed memory object."""
from collections.abc import Sequence
from typing import Any, Optional, Union
from langchain_core._api import deprecated
from langchain_core.documents import Document
from langchain_core.vectorstores import VectorStoreRetriever
from pydantic import Field
from langchain.memory.chat_memory import BaseMemory
from langchain.memory.utils import get_prompt_input_key
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
class VectorStoreRetrieverMemory(BaseMemory):
"""Store the conversation history in a vector store and retrieves the relevant
parts of past conversation based on the input.
"""
retriever: VectorStoreRetriever = Field(exclude=True)
"""VectorStoreRetriever object to connect to."""
memory_key: str = "history" #: :meta private:
"""Key name to locate the memories in the result of load_memory_variables."""
input_key: Optional[str] = None
"""Key name to index the inputs to load_memory_variables."""
return_docs: bool = False
"""Whether or not to return the result of querying the database directly."""
exclude_input_keys: Sequence[str] = Field(default_factory=tuple)
"""Input keys to exclude in addition to memory key when constructing the document"""
@property
def memory_variables(self) -> list[str]:
"""The list of keys emitted from the load_memory_variables method."""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: dict[str, Any]) -> str:
"""Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key
def _documents_to_memory_variables(
self, docs: list[Document]
) -> dict[str, Union[list[Document], str]]:
result: Union[list[Document], str]
if not self.return_docs:
result = "\n".join([doc.page_content for doc in docs])
else:
result = docs
return {self.memory_key: result}
def load_memory_variables(
self, inputs: dict[str, Any]
) -> dict[str, Union[list[Document], str]]:
"""Return history buffer."""
input_key = self._get_prompt_input_key(inputs)
query = inputs[input_key]
docs = self.retriever.invoke(query)
return self._documents_to_memory_variables(docs)
async def aload_memory_variables(
self, inputs: dict[str, Any]
) -> dict[str, Union[list[Document], str]]:
"""Return history buffer."""
input_key = self._get_prompt_input_key(inputs)
query = inputs[input_key]
docs = await self.retriever.ainvoke(query)
return self._documents_to_memory_variables(docs)
def _form_documents(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> list[Document]:
"""Format context from this conversation to buffer."""
# Each document should only include the current turn, not the chat history
exclude = set(self.exclude_input_keys)
exclude.add(self.memory_key)
filtered_inputs = {k: v for k, v in inputs.items() if k not in exclude}
texts = [
f"{k}: {v}"
for k, v in list(filtered_inputs.items()) + list(outputs.items())
]
page_content = "\n".join(texts)
return [Document(page_content=page_content)]
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
documents = self._form_documents(inputs, outputs)
self.retriever.add_documents(documents)
async def asave_context(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> None:
"""Save context from this conversation to buffer."""
documents = self._form_documents(inputs, outputs)
await self.retriever.aadd_documents(documents)
def clear(self) -> None:
"""Nothing to clear."""
async def aclear(self) -> None:
"""Nothing to clear."""
|
"""Class for a VectorStore-backed memory object."""
from typing import Any, Dict, List, Optional, Sequence, Union
from langchain_core._api import deprecated
from langchain_core.documents import Document
from langchain_core.vectorstores import VectorStoreRetriever
from pydantic import Field
from langchain.memory.chat_memory import BaseMemory
from langchain.memory.utils import get_prompt_input_key
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
class VectorStoreRetrieverMemory(BaseMemory):
"""Store the conversation history in a vector store and retrieves the relevant
parts of past conversation based on the input.
"""
retriever: VectorStoreRetriever = Field(exclude=True)
"""VectorStoreRetriever object to connect to."""
memory_key: str = "history" #: :meta private:
"""Key name to locate the memories in the result of load_memory_variables."""
input_key: Optional[str] = None
"""Key name to index the inputs to load_memory_variables."""
return_docs: bool = False
"""Whether or not to return the result of querying the database directly."""
exclude_input_keys: Sequence[str] = Field(default_factory=tuple)
"""Input keys to exclude in addition to memory key when constructing the document"""
@property
def memory_variables(self) -> List[str]:
"""The list of keys emitted from the load_memory_variables method."""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
"""Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key
def _documents_to_memory_variables(
self, docs: List[Document]
) -> Dict[str, Union[List[Document], str]]:
result: Union[List[Document], str]
if not self.return_docs:
result = "\n".join([doc.page_content for doc in docs])
else:
result = docs
return {self.memory_key: result}
def load_memory_variables(
self, inputs: Dict[str, Any]
) -> Dict[str, Union[List[Document], str]]:
"""Return history buffer."""
input_key = self._get_prompt_input_key(inputs)
query = inputs[input_key]
docs = self.retriever.invoke(query)
return self._documents_to_memory_variables(docs)
async def aload_memory_variables(
self, inputs: Dict[str, Any]
) -> Dict[str, Union[List[Document], str]]:
"""Return history buffer."""
input_key = self._get_prompt_input_key(inputs)
query = inputs[input_key]
docs = await self.retriever.ainvoke(query)
return self._documents_to_memory_variables(docs)
def _form_documents(
self, inputs: Dict[str, Any], outputs: Dict[str, str]
) -> List[Document]:
"""Format context from this conversation to buffer."""
# Each document should only include the current turn, not the chat history
exclude = set(self.exclude_input_keys)
exclude.add(self.memory_key)
filtered_inputs = {k: v for k, v in inputs.items() if k not in exclude}
texts = [
f"{k}: {v}"
for k, v in list(filtered_inputs.items()) + list(outputs.items())
]
page_content = "\n".join(texts)
return [Document(page_content=page_content)]
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
documents = self._form_documents(inputs, outputs)
self.retriever.add_documents(documents)
async def asave_context(
self, inputs: Dict[str, Any], outputs: Dict[str, str]
) -> None:
"""Save context from this conversation to buffer."""
documents = self._form_documents(inputs, outputs)
await self.retriever.aadd_documents(documents)
def clear(self) -> None:
"""Nothing to clear."""
async def aclear(self) -> None:
"""Nothing to clear."""
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
import torch
from torch import nn, Tensor
from typing import Any, Iterable, Dict
from sentence_transformers.util import fullname
from ..SentenceTransformer import SentenceTransformer
class CosineSimilarityLoss(nn.Module):
def __init__(self, model: SentenceTransformer, loss_fct=nn.MSELoss(), cos_score_transformation=nn.Identity()):
"""
CosineSimilarityLoss expects that the InputExamples consists of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SentenceTransformer model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
                default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../examples/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CosineSimilarityLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(CosineSimilarityLoss, self).__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.float().view(-1))
def get_config_dict(self) -> Dict[str, Any]:
return {"loss_fct": fullname(self.loss_fct)}
|
import torch
from torch import nn, Tensor
from typing import Any, Iterable, Dict
from sentence_transformers.util import fullname
from ..SentenceTransformer import SentenceTransformer
class CosineSimilarityLoss(nn.Module):
def __init__(self, model: SentenceTransformer, loss_fct=nn.MSELoss(), cos_score_transformation=nn.Identity()):
"""
CosineSimilarityLoss expects that the InputExamples consists of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
:param model: SentenceTransformer model
:param loss_fct: Which pytorch loss function should be used to compare the ``cosine_similarity(u, v)`` with the input_label?
By default, MSE is used: ``||input_label - cosine_sim(u, v)||_2``
:param cos_score_transformation: The cos_score_transformation function is applied on top of cosine_similarity.
            By default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../examples/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, InputExample, losses
from torch.utils.data import DataLoader
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [
InputExample(texts=['My first sentence', 'My second sentence'], label=0.8),
InputExample(texts=['Another pair', 'Unrelated sentence'], label=0.3)
]
train_batch_size = 1
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
model.fit(
[(train_dataloader, train_loss)],
epochs=10,
)
"""
super(CosineSimilarityLoss, self).__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.float().view(-1))
def get_config_dict(self) -> Dict[str, Any]:
return {"loss_fct": fullname(self.loss_fct)}
|
from __future__ import annotations
import logging
import numpy as np
from torch.utils.data import IterableDataset
from sentence_transformers.readers import InputExample
logger = logging.getLogger(__name__)
class SentenceLabelDataset(IterableDataset):
"""
This dataset can be used for some specific Triplet Losses like BATCH_HARD_TRIPLET_LOSS, which require
multiple examples with the same label in a batch.
It draws n consecutive, random and unique samples from one label at a time. This is repeated for each label.
Labels with fewer than n unique samples are ignored.
This also applies to drawing without replacement: once fewer than n samples remain for a label, it is skipped.
This *DOES NOT* check whether there are more labels than the batch size, or whether the batch size is divisible
by the number of samples drawn per label.
"""
def __init__(self, examples: list[InputExample], samples_per_label: int = 2, with_replacement: bool = False):
"""
Creates a LabelSampler for a SentenceLabelDataset.
Args:
examples (List[InputExample]): A list of InputExamples.
samples_per_label (int, optional): The number of consecutive, random, and unique samples drawn per label.
The batch size should be a multiple of samples_per_label. Defaults to 2.
with_replacement (bool, optional): If True, a sample can be drawn in multiple draws, but not multiple times
within the same drawing. If False, each sample is drawn at most once (depending on the total number of
samples per label). Defaults to False.
"""
super().__init__()
self.samples_per_label = samples_per_label
# Group examples by label
label2ex = {}
for example in examples:
if example.label not in label2ex:
label2ex[example.label] = []
label2ex[example.label].append(example)
# Include only labels with at least 2 examples
self.grouped_inputs = []
self.groups_right_border = []
num_labels = 0
for label, label_examples in label2ex.items():
if len(label_examples) >= self.samples_per_label:
self.grouped_inputs.extend(label_examples)
self.groups_right_border.append(
len(self.grouped_inputs)
) # At which position does this label group / bucket end?
num_labels += 1
self.label_range = np.arange(num_labels)
self.with_replacement = with_replacement
np.random.shuffle(self.label_range)
logger.info(
f"SentenceLabelDataset: {len(examples)} examples, from which {len(self.grouped_inputs)} examples could be used (those labels appeared at least {self.samples_per_label} times). {num_labels} different labels found."
)
def __iter__(self):
label_idx = 0
count = 0
already_seen = {}
while count < len(self.grouped_inputs):
label = self.label_range[label_idx]
if label not in already_seen:
already_seen[label] = set()
left_border = 0 if label == 0 else self.groups_right_border[label - 1]
right_border = self.groups_right_border[label]
if self.with_replacement:
selection = np.arange(left_border, right_border)
else:
selection = [i for i in np.arange(left_border, right_border) if i not in already_seen[label]]
if len(selection) >= self.samples_per_label:
for element_idx in np.random.choice(selection, self.samples_per_label, replace=False):
count += 1
already_seen[label].add(element_idx)
yield self.grouped_inputs[element_idx]
label_idx += 1
if label_idx >= len(self.label_range):
label_idx = 0
already_seen = {}
np.random.shuffle(self.label_range)
def __len__(self):
return len(self.grouped_inputs)
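# Illustrative usage sketch (the example texts and labels below are assumptions,
# not part of this module): iterating the dataset yields ``samples_per_label``
# examples with the same label consecutively, so a batch size that is a multiple
# of ``samples_per_label`` keeps label groups intact.
if __name__ == "__main__":
    examples = [InputExample(texts=[f"sentence {i}"], label=i % 3) for i in range(12)]
    dataset = SentenceLabelDataset(examples, samples_per_label=2)
    first_four = [example.label for example, _ in zip(dataset, range(4))]
    print(first_four)  # e.g. [2, 2, 0, 0] -- consecutive pairs share a label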
|
from __future__ import annotations
import logging
import numpy as np
from torch.utils.data import IterableDataset
from sentence_transformers.readers import InputExample
logger = logging.getLogger(__name__)
class SentenceLabelDataset(IterableDataset):
"""
This dataset can be used for some specific Triplet Losses like BATCH_HARD_TRIPLET_LOSS, which require
multiple examples with the same label in a batch.
It draws n consecutive, random and unique samples from one label at a time. This is repeated for each label.
Labels with fewer than n unique samples are ignored.
This also applies to drawing without replacement: once fewer than n samples remain for a label, it is skipped.
This *DOES NOT* check whether there are more labels than the batch size, or whether the batch size is divisible
by the number of samples drawn per label.
"""
def __init__(self, examples: list[InputExample], samples_per_label: int = 2, with_replacement: bool = False):
"""
Creates a LabelSampler for a SentenceLabelDataset.
Args:
examples (List[InputExample]): A list of InputExamples.
samples_per_label (int, optional): The number of consecutive, random, and unique samples drawn per label.
The batch size should be a multiple of samples_per_label. Defaults to 2.
with_replacement (bool, optional): If True, a sample can be drawn in multiple draws, but not multiple times
within the same drawing. If False, each sample is drawn at most once (depending on the total number of
samples per label). Defaults to False.
"""
super().__init__()
self.samples_per_label = samples_per_label
# Group examples by label
label2ex = {}
for example in examples:
if example.label not in label2ex:
label2ex[example.label] = []
label2ex[example.label].append(example)
# Include only labels with at least 2 examples
self.grouped_inputs = []
self.groups_right_border = []
num_labels = 0
for label, label_examples in label2ex.items():
if len(label_examples) >= self.samples_per_label:
self.grouped_inputs.extend(label_examples)
self.groups_right_border.append(
len(self.grouped_inputs)
) # At which position does this label group / bucket end?
num_labels += 1
self.label_range = np.arange(num_labels)
self.with_replacement = with_replacement
np.random.shuffle(self.label_range)
logger.info(
"SentenceLabelDataset: {} examples, from which {} examples could be used (those labels appeared at least {} times). {} different labels found.".format(
len(examples), len(self.grouped_inputs), self.samples_per_label, num_labels
)
)
def __iter__(self):
label_idx = 0
count = 0
already_seen = {}
while count < len(self.grouped_inputs):
label = self.label_range[label_idx]
if label not in already_seen:
already_seen[label] = set()
left_border = 0 if label == 0 else self.groups_right_border[label - 1]
right_border = self.groups_right_border[label]
if self.with_replacement:
selection = np.arange(left_border, right_border)
else:
selection = [i for i in np.arange(left_border, right_border) if i not in already_seen[label]]
if len(selection) >= self.samples_per_label:
for element_idx in np.random.choice(selection, self.samples_per_label, replace=False):
count += 1
already_seen[label].add(element_idx)
yield self.grouped_inputs[element_idx]
label_idx += 1
if label_idx >= len(self.label_range):
label_idx = 0
already_seen = {}
np.random.shuffle(self.label_range)
def __len__(self):
return len(self.grouped_inputs)
|
# mypy: allow-untyped-defs
import warnings
import torch
import torch.distributed.algorithms.model_averaging.averagers as averagers
class PostLocalSGDOptimizer(torch.optim.Optimizer):
r"""
Wraps an arbitrary :class:`torch.optim.Optimizer` and runs `post-local SGD <https://arxiv.org/abs/1808.07217>`_.
This optimizer runs the local optimizer at every step.
After the warm-up stage, it averages parameters periodically after the local optimizer is applied.
Args:
optim: The local optimizer.
averager: A model averager instance to run post-localSGD algorithm.
Example::
>>> # xdoctest: +SKIP("undefined variables")
>>> import torch
>>> import torch.distributed as dist
>>> import torch.distributed.algorithms.model_averaging.averagers as averagers
>>> import torch.nn as nn
>>> from torch.distributed.optim import PostLocalSGDOptimizer
>>> from torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook import (
>>> PostLocalSGDState,
>>> post_localSGD_hook,
>>> )
>>>
>>> model = nn.parallel.DistributedDataParallel(
>>> module, device_ids=[rank], output_device=rank
>>> )
>>>
>>> # Register a post-localSGD communication hook.
>>> state = PostLocalSGDState(process_group=None, subgroup=None, start_localSGD_iter=100)
>>> model.register_comm_hook(state, post_localSGD_hook)
>>>
>>> # Create a post-localSGD optimizer that wraps a local optimizer.
>>> # Note that ``warmup_steps`` used in ``PostLocalSGDOptimizer`` must be the same as
>>> # ``start_localSGD_iter`` used in ``PostLocalSGDState``.
>>> local_optim = torch.optim.SGD(params=model.parameters(), lr=0.01)
>>> opt = PostLocalSGDOptimizer(
>>> optim=local_optim,
>>> averager=averagers.PeriodicModelAverager(period=4, warmup_steps=100)
>>> )
>>>
>>> # In the first 100 steps, DDP runs global gradient averaging at every step.
>>> # After 100 steps, DDP runs gradient averaging within each subgroup (intra-node by default),
>>> # and post-localSGD optimizer runs global model averaging every 4 steps after applying the local optimizer.
>>> for step in range(0, 200):
>>> opt.zero_grad()
>>> loss = loss_fn(output, labels)
>>> loss.backward()
>>> opt.step()
"""
def __init__(self, optim: torch.optim.Optimizer, averager: averagers.ModelAverager):
self.optim = optim
self.param_groups = self.optim.param_groups
self.averager = averager
@property
def state(self): # type: ignore[override]
return self.optim.state
def __repr__(self):
return self.optim.__repr__()
def state_dict(self):
r"""
This is the same as :class:`torch.optim.Optimizer` :meth:`state_dict`,
but adds an extra entry to record model averager's step to the checkpoint
to ensure reload does not cause unnecessary warm up again.
"""
optim_state_dict = self.optim.state_dict()
optim_state_dict["step"] = self.averager.step
return optim_state_dict
def load_state_dict(self, state_dict):
r"""
This is the same as :class:`torch.optim.Optimizer` :meth:`load_state_dict`,
but also restores model averager's step value to the one
saved in the provided ``state_dict``.
If there is no ``"step"`` entry in ``state_dict``,
it will raise a warning and initialize the model averager's step to 0.
"""
self.optim.load_state_dict(state_dict)
if "step" in state_dict:
self.averager.step = state_dict["step"]
else:
warnings.warn(
"Loaded state dict does not contain a step counter for an averager. "
"Setting step counter to 0."
)
self.averager.step = 0
def step(self): # type: ignore[override]
r"""
Performs a single optimization step (parameter update).
"""
self.optim.step()
self.averager.average_parameters(params=self.param_groups)
def zero_grad(self, set_to_none: bool = True): # type: ignore[override]
self.optim.zero_grad(set_to_none=set_to_none)
def add_param_group(self, param_group):
self.optim.add_param_group(param_group)
|
# mypy: allow-untyped-defs
import warnings
import torch
import torch.distributed.algorithms.model_averaging.averagers as averagers
class PostLocalSGDOptimizer(torch.optim.Optimizer):
r"""
Wraps an arbitrary :class:`torch.optim.Optimizer` and runs `post-local SGD <https://arxiv.org/abs/1808.07217>`_.
This optimizer runs the local optimizer at every step.
After the warm-up stage, it averages parameters periodically after the local optimizer is applied.
Args:
optim: The local optimizer.
averager: A model averager instance to run post-localSGD algorithm.
Example::
>>> # xdoctest: +SKIP("undefined variables")
>>> import torch
>>> import torch.distributed as dist
>>> import torch.distributed.algorithms.model_averaging.averagers as averagers
>>> import torch.nn as nn
>>> from torch.distributed.optim import PostLocalSGDOptimizer
>>> from torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook import (
>>> PostLocalSGDState,
>>> post_localSGD_hook,
>>> )
>>>
>>> model = nn.parallel.DistributedDataParallel(
>>> module, device_ids=[rank], output_device=rank
>>> )
>>>
>>> # Register a post-localSGD communication hook.
>>> state = PostLocalSGDState(process_group=None, subgroup=None, start_localSGD_iter=100)
>>> model.register_comm_hook(state, post_localSGD_hook)
>>>
>>> # Create a post-localSGD optimizer that wraps a local optimizer.
>>> # Note that ``warmup_steps`` used in ``PostLocalSGDOptimizer`` must be the same as
>>> # ``start_localSGD_iter`` used in ``PostLocalSGDState``.
>>> local_optim = torch.optim.SGD(params=model.parameters(), lr=0.01)
>>> opt = PostLocalSGDOptimizer(
>>> optim=local_optim,
>>> averager=averagers.PeriodicModelAverager(period=4, warmup_steps=100)
>>> )
>>>
>>> # In the first 100 steps, DDP runs global gradient averaging at every step.
>>> # After 100 steps, DDP runs gradient averaging within each subgroup (intra-node by default),
>>> # and post-localSGD optimizer runs global model averaging every 4 steps after applying the local optimizer.
>>> for step in range(0, 200):
>>> opt.zero_grad()
>>> loss = loss_fn(output, labels)
>>> loss.backward()
>>> opt.step()
"""
def __init__(self, optim: torch.optim.Optimizer, averager: averagers.ModelAverager):
self.optim = optim
self.param_groups = self.optim.param_groups
self.averager = averager
@property
def state(self): # type: ignore[override]
return self.optim.state
def __repr__(self):
return self.optim.__repr__()
def state_dict(self):
r"""
This is the same as :class:`torch.optim.Optimizer` :meth:`state_dict`,
but adds an extra entry to record model averager's step to the checkpoint
to ensure reload does not cause unnecessary warm up again.
"""
optim_state_dict = self.optim.state_dict()
optim_state_dict["step"] = self.averager.step
return optim_state_dict
def load_state_dict(self, state_dict):
r"""
This is the same as :class:`torch.optim.Optimizer` :meth:`load_state_dict`,
but also restores model averager's step value to the one
saved in the provided ``state_dict``.
If there is no ``"step"`` entry in ``state_dict``,
it will raise a warning and initialize the model averager's step to 0.
"""
self.optim.load_state_dict(state_dict)
if "step" in state_dict:
self.averager.step = state_dict["step"]
else:
warnings.warn(
"Loaded state dict does not contain a step counter for an averager. "
"Setting step counter to 0."
)
self.averager.step = 0
def step(self): # type: ignore[override]
r"""
Performs a single optimization step (parameter update).
"""
self.optim.step()
self.averager.average_parameters(params=self.param_groups)
def zero_grad(self, set_to_none: bool = True): # type: ignore[override]
self.optim.zero_grad(set_to_none=set_to_none)
def add_param_group(self, param_group):
self.optim.add_param_group(param_group)
|
import numpy as np
from docarray import BaseDoc
from docarray.array import DocArrayStacked
from docarray.array.stacked.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_document_view():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=i) for i in range(4)]
storage = DocArrayStacked[MyDoc](docs)._storage
doc = MyDoc.from_view(ColumnStorageView(0, storage))
assert doc.is_view()
assert doc.id == '0'
assert (doc.tensor == np.zeros(10)).all()
assert doc.name == 'hello'
storage.columns['id'][0] = '12345'
storage.columns['tensor'][0] = np.ones(10)
storage.columns['name'][0] = 'byebye'
assert doc.id == '12345'
assert (doc.tensor == np.ones(10)).all()
assert doc.name == 'byebye'
|
import numpy as np
from docarray import BaseDocument
from docarray.array import DocumentArrayStacked
from docarray.array.stacked.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_document_view():
class MyDoc(BaseDocument):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=i) for i in range(4)]
storage = DocumentArrayStacked[MyDoc](docs)._storage
doc = MyDoc.from_view(ColumnStorageView(0, storage))
assert doc.is_view()
assert doc.id == '0'
assert (doc.tensor == np.zeros(10)).all()
assert doc.name == 'hello'
storage.columns['id'][0] = '12345'
storage.columns['tensor'][0] = np.ones(10)
storage.columns['name'][0] = 'byebye'
assert doc.id == '12345'
assert (doc.tensor == np.ones(10)).all()
assert doc.name == 'byebye'
|
from .document import DocumentArray
from .storage.qdrant import StorageMixins, QdrantConfig
__all__ = ['DocumentArrayQdrant', 'QdrantConfig']
class DocumentArrayQdrant(StorageMixins, DocumentArray):
"""
DocumentArray that stores Documents in a `Qdrant <https://qdrant.tech/>`_ vector search engine.
.. note::
This DocumentArray requires `qdrant-client`. You can install it via `pip install "docarray[qdrant]"`.
To use Qdrant as storage backend, a Qdrant service needs to be running on your machine.
With this implementation, :meth:`match` and :meth:`find` perform fast (approximate) vector search.
Additionally, search with filters is supported.
Example usage:
.. code-block:: python
from docarray import DocumentArray
# connect to running Qdrant service with default configuration (address: http://localhost:6333)
da = DocumentArray(storage='qdrant', config={'n_dim': 10})
# connect to a previously persisted DocumentArrayQdrant by specifying collection_name, host, and port
da = DocumentArray(
storage='qdrant',
config={
'collection_name': 'persisted',
'host': 'localhost',
'port': '6333',
'n_dim': 10,
},
)
.. seealso::
For further details, see our :ref:`user guide <qdrant>`.
"""
def __new__(cls, *args, **kwargs):
"""``__new__`` method for :class:`DocumentArrayQdrant`
:param *args: list of args to instantiate the object
:param **kwargs: dict of args to instantiate the object
:return: the instantiated :class:`DocumentArrayQdrant` object
"""
return super().__new__(cls)
|
from .document import DocumentArray
from .storage.qdrant import StorageMixins, QdrantConfig
__all__ = ['DocumentArrayQdrant', 'QdrantConfig']
class DocumentArrayQdrant(StorageMixins, DocumentArray):
"""This is a :class:`DocumentArray` that uses Qdrant as
vector search engine and storage.
"""
def __new__(cls, *args, **kwargs):
"""``__new__`` method for :class:`DocumentArrayQdrant`
:param *args: list of args to instantiate the object
:param **kwargs: dict of args to instantiate the object
:return: the instantiated :class:`DocumentArrayQdrant` object
"""
return super().__new__(cls)
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for traceback_utils."""
import sys
import traceback
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import traceback_utils
class TracebackUtilsTest(test.TestCase):
def assert_trace_line_count(self, fn, count, filtering_enabled=True):
trace_line_count = -1
if filtering_enabled:
traceback_utils.enable_traceback_filtering()
else:
traceback_utils.disable_traceback_filtering()
self.assertEqual(
traceback_utils.is_traceback_filtering_enabled(), filtering_enabled)
try:
fn()
except Exception as e: # pylint: disable=broad-except
# We must count lines rather than frames because autograph transforms
# stack frames into a single large string
trace = '\n'.join(traceback.format_tb(e.__traceback__))
trace_line_count = len(trace.split('\n'))
self.assertGreater(trace_line_count, 0)
if filtering_enabled:
if sys.version_info >= (3, 13):
self.assertLessEqual(trace_line_count, count)
else:
self.assertLess(trace_line_count, count)
else:
self.assertGreater(trace_line_count, count)
def test_eager_add(self):
def fn():
x = array_ops.zeros((2, 3))
y = array_ops.zeros((2, 4))
_ = x + y
self.assert_trace_line_count(fn, count=15, filtering_enabled=True)
self.assert_trace_line_count(fn, count=25, filtering_enabled=False)
def test_tfn_add(self):
@def_function.function
def fn():
x = array_ops.zeros((2, 3))
y = array_ops.zeros((2, 4))
return x + y
self.assert_trace_line_count(fn, count=10, filtering_enabled=True)
self.assert_trace_line_count(fn, count=25, filtering_enabled=False)
def test_tfn_div(self):
@def_function.function
def wrapped_fn(x):
return x / 0.
def fn():
wrapped_fn(0.5)
self.assert_trace_line_count(fn, count=15, filtering_enabled=True)
self.assert_trace_line_count(fn, count=30, filtering_enabled=False)
def test_eager_argmax(self):
def fn():
_ = math_ops.argmax([0, 1], axis=2)
self.assert_trace_line_count(fn, count=15, filtering_enabled=True)
self.assert_trace_line_count(fn, count=30, filtering_enabled=False)
def test_tfn_argmax(self):
@def_function.function
def wrapped_fn(x):
return math_ops.argmax(x, axis=2)
def fn():
wrapped_fn([0, 1])
if sys.version_info >= (3, 13):
self.assert_trace_line_count(fn, count=16, filtering_enabled=True)
else:
self.assert_trace_line_count(fn, count=15, filtering_enabled=True)
self.assert_trace_line_count(fn, count=25, filtering_enabled=False)
def test_variable_constructor(self):
def fn():
_ = variables.Variable()
self.assert_trace_line_count(fn, count=15, filtering_enabled=True)
self.assert_trace_line_count(fn, count=30, filtering_enabled=False)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for traceback_utils."""
import traceback
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import traceback_utils
class TracebackUtilsTest(test.TestCase):
def assert_trace_line_count(self, fn, count, filtering_enabled=True):
trace_line_count = -1
if filtering_enabled:
traceback_utils.enable_traceback_filtering()
else:
traceback_utils.disable_traceback_filtering()
self.assertEqual(
traceback_utils.is_traceback_filtering_enabled(), filtering_enabled)
try:
fn()
except Exception as e: # pylint: disable=broad-except
# We must count lines rather than frames because autograph transforms
# stack frames into a single large string
trace = '\n'.join(traceback.format_tb(e.__traceback__))
trace_line_count = len(trace.split('\n'))
self.assertGreater(trace_line_count, 0)
if filtering_enabled:
self.assertLess(trace_line_count, count)
else:
self.assertGreater(trace_line_count, count)
def test_eager_add(self):
def fn():
x = array_ops.zeros((2, 3))
y = array_ops.zeros((2, 4))
_ = x + y
self.assert_trace_line_count(fn, count=15, filtering_enabled=True)
self.assert_trace_line_count(fn, count=25, filtering_enabled=False)
def test_tfn_add(self):
@def_function.function
def fn():
x = array_ops.zeros((2, 3))
y = array_ops.zeros((2, 4))
return x + y
self.assert_trace_line_count(fn, count=10, filtering_enabled=True)
self.assert_trace_line_count(fn, count=25, filtering_enabled=False)
def test_tfn_div(self):
@def_function.function
def wrapped_fn(x):
return x / 0.
def fn():
wrapped_fn(0.5)
self.assert_trace_line_count(fn, count=15, filtering_enabled=True)
self.assert_trace_line_count(fn, count=30, filtering_enabled=False)
def test_eager_argmax(self):
def fn():
_ = math_ops.argmax([0, 1], axis=2)
self.assert_trace_line_count(fn, count=15, filtering_enabled=True)
self.assert_trace_line_count(fn, count=30, filtering_enabled=False)
def test_tfn_argmax(self):
@def_function.function
def wrapped_fn(x):
return math_ops.argmax(x, axis=2)
def fn():
wrapped_fn([0, 1])
self.assert_trace_line_count(fn, count=15, filtering_enabled=True)
self.assert_trace_line_count(fn, count=25, filtering_enabled=False)
def test_variable_constructor(self):
def fn():
_ = variables.Variable()
self.assert_trace_line_count(fn, count=15, filtering_enabled=True)
self.assert_trace_line_count(fn, count=30, filtering_enabled=False)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import unittest
from unittest import TestCase
import torch
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestTridentRoIHead(TestCase):
def setUp(self):
register_all_modules()
self.roi_head_cfg = get_roi_head_cfg(
'tridentnet/tridentnet_r50-caffe_1x_coco.py')
def test_init(self):
roi_head = MODELS.build(self.roi_head_cfg)
self.assertTrue(roi_head.with_bbox)
self.assertTrue(roi_head.with_shared_head)
def test_trident_roi_head_predict(self):
"""Tests trident roi head predict."""
if not torch.cuda.is_available():
# RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
roi_head_cfg = copy.deepcopy(self.roi_head_cfg)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
s = 256
feats = []
for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 1024, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
image_shapes = [(3, s, s)]
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[0],
num_classes=4,
with_mask=True,
device='cuda')['data_samples']
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100, device='cuda')
# When `test_branch_idx == 1`
roi_head.predict(feats, proposals_list, batch_data_samples)
# When `test_branch_idx == -1`
roi_head_cfg.test_branch_idx = -1
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
roi_head.predict(feats, proposals_list, batch_data_samples)
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import unittest
from unittest import TestCase
import torch
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestTridentRoIHead(TestCase):
def setUp(self):
register_all_modules()
self.roi_head_cfg = get_roi_head_cfg(
'tridentnet/tridentnet_r50_caffe_1x_coco.py')
def test_init(self):
roi_head = MODELS.build(self.roi_head_cfg)
self.assertTrue(roi_head.with_bbox)
self.assertTrue(roi_head.with_shared_head)
def test_trident_roi_head_predict(self):
"""Tests trident roi head predict."""
if not torch.cuda.is_available():
# RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
roi_head_cfg = copy.deepcopy(self.roi_head_cfg)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
s = 256
feats = []
for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 1024, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
image_shapes = [(3, s, s)]
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[0],
num_classes=4,
with_mask=True,
device='cuda')['data_samples']
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100, device='cuda')
# When `test_branch_idx == 1`
roi_head.predict(feats, proposals_list, batch_data_samples)
# When `test_branch_idx == -1`
roi_head_cfg.test_branch_idx = -1
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
roi_head.predict(feats, proposals_list, batch_data_samples)
|
"""Gaussian process based regression and classification."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from . import kernels
from ._gpc import GaussianProcessClassifier
from ._gpr import GaussianProcessRegressor
__all__ = ["GaussianProcessClassifier", "GaussianProcessRegressor", "kernels"]
|
"""Gaussian process based regression and classification."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from . import kernels
from ._gpc import GaussianProcessClassifier
from ._gpr import GaussianProcessRegressor
__all__ = ["GaussianProcessRegressor", "GaussianProcessClassifier", "kernels"]
|
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .dataset import *
from .fileio import *
from .registry import *
from .utils import *
|
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .fileio import *
from .registry import *
from .utils import *
|
# Copyright (c) OpenMMLab. All rights reserved.
from torch import Tensor
from mmdet.core import SampleList
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .faster_rcnn import FasterRCNN
@MODELS.register_module()
class TridentFasterRCNN(FasterRCNN):
"""Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_"""
def __init__(self,
backbone: ConfigType,
rpn_head: ConfigType,
roi_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
assert self.backbone.num_branch == self.roi_head.num_branch
assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx
self.num_branch = self.backbone.num_branch
self.test_branch_idx = self.backbone.test_branch_idx
def _forward(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> tuple:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super()._forward(
batch_inputs=batch_inputs, batch_data_samples=trident_data_samples)
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super().loss(
batch_inputs=batch_inputs, batch_data_samples=trident_data_samples)
def predict(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
rescale: bool = True) -> SampleList:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super().predict(
batch_inputs=batch_inputs,
batch_data_samples=trident_data_samples,
rescale=rescale)
# TODO need to refactor
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
x = self.extract_feats(imgs)
num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
trident_img_metas = [img_metas * num_branch for img_metas in img_metas]
proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
|
# Copyright (c) OpenMMLab. All rights reserved.
from torch import Tensor
from mmdet.core import SampleList
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .faster_rcnn import FasterRCNN
@MODELS.register_module()
class TridentFasterRCNN(FasterRCNN):
"""Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_"""
def __init__(self,
backbone: ConfigType,
rpn_head: ConfigType,
roi_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
assert self.backbone.num_branch == self.roi_head.num_branch
assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx
self.num_branch = self.backbone.num_branch
self.test_branch_idx = self.backbone.test_branch_idx
def _forward(self, batch_inputs: Tensor, batch_data_samples: SampleList,
**kwargs) -> tuple:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super()._forward(
batch_inputs=batch_inputs,
batch_data_samples=trident_data_samples,
**kwargs)
def loss(self, batch_inputs: Tensor, batch_data_samples: SampleList,
**kwargs) -> dict:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super().loss(
batch_inputs=batch_inputs,
batch_data_samples=trident_data_samples,
**kwargs)
def predict(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
rescale: bool = True,
**kwargs) -> SampleList:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super().predict(
batch_inputs=batch_inputs,
batch_data_samples=trident_data_samples,
rescale=rescale,
**kwargs)
# TODO need to refactor
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
x = self.extract_feats(imgs)
num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
trident_img_metas = [img_metas * num_branch for img_metas in img_metas]
proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
|
from dataclasses import dataclass
from typing import Callable, Optional
import datasets
@dataclass
class GeneratorConfig(datasets.BuilderConfig):
generator: Optional[Callable] = None
gen_kwargs: Optional[dict] = None
features: Optional[datasets.Features] = None
def __post_init__(self):
assert self.generator is not None, "generator must be specified"
if self.gen_kwargs is None:
self.gen_kwargs = {}
class Generator(datasets.GeneratorBasedBuilder):
BUILDER_CONFIG_CLASS = GeneratorConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=self.config.gen_kwargs)]
def _generate_examples(self, **gen_kwargs):
for idx, ex in enumerate(self.config.generator(**gen_kwargs)):
yield idx, ex
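# Illustrative usage sketch (assumes the ``datasets`` builder API; the generator
# and field names below are made up for the example): build a small in-memory
# dataset from a plain Python generator via this builder.
if __name__ == "__main__":
    def squares():
        for i in range(5):
            yield {"x": i, "x_squared": i * i}

    builder = Generator(generator=squares)
    builder.download_and_prepare()
    ds = builder.as_dataset(split="train")
    print(ds[0])  # {'x': 0, 'x_squared': 0}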
|
from dataclasses import dataclass
from typing import Callable, Optional
import datasets
@dataclass
class GeneratorConfig(datasets.BuilderConfig):
generator: Optional[Callable] = None
gen_kwargs: Optional[dict] = None
features: Optional[datasets.Features] = None
def __post_init__(self):
assert self.generator is not None, "generator must be specified"
if self.gen_kwargs is None:
self.gen_kwargs = {}
class Generator(datasets.GeneratorBasedBuilder):
BUILDER_CONFIG_CLASS = GeneratorConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})]
def _generate_examples(self):
for idx, ex in enumerate(self.config.generator(**self.config.gen_kwargs)):
yield idx, ex
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
"""Data-loading sampler for distributed training.
During distributed training, it is only useful in conjunction with
:obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same
purpose with :obj:`IterLoader`.
"""
priority = 'NORMAL'
def before_train_epoch(self, runner, mode: str = 'train') -> None:
"""Set the seed for sampler and batch_sampler.
Args:
runner (Runner): The runner of the training process.
"""
if hasattr(runner.cur_dataloader.sampler, 'set_epoch'):
# in case the data loader uses `SequentialSampler` in Pytorch
runner.cur_dataloader.sampler.set_epoch(runner.epoch)
elif hasattr(runner.cur_dataloader.batch_sampler.sampler, 'set_epoch'):
# The batch sampler in PyTorch wraps the sampler as its attribute.
runner.cur_dataloader.batch_sampler.sampler.set_epoch(runner.epoch)
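# Illustrative background sketch (plain PyTorch, not part of the hook): calling
# ``set_epoch`` on a ``DistributedSampler`` re-seeds its shuffle so every epoch
# sees a different ordering, which is exactly what this hook automates per epoch.
if __name__ == "__main__":
    import torch
    from torch.utils.data import TensorDataset
    from torch.utils.data.distributed import DistributedSampler

    dataset = TensorDataset(torch.arange(8))
    sampler = DistributedSampler(dataset, num_replicas=1, rank=0, shuffle=True)
    for epoch in range(2):
        sampler.set_epoch(epoch)  # a different permutation per epoch
        print(list(iter(sampler)))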
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
"""Data-loading sampler for distributed training.
During distributed training, it is only useful in conjunction with
:obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same
purpose with :obj:`IterLoader`.
"""
priority = 'NORMAL'
def before_epoch(self, runner) -> None:
"""Set the seed for sampler and batch_sampler.
Args:
runner (Runner): The runner of the training process.
"""
if hasattr(runner.data_loader.sampler, 'set_epoch'):
# in case the data loader uses `SequentialSampler` in Pytorch
runner.data_loader.sampler.set_epoch(runner.epoch)
elif hasattr(runner.data_loader.batch_sampler.sampler, 'set_epoch'):
# The batch sampler in PyTorch wraps the sampler as its attribute.
runner.data_loader.batch_sampler.sampler.set_epoch(runner.epoch)
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
# Lint as: python3
"""Util import."""
__all__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
# Lint as: python3
"""Util import."""
__all__ = [
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
]
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
|
"""
In this example we train a semantic search model to search through Wikipedia
articles about programming languages & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C , C Sharp , C++, Go , Java , JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natural Language Toolkit, NumPy, pandas (software), Perl, PHP, PostgreSQL, Python , PyTorch, R , React, Rust , Scala , scikit-learn, SciPy, Swift , TensorFlow, Vue.js
In:
1_programming_query_generation.py - We generate queries for all paragraphs from these articles
2_programming_train_bi-encoder.py - We train a SentenceTransformer bi-encoder with these generated queries. This results in a model we can then use for semantic search (for the given Wikipedia articles).
3_programming_semantic_search.py - Shows how the trained model can be used for semantic search
"""
import gzip
import json
import os
import torch
import tqdm
from transformers import T5ForConditionalGeneration, T5Tokenizer
from sentence_transformers import util
paragraphs = set()
# We use the Wikipedia articles of certain programming languages
corpus_filepath = "wiki-programmming-20210101.jsonl.gz"
if not os.path.exists(corpus_filepath):
util.http_get("https://sbert.net/datasets/wiki-programmming-20210101.jsonl.gz", corpus_filepath)
with gzip.open(corpus_filepath, "rt") as fIn:
for line in fIn:
data = json.loads(line.strip())
for p in data["paragraphs"]:
if len(p) > 100: # Only take paragraphs with at least 100 chars
paragraphs.add(p)
paragraphs = list(paragraphs)
print("Paragraphs:", len(paragraphs))
# Now we load the model that is able to generate queries given a paragraph.
# This model was trained on the MS MARCO dataset, a dataset with 500k
# queries from Bing and the respective relevant passage
tokenizer = T5Tokenizer.from_pretrained("BeIR/query-gen-msmarco-t5-large-v1")
model = T5ForConditionalGeneration.from_pretrained("BeIR/query-gen-msmarco-t5-large-v1")
model.eval()
# Select the device
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
# Parameters for generation
batch_size = 8 # Batch size
num_queries = 5 # Number of queries to generate for every paragraph
max_length_paragraph = 300 # Max length for paragraph
max_length_query = 64 # Max length for output query
# Now for every paragraph in our corpus, we generate the queries
with open("generated_queries.tsv", "w") as fOut:
for start_idx in tqdm.trange(0, len(paragraphs), batch_size):
sub_paragraphs = paragraphs[start_idx : start_idx + batch_size]
inputs = tokenizer.prepare_seq2seq_batch(
sub_paragraphs, max_length=max_length_paragraph, truncation=True, return_tensors="pt"
).to(device)
outputs = model.generate(
**inputs, max_length=max_length_query, do_sample=True, top_p=0.95, num_return_sequences=num_queries
)
for idx, out in enumerate(outputs):
query = tokenizer.decode(out, skip_special_tokens=True)
para = sub_paragraphs[int(idx / num_queries)]
fOut.write("{}\t{}\n".format(query.replace("\t", " ").strip(), para.replace("\t", " ").strip()))
|
"""
In this example we train a semantic search model to search through Wikipedia
articles about programming languages & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C , C Sharp , C++, Go , Java , JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natural Language Toolkit, NumPy, pandas (software), Perl, PHP, PostgreSQL, Python , PyTorch, R , React, Rust , Scala , scikit-learn, SciPy, Swift , TensorFlow, Vue.js
In:
1_programming_query_generation.py - We generate queries for all paragraphs from these articles
2_programming_train_bi-encoder.py - We train a SentenceTransformer bi-encoder with these generated queries. This results in a model we can then use for semantic search (for the given Wikipedia articles).
3_programming_semantic_search.py - Shows how the trained model can be used for semantic search
"""
import json
import gzip
from transformers import T5Tokenizer, T5ForConditionalGeneration
import torch
import tqdm
import os
from sentence_transformers import util
paragraphs = set()
# We use the Wikipedia articles of certain programming languages
corpus_filepath = 'wiki-programmming-20210101.jsonl.gz'
if not os.path.exists(corpus_filepath):
util.http_get('https://sbert.net/datasets/wiki-programmming-20210101.jsonl.gz', corpus_filepath)
with gzip.open(corpus_filepath, 'rt') as fIn:
for line in fIn:
data = json.loads(line.strip())
for p in data['paragraphs']:
if len(p) > 100: #Only take paragraphs with at least 100 chars
paragraphs.add(p)
paragraphs = list(paragraphs)
print("Paragraphs:", len(paragraphs))
# Now we load the model that is able to generate queries given a paragraph.
# This model was trained on the MS MARCO dataset, a dataset with 500k
# queries from Bing and the respective relevant passage
tokenizer = T5Tokenizer.from_pretrained('BeIR/query-gen-msmarco-t5-large-v1')
model = T5ForConditionalGeneration.from_pretrained('BeIR/query-gen-msmarco-t5-large-v1')
model.eval()
#Select the device
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
# Parameters for generation
batch_size = 8 #Batch size
num_queries = 5 #Number of queries to generate for every paragraph
max_length_paragraph = 300 #Max length for paragraph
max_length_query = 64 #Max length for output query
# Now for every paragraph in our corpus, we generate the queries
with open('generated_queries.tsv', 'w') as fOut:
for start_idx in tqdm.trange(0, len(paragraphs), batch_size):
sub_paragraphs = paragraphs[start_idx:start_idx+batch_size]
inputs = tokenizer.prepare_seq2seq_batch(sub_paragraphs, max_length=max_length_paragraph, truncation=True, return_tensors='pt').to(device)
outputs = model.generate(
**inputs,
max_length=max_length_query,
do_sample=True,
top_p=0.95,
num_return_sequences=num_queries)
for idx, out in enumerate(outputs):
query = tokenizer.decode(out, skip_special_tokens=True)
para = sub_paragraphs[int(idx/num_queries)]
fOut.write("{}\t{}\n".format(query.replace("\t", " ").strip(), para.replace("\t", " ").strip()))
|
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.6), max_per_img=100))
img_scales = [(640, 640), (320, 320), (960, 960)]
tta_pipeline = [
dict(type='LoadImageFromFile', backend_args=None),
dict(
type='TestTimeAug',
transforms=[
[
dict(type='Resize', scale=s, keep_ratio=True)
for s in img_scales
],
[
# ``RandomFlip`` must be placed before ``Pad``, otherwise
# bounding box coordinates after flipping cannot be
# recovered correctly.
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
],
[
dict(
type='Pad',
size=(960, 960),
pad_val=dict(img=(114, 114, 114))),
],
[dict(type='LoadAnnotations', with_bbox=True)],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction'))
]
])
]
|
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.6), max_per_img=100))
img_scales = [(640, 640), (320, 320), (960, 960)]
tta_pipeline = [
dict(type='LoadImageFromFile', backend_args=None),
dict(
type='TestTimeAug',
transforms=[
[
dict(type='Resize', scale=s, keep_ratio=True)
for s in img_scales
],
[
# ``RandomFlip`` must be placed before ``Pad``, otherwise
# bounding box coordinates after flipping cannot be
# recovered correctly.
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
],
[
dict(
type='Pad',
size=(960, 960),
pad_val=dict(img=(114, 114, 114))),
],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction'))
]
])
]
|
from __future__ import annotations
from .BinaryClassificationEvaluator import BinaryClassificationEvaluator
from .EmbeddingSimilarityEvaluator import EmbeddingSimilarityEvaluator
from .InformationRetrievalEvaluator import InformationRetrievalEvaluator
from .LabelAccuracyEvaluator import LabelAccuracyEvaluator
from .MSEEvaluator import MSEEvaluator
from .MSEEvaluatorFromDataFrame import MSEEvaluatorFromDataFrame
from .ParaphraseMiningEvaluator import ParaphraseMiningEvaluator
from .RerankingEvaluator import RerankingEvaluator
from .SentenceEvaluator import SentenceEvaluator
from .SequentialEvaluator import SequentialEvaluator
from .SimilarityFunction import SimilarityFunction
from .TranslationEvaluator import TranslationEvaluator
from .TripletEvaluator import TripletEvaluator
__all__ = [
"SentenceEvaluator",
"SimilarityFunction",
"BinaryClassificationEvaluator",
"EmbeddingSimilarityEvaluator",
"InformationRetrievalEvaluator",
"LabelAccuracyEvaluator",
"MSEEvaluator",
"MSEEvaluatorFromDataFrame",
"ParaphraseMiningEvaluator",
"SequentialEvaluator",
"TranslationEvaluator",
"TripletEvaluator",
"RerankingEvaluator",
]
|
from .BinaryClassificationEvaluator import BinaryClassificationEvaluator
from .EmbeddingSimilarityEvaluator import EmbeddingSimilarityEvaluator
from .InformationRetrievalEvaluator import InformationRetrievalEvaluator
from .LabelAccuracyEvaluator import LabelAccuracyEvaluator
from .MSEEvaluator import MSEEvaluator
from .MSEEvaluatorFromDataFrame import MSEEvaluatorFromDataFrame
from .ParaphraseMiningEvaluator import ParaphraseMiningEvaluator
from .RerankingEvaluator import RerankingEvaluator
from .SentenceEvaluator import SentenceEvaluator
from .SequentialEvaluator import SequentialEvaluator
from .SimilarityFunction import SimilarityFunction
from .TranslationEvaluator import TranslationEvaluator
from .TripletEvaluator import TripletEvaluator
__all__ = [
"SentenceEvaluator",
"SimilarityFunction",
"BinaryClassificationEvaluator",
"EmbeddingSimilarityEvaluator",
"InformationRetrievalEvaluator",
"LabelAccuracyEvaluator",
"MSEEvaluator",
"MSEEvaluatorFromDataFrame",
"ParaphraseMiningEvaluator",
"SequentialEvaluator",
"TranslationEvaluator",
"TripletEvaluator",
"RerankingEvaluator",
]
|
AMI_ID = {
# Managed by XGBoost team
"linux-amd64-gpu": {
"us-west-2": "ami-0b4079c15bbbd0faf",
},
"linux-amd64-mgpu": {
"us-west-2": "ami-0b4079c15bbbd0faf",
},
"windows-gpu": {
"us-west-2": "ami-0123456bcf4cdfb82",
},
"windows-cpu": {
"us-west-2": "ami-0123456bcf4cdfb82",
},
# Managed by BuildKite
# from https://s3.amazonaws.com/buildkite-aws-stack/latest/aws-stack.yml
"linux-amd64-cpu": {
"us-west-2": "ami-0083e0ae73c175ec6",
},
"pipeline-loader": {
"us-west-2": "ami-0083e0ae73c175ec6",
},
"linux-arm64-cpu": {
"us-west-2": "ami-0dbf1f9da54222f21",
},
}
STACK_PARAMS = {
"linux-amd64-gpu": {
"InstanceOperatingSystem": "linux",
"InstanceTypes": "g4dn.xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "8",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"linux-amd64-mgpu": {
"InstanceOperatingSystem": "linux",
"InstanceTypes": "g4dn.12xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "1",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"windows-gpu": {
"InstanceOperatingSystem": "windows",
"InstanceTypes": "g4dn.2xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "2",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"windows-cpu": {
"InstanceOperatingSystem": "windows",
"InstanceTypes": "c5a.2xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "2",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"linux-amd64-cpu": {
"InstanceOperatingSystem": "linux",
"InstanceTypes": "c5a.4xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "16",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"pipeline-loader": {
"InstanceOperatingSystem": "linux",
"InstanceTypes": "t3a.micro",
"AgentsPerInstance": "1",
"MinSize": "2",
"MaxSize": "2",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"linux-arm64-cpu": {
"InstanceOperatingSystem": "linux",
"InstanceTypes": "c6g.4xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "8",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
}
COMMON_STACK_PARAMS = {
"BuildkiteAgentTimestampLines": "false",
"BuildkiteWindowsAdministrator": "true",
"AssociatePublicIpAddress": "true",
"ScaleOutForWaitingJobs": "false",
"EnableCostAllocationTags": "true",
"CostAllocationTagName": "CreatedBy",
"ECRAccessPolicy": "full",
"EnableSecretsPlugin": "false",
"EnableECRPlugin": "false",
"EnableDockerLoginPlugin": "false",
"EnableDockerUserNamespaceRemap": "false",
"BuildkiteAgentExperiments": "normalised-upload-paths,resolve-commit-after-checkout",
}
|
AMI_ID = {
# Managed by XGBoost team
"linux-amd64-gpu": {
"us-west-2": "ami-070080d04e81c5e39",
},
"linux-amd64-mgpu": {
"us-west-2": "ami-070080d04e81c5e39",
},
"windows-gpu": {
"us-west-2": "ami-07c14abcf529d816a",
},
"windows-cpu": {
"us-west-2": "ami-07c14abcf529d816a",
},
# Managed by BuildKite
# from https://s3.amazonaws.com/buildkite-aws-stack/latest/aws-stack.yml
"linux-amd64-cpu": {
"us-west-2": "ami-0180f7fb0f07eb0bc",
},
"pipeline-loader": {
"us-west-2": "ami-0180f7fb0f07eb0bc",
},
"linux-arm64-cpu": {
"us-west-2": "ami-00686bdc2043a5505",
},
}
STACK_PARAMS = {
"linux-amd64-gpu": {
"InstanceOperatingSystem": "linux",
"InstanceTypes": "g4dn.xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "8",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"linux-amd64-mgpu": {
"InstanceOperatingSystem": "linux",
"InstanceTypes": "g4dn.12xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "1",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"windows-gpu": {
"InstanceOperatingSystem": "windows",
"InstanceTypes": "g4dn.2xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "2",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"windows-cpu": {
"InstanceOperatingSystem": "windows",
"InstanceTypes": "c5a.2xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "2",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"linux-amd64-cpu": {
"InstanceOperatingSystem": "linux",
"InstanceTypes": "c5a.4xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "16",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"pipeline-loader": {
"InstanceOperatingSystem": "linux",
"InstanceTypes": "t3a.micro",
"AgentsPerInstance": "1",
"MinSize": "2",
"MaxSize": "2",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
"linux-arm64-cpu": {
"InstanceOperatingSystem": "linux",
"InstanceTypes": "c6g.4xlarge",
"AgentsPerInstance": "1",
"MinSize": "0",
"MaxSize": "8",
"OnDemandPercentage": "100",
"ScaleOutFactor": "1.0",
"ScaleInIdlePeriod": "60", # in seconds
},
}
COMMON_STACK_PARAMS = {
"BuildkiteAgentTimestampLines": "false",
"BuildkiteWindowsAdministrator": "true",
"AssociatePublicIpAddress": "true",
"ScaleOutForWaitingJobs": "false",
"EnableCostAllocationTags": "true",
"CostAllocationTagName": "CreatedBy",
"ECRAccessPolicy": "full",
"EnableSecretsPlugin": "false",
"EnableECRPlugin": "false",
"EnableDockerLoginPlugin": "false",
"EnableDockerUserNamespaceRemap": "false",
"BuildkiteAgentExperiments": "normalised-upload-paths,resolve-commit-after-checkout",
}
|
from keras.src import backend
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
class BaseGlobalPooling(Layer):
"""Base global pooling layer."""
def __init__(
self, pool_dimensions, data_format=None, keepdims=False, **kwargs
):
super().__init__(**kwargs)
self.data_format = backend.standardize_data_format(data_format)
self.keepdims = keepdims
self.input_spec = InputSpec(ndim=pool_dimensions + 2)
self._build_at_init()
def call(self, inputs):
raise NotImplementedError
def compute_output_shape(self, input_shape):
num_spatial_dims = len(input_shape) - 2
if self.data_format == "channels_last":
if self.keepdims:
return (
(input_shape[0],)
+ (1,) * num_spatial_dims
+ (input_shape[-1],)
)
else:
return (input_shape[0],) + (input_shape[-1],)
else:
if self.keepdims:
return (input_shape[0], input_shape[1]) + (
1,
) * num_spatial_dims
else:
return (input_shape[0], input_shape[1])
def get_config(self):
config = super().get_config()
config.update(
{
"data_format": self.data_format,
"keepdims": self.keepdims,
}
)
return config
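# Illustrative only: a minimal subclass sketch (not part of Keras) showing how a
# concrete layer can override ``call`` on top of this base class, here with a
# global average over the spatial axes.
class _GlobalAveragePoolingND(BaseGlobalPooling):
    def call(self, inputs):
        from keras.src import ops  # assumed available alongside keras.src.backend

        if self.data_format == "channels_last":
            spatial_axes = list(range(1, len(inputs.shape) - 1))
        else:
            spatial_axes = list(range(2, len(inputs.shape)))
        return ops.mean(inputs, axis=spatial_axes, keepdims=self.keepdims)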
|
from keras.src import backend
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
class BaseGlobalPooling(Layer):
"""Base global pooling layer."""
def __init__(
self, pool_dimensions, data_format=None, keepdims=False, **kwargs
):
super().__init__(**kwargs)
self.data_format = backend.standardize_data_format(data_format)
self.keepdims = keepdims
self.input_spec = InputSpec(ndim=pool_dimensions + 2)
self.built = True
def call(self, inputs):
raise NotImplementedError
def compute_output_shape(self, input_shape):
num_spatial_dims = len(input_shape) - 2
if self.data_format == "channels_last":
if self.keepdims:
return (
(input_shape[0],)
+ (1,) * num_spatial_dims
+ (input_shape[-1],)
)
else:
return (input_shape[0],) + (input_shape[-1],)
else:
if self.keepdims:
return (input_shape[0], input_shape[1]) + (
1,
) * num_spatial_dims
else:
return (input_shape[0], input_shape[1])
def get_config(self):
config = super().get_config()
config.update(
{
"data_format": self.data_format,
"keepdims": self.keepdims,
}
)
return config
|
#!/usr/bin/env python3
"""Evaluate the lightning module by loading the checkpoint, the SentencePiece model, and the global_stats.json.
Example:
python eval.py --model-type tedlium3 --checkpoint-path ./experiments/checkpoints/epoch=119-step=254999.ckpt \
    --dataset-path ./datasets/tedlium --sp-model-path ./spm_bpe_500.model
"""
import logging
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
import torch
import torchaudio
from common import MODEL_TYPE_LIBRISPEECH, MODEL_TYPE_MUSTC, MODEL_TYPE_TEDLIUM3
from librispeech.lightning import LibriSpeechRNNTModule
from mustc.lightning import MuSTCRNNTModule
from tedlium3.lightning import TEDLIUM3RNNTModule
logger = logging.getLogger(__name__)
def compute_word_level_distance(seq1, seq2):
return torchaudio.functional.edit_distance(seq1.lower().split(), seq2.lower().split())
def run_eval_subset(model, dataloader, subset):
total_edit_distance = 0
total_length = 0
with torch.no_grad():
for idx, (batch, transcripts) in enumerate(dataloader):
actual = transcripts[0]
predicted = model(batch)
total_edit_distance += compute_word_level_distance(actual, predicted)
total_length += len(actual.split())
if idx % 100 == 0:
logger.info(f"Processed elem {idx}; WER: {total_edit_distance / total_length}")
logger.info(f"Final WER for {subset} set: {total_edit_distance / total_length}")
def run_eval(model, model_type):
if model_type == MODEL_TYPE_LIBRISPEECH:
dataloader = model.test_dataloader()
run_eval_subset(model, dataloader, "test")
elif model_type == MODEL_TYPE_TEDLIUM3:
dev_loader = model.dev_dataloader()
test_loader = model.test_dataloader()
run_eval_subset(model, dev_loader, "dev")
run_eval_subset(model, test_loader, "test")
elif model_type == MODEL_TYPE_MUSTC:
dev_loader = model.dev_dataloader()
test_common_loader = model.test_common_dataloader()
test_he_loader = model.test_he_dataloader()
run_eval_subset(model, dev_loader, "dev")
run_eval_subset(model, test_common_loader, "tst-COMMON")
run_eval_subset(model, test_he_loader, "tst-HE")
else:
raise ValueError(f"Encountered unsupported model type {model_type}.")
def get_lightning_module(args):
if args.model_type == MODEL_TYPE_LIBRISPEECH:
return LibriSpeechRNNTModule.load_from_checkpoint(
args.checkpoint_path,
librispeech_path=str(args.dataset_path),
sp_model_path=str(args.sp_model_path),
global_stats_path=str(args.global_stats_path),
)
elif args.model_type == MODEL_TYPE_TEDLIUM3:
return TEDLIUM3RNNTModule.load_from_checkpoint(
args.checkpoint_path,
tedlium_path=str(args.dataset_path),
sp_model_path=str(args.sp_model_path),
global_stats_path=str(args.global_stats_path),
)
elif args.model_type == MODEL_TYPE_MUSTC:
return MuSTCRNNTModule.load_from_checkpoint(
args.checkpoint_path,
mustc_path=str(args.dataset_path),
sp_model_path=str(args.sp_model_path),
global_stats_path=str(args.global_stats_path),
)
else:
raise ValueError(f"Encountered unsupported model type {args.model_type}.")
def parse_args():
parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument(
"--model-type", type=str, choices=[MODEL_TYPE_LIBRISPEECH, MODEL_TYPE_TEDLIUM3, MODEL_TYPE_MUSTC], required=True
)
parser.add_argument(
"--checkpoint-path",
type=pathlib.Path,
help="Path to checkpoint to use for evaluation.",
)
parser.add_argument(
"--global-stats-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="Path to JSON file containing feature means and stddevs.",
)
parser.add_argument(
"--dataset-path",
type=pathlib.Path,
help="Path to dataset.",
)
parser.add_argument(
"--sp-model-path",
type=pathlib.Path,
help="Path to SentencePiece model.",
)
parser.add_argument(
"--use-cuda",
action="store_true",
default=False,
help="Run using CUDA.",
)
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = parse_args()
init_logger(args.debug)
model = get_lightning_module(args)
if args.use_cuda:
model = model.to(device="cuda")
run_eval(model, args.model_type)
if __name__ == "__main__":
cli_main()
|
#!/usr/bin/env python3
"""Evaluate the lightning module by loading the checkpoint, the SentencePiece model, and the global_stats.json.
Example:
python eval.py --model-type tedlium3 --checkpoint-path ./experiments/checkpoints/epoch=119-step=254999.ckpt \
    --dataset-path ./datasets/tedlium --sp-model-path ./spm_bpe_500.model
"""
import logging
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
import torch
import torchaudio
from common import MODEL_TYPE_LIBRISPEECH, MODEL_TYPE_TEDLIUM3, MODEL_TYPE_MUSTC
from librispeech.lightning import LibriSpeechRNNTModule
from mustc.lightning import MuSTCRNNTModule
from tedlium3.lightning import TEDLIUM3RNNTModule
logger = logging.getLogger(__name__)
def compute_word_level_distance(seq1, seq2):
return torchaudio.functional.edit_distance(seq1.lower().split(), seq2.lower().split())
def run_eval_subset(model, dataloader, subset):
total_edit_distance = 0
total_length = 0
with torch.no_grad():
for idx, (batch, transcripts) in enumerate(dataloader):
actual = transcripts[0]
predicted = model(batch)
total_edit_distance += compute_word_level_distance(actual, predicted)
total_length += len(actual.split())
if idx % 100 == 0:
logger.info(f"Processed elem {idx}; WER: {total_edit_distance / total_length}")
logger.info(f"Final WER for {subset} set: {total_edit_distance / total_length}")
def run_eval(model, model_type):
if model_type == MODEL_TYPE_LIBRISPEECH:
dataloader = model.test_dataloader()
run_eval_subset(model, dataloader, "test")
elif model_type == MODEL_TYPE_TEDLIUM3:
dev_loader = model.dev_dataloader()
test_loader = model.test_dataloader()
run_eval_subset(model, dev_loader, "dev")
run_eval_subset(model, test_loader, "test")
elif model_type == MODEL_TYPE_MUSTC:
dev_loader = model.dev_dataloader()
test_common_loader = model.test_common_dataloader()
test_he_loader = model.test_he_dataloader()
run_eval_subset(model, dev_loader, "dev")
run_eval_subset(model, test_common_loader, "tst-COMMON")
run_eval_subset(model, test_he_loader, "tst-HE")
else:
raise ValueError(f"Encountered unsupported model type {model_type}.")
def get_lightning_module(args):
if args.model_type == MODEL_TYPE_LIBRISPEECH:
return LibriSpeechRNNTModule.load_from_checkpoint(
args.checkpoint_path,
librispeech_path=str(args.dataset_path),
sp_model_path=str(args.sp_model_path),
global_stats_path=str(args.global_stats_path),
)
elif args.model_type == MODEL_TYPE_TEDLIUM3:
return TEDLIUM3RNNTModule.load_from_checkpoint(
args.checkpoint_path,
tedlium_path=str(args.dataset_path),
sp_model_path=str(args.sp_model_path),
global_stats_path=str(args.global_stats_path),
)
elif args.model_type == MODEL_TYPE_MUSTC:
return MuSTCRNNTModule.load_from_checkpoint(
args.checkpoint_path,
mustc_path=str(args.dataset_path),
sp_model_path=str(args.sp_model_path),
global_stats_path=str(args.global_stats_path),
)
else:
raise ValueError(f"Encountered unsupported model type {args.model_type}.")
def parse_args():
parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument(
"--model-type", type=str, choices=[MODEL_TYPE_LIBRISPEECH, MODEL_TYPE_TEDLIUM3, MODEL_TYPE_MUSTC], required=True
)
parser.add_argument(
"--checkpoint-path",
type=pathlib.Path,
help="Path to checkpoint to use for evaluation.",
)
parser.add_argument(
"--global-stats-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="Path to JSON file containing feature means and stddevs.",
)
parser.add_argument(
"--dataset-path",
type=pathlib.Path,
help="Path to dataset.",
)
parser.add_argument(
"--sp-model-path",
type=pathlib.Path,
help="Path to SentencePiece model.",
)
parser.add_argument(
"--use-cuda",
action="store_true",
default=False,
help="Run using CUDA.",
)
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = parse_args()
init_logger(args.debug)
model = get_lightning_module(args)
if args.use_cuda:
model = model.to(device="cuda")
run_eval(model, args.model_type)
if __name__ == "__main__":
cli_main()
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
# model settings
model = dict(
type='CornerNet',
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CornerHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=1,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_embedding=dict(
type='AssociativeEmbeddingLoss',
pull_weight=0.10,
push_weight=0.10),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
**img_norm_cfg),
dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=True,
transforms=[
dict(type='Resize'),
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
**img_norm_cfg),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(
type='Collect',
keys=['img'],
meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'img_norm_cfg', 'border')),
])
]
data = dict(
samples_per_gpu=5,
workers_per_gpu=3,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='Adam', lr=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[180])
runner = dict(type='EpochBasedRunner', max_epochs=210)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (10 GPUs) x (5 samples per GPU)
auto_scale_lr = dict(base_batch_size=50)
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
# model settings
model = dict(
type='CornerNet',
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CornerHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=1,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_embedding=dict(
type='AssociativeEmbeddingLoss',
pull_weight=0.10,
push_weight=0.10),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
**img_norm_cfg),
dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=True,
transforms=[
dict(type='Resize'),
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
**img_norm_cfg),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(
type='Collect',
keys=['img'],
meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'img_norm_cfg', 'border')),
])
]
data = dict(
samples_per_gpu=5,
workers_per_gpu=3,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='Adam', lr=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[180])
runner = dict(type='EpochBasedRunner', max_epochs=210)
|
import gzip
import logging
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, evaluation, losses, models, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Some training parameters. For this example, we use a batch_size of 128, a max sentence length (max_seq_length)
# of 32 word pieces, and roberta-base as the model.
model_name = "roberta-base"
batch_size = 128
max_seq_length = 32
num_epochs = 1
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "data/askubuntu"
output_path = "output/askubuntu-simcse-{}-{}-{}".format(
model_name, batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the dev/test set
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(InputExample(texts=[sentence, sentence]))
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train the model #################
# As Loss function, we use MultipleNegativesRankingLoss
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.MultipleNegativesRankingLoss(model)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Dev performance before training")
dev_evaluator(model)
warmup_steps = int(num_epochs * len(train_dataloader) * 0.1)
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
evaluation_steps=100,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=output_path,
show_progress_bar=True,
use_amp=True, # If your GPU does not have FP16 cores, set use_amp=False
)
latest_output_path = output_path + "-latest"
model.save(latest_output_path)
### Run test evaluation on the latest model. This is equivalent to not having a dev dataset
model = SentenceTransformer(latest_output_path)
test_evaluator(model)
|
from sentence_transformers import SentenceTransformer, LoggingHandler, InputExample
from sentence_transformers import models, util, datasets, evaluation, losses
import logging
import os
import gzip
from torch.utils.data import DataLoader
from datetime import datetime
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
# Some training parameters. For this example, we use a batch_size of 128, a max sentence length (max_seq_length)
# of 32 word pieces, and roberta-base as the model.
model_name = 'roberta-base'
batch_size = 128
max_seq_length = 32
num_epochs = 1
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = 'data/askubuntu'
output_path = 'output/askubuntu-simcse-{}-{}-{}'.format(model_name, batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ['text_tokenized.txt.gz', 'dev.txt', 'test.txt', 'train_random.txt']:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get('https://github.com/taolei87/askubuntu/raw/master/'+filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, 'text_tokenized.txt.gz'), 'rt', encoding='utf8') as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: #Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append({
'query': corpus[query_id],
'positive': [corpus[pid] for pid in relevant_id],
'negative': [corpus[pid] for pid in negative_ids]
})
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, 'dev.txt'))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, 'test.txt'))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the dev/test set
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(InputExample(texts=[sentence, sentence]))
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train the model #################
# As Loss function, we use MultipleNegativesRankingLoss
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.MultipleNegativesRankingLoss(model)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name='AskUbuntu dev')
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name='AskUbuntu test')
logging.info("Dev performance before training")
dev_evaluator(model)
warmup_steps = int(num_epochs*len(train_dataloader)*0.1)
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
evaluation_steps=100,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=output_path,
show_progress_bar=True,
use_amp=True #If your GPU does not have FP16 cores, set use_amp=False
)
latest_output_path = output_path + "-latest"
model.save(latest_output_path)
### Run test evaluation on the latest model. This is equivalent to not having a dev dataset
model = SentenceTransformer(latest_output_path)
test_evaluator(model)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='AudioDoc')
class AudioDoc(BaseDocument):
"""
Document for handling audios.
The Audio Document can contain an AudioUrl (`AudioDoc.url`), an AudioTensor
(`AudioDoc.tensor`), and an AnyEmbedding (`AudioDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import AudioDoc
# use it directly
audio = AudioDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import AudioDoc, TextDoc
from typing import Optional
# extend it
class MyAudio(AudioDoc):
name: Optional[TextDoc]
audio = MyAudio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
audio.name = TextDoc(text='my first audio')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import AudioDoc, TextDoc
# compose it
class MultiModalDoc(BaseDocument):
audio: AudioDoc
text: TextDoc
mmdoc = MultiModalDoc(
audio=AudioDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.url.load()
# equivalent to
mmdoc.audio.bytes = mmdoc.audio.url.load_bytes()
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.bytes.load()
"""
url: Optional[AudioUrl]
tensor: Optional[AudioTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[AudioBytes]
frame_rate: Optional[int]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='Audio')
class Audio(BaseDocument):
"""
Document for handling audios.
The Audio Document can contain an AudioUrl (`Audio.url`), an AudioTensor
(`Audio.tensor`), and an AnyEmbedding (`Audio.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Audio
# use it directly
audio = Audio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import Audio, Text
from typing import Optional
# extend it
class MyAudio(Audio):
name: Optional[Text]
audio = MyAudio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
audio.name = Text(text='my first audio')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Audio, Text
# compose it
class MultiModalDoc(BaseDocument):
audio: Audio
text: Text
mmdoc = MultiModalDoc(
audio=Audio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.url.load()
# equivalent to
mmdoc.audio.bytes = mmdoc.audio.url.load_bytes()
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.bytes.load()
"""
url: Optional[AudioUrl]
tensor: Optional[AudioTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[AudioBytes]
frame_rate: Optional[int]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
from __future__ import annotations
from collections.abc import Iterable
import torch.nn as nn
from torch import Tensor
from sentence_transformers.losses.CosineSimilarityLoss import CosineSimilarityLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCosineSimilarityLoss(CosineSimilarityLoss):
def __init__(
self,
model: SparseEncoder,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
SparseCosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SparseEncoder model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
default, the identity function is used (i.e. no change).
Requirements:
- Need to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range `[0, 1]`
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is :class:`SparseCoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(
model=model,
loss=losses.SparseCosineSimilarityLoss(model),
corpus_regularizer_weight=5e-5,
use_corpus_regularizer_only=True,
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
model.similarity_fn_name = "cosine"
return super().__init__(model, loss_fct=loss_fct, cos_score_transformation=cos_score_transformation)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseCosineSimilarityLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from __future__ import annotations
from collections.abc import Iterable
import torch.nn as nn
from torch import Tensor
from sentence_transformers.losses.CosineSimilarityLoss import CosineSimilarityLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCosineSimilarityLoss(CosineSimilarityLoss):
def __init__(
self,
model: SparseEncoder,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
SparseCosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SparseEncoder model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
default, the identity function is used (i.e. no change).
Requirements:
- Need to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range `[0, 1]`
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is :class:`SparseCoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseCosineSimilarityLoss(model), lambda_corpus=5e-5, all_docs=True)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
model.similarity_fn_name = "cosine"
return super().__init__(model, loss_fct=loss_fct, cos_score_transformation=cos_score_transformation)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseCosineSimilarityLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from . import ( # noqa: F401
_extension,
compliance,
datasets,
functional,
io,
kaldi_io,
models,
pipelines,
sox_effects,
transforms,
utils,
)
from .backend.common import AudioMetaData
try:
from .version import __version__, git_version # noqa: F401
except ImportError:
pass
def _is_backend_dispatcher_enabled():
import os
return os.getenv("TORCHAUDIO_USE_BACKEND_DISPATCHER", default="1") == "1"
if _is_backend_dispatcher_enabled():
from ._backend import _init_backend, get_audio_backend, list_audio_backends, set_audio_backend
else:
from .backend import _init_backend, get_audio_backend, list_audio_backends, set_audio_backend
_init_backend()
__all__ = [
"AudioMetaData",
"io",
"compliance",
"datasets",
"functional",
"models",
"pipelines",
"kaldi_io",
"utils",
"sox_effects",
"transforms",
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
|
from torchaudio import ( # noqa: F401
_extension,
compliance,
datasets,
functional,
io,
kaldi_io,
models,
pipelines,
sox_effects,
transforms,
utils,
)
try:
from .version import __version__, git_version # noqa: F401
except ImportError:
pass
def _is_backend_dispatcher_enabled():
import os
return os.getenv("TORCHAUDIO_USE_BACKEND_DISPATCHER", default="1") == "1"
if _is_backend_dispatcher_enabled():
from ._backend import _init_backend, get_audio_backend, list_audio_backends, set_audio_backend
else:
from .backend import _init_backend, get_audio_backend, list_audio_backends, set_audio_backend
_init_backend()
__all__ = [
"io",
"compliance",
"datasets",
"functional",
"models",
"pipelines",
"kaldi_io",
"utils",
"sox_effects",
"transforms",
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
|
from typing import Iterator, Dict
class Offset2ID:
def __init__(self, ids=None, list_like=True):
self.ids = ids or []
self._list_like = list_like
def get_id(self, idx):
if not self._list_like:
raise ValueError(
"The offset2id is not enabled for list-like indexes. To avoid this error, configure the "
"`list_like` to True"
)
return self.ids[idx]
def append(self, data):
if self._list_like:
self.ids.append(data)
def extend(self, data):
if self._list_like:
self.ids.extend(data)
def update(self, position, data_id):
if self._list_like:
self.ids[position] = data_id
def delete_by_id(self, _id):
if self._list_like:
del self.ids[self.ids.index(_id)]
def index(self, _id):
if not self._list_like:
raise ValueError(
"The offset2id is not enabled for list-like indexes. To avoid this error, configure the "
"`list_like` to True"
)
return self.ids.index(_id)
def delete_by_offset(self, position):
if self._list_like:
del self.ids[position]
def insert(self, position, data_id):
if self._list_like:
self.ids.insert(position, data_id)
def clear(self):
if self._list_like:
self.ids.clear()
def delete_by_ids(self, ids):
if self._list_like:
ids = set(ids)
self.ids = list(filter(lambda _id: _id not in ids, self.ids))
def update_ids(self, _ids_map: Dict[str, str]):
if self._list_like:
for i in range(len(self.ids)):
if self.ids[i] in _ids_map:
self.ids[i] = _ids_map[self.ids[i]]
def save(self):
pass
def load(self):
pass
def __iter__(self) -> Iterator['str']:
yield from self.ids
def __eq__(self, other):
return self.ids == other.ids
def __len__(self):
return len(self.ids)
|
from typing import Iterator, Dict
class Offset2ID:
def __init__(self, ids=None):
self.ids = ids or []
def get_id(self, idx):
return self.ids[idx]
def append(self, data):
self.ids.append(data)
def extend(self, data):
self.ids.extend(data)
def update(self, position, data_id):
self.ids[position] = data_id
def delete_by_id(self, _id):
del self.ids[self.ids.index(_id)]
def index(self, _id):
return self.ids.index(_id)
def delete_by_offset(self, position):
del self.ids[position]
def insert(self, position, data_id):
self.ids.insert(position, data_id)
def clear(self):
self.ids.clear()
def delete_by_ids(self, ids):
ids = set(ids)
self.ids = list(filter(lambda _id: _id not in ids, self.ids))
def update_ids(self, _ids_map: Dict[str, str]):
for i in range(len(self.ids)):
if self.ids[i] in _ids_map:
self.ids[i] = _ids_map[self.ids[i]]
def save(self):
pass
def load(self):
pass
def __iter__(self) -> Iterator['str']:
yield from self.ids
def __eq__(self, other):
return self.ids == other.ids
def __len__(self):
return len(self.ids)
|
import os
from argparse import ArgumentParser
import mmcv
import requests
import torch
from mmengine.structures import InstanceData
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.structures import DetDataSample
from mmdet.utils import register_all_modules
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument('model_name', help='The model name in the server')
parser.add_argument(
'--inference-addr',
default='127.0.0.1:8080',
help='Address and port of the inference server')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.5, help='bbox score threshold')
parser.add_argument(
'--work-dir',
type=str,
default=None,
help='output directory to save drawn results.')
args = parser.parse_args()
return args
def align_ts_output(inputs, metainfo, device):
bboxes = []
labels = []
scores = []
for i, pred in enumerate(inputs):
bboxes.append(pred['bbox'])
labels.append(pred['class_label'])
scores.append(pred['score'])
pred_instances = InstanceData(metainfo=metainfo)
pred_instances.bboxes = torch.tensor(
bboxes, dtype=torch.float32, device=device)
pred_instances.labels = torch.tensor(
labels, dtype=torch.int64, device=device)
pred_instances.scores = torch.tensor(
scores, dtype=torch.float32, device=device)
ts_data_sample = DetDataSample(pred_instances=pred_instances)
return ts_data_sample
def main(args):
register_all_modules()
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
pytorch_results = inference_detector(model, args.img)
keep = pytorch_results.pred_instances.scores >= args.score_thr
pytorch_results.pred_instances = pytorch_results.pred_instances[keep]
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then pass to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
# show the results
img = mmcv.imread(args.img)
img = mmcv.imconvert(img, 'bgr', 'rgb')
pt_out_file = None
ts_out_file = None
if args.work_dir is not None:
os.makedirs(args.work_dir, exist_ok=True)
pt_out_file = os.path.join(args.work_dir, 'pytorch_result.png')
ts_out_file = os.path.join(args.work_dir, 'torchserve_result.png')
visualizer.add_datasample(
'pytorch_result',
img.copy(),
data_sample=pytorch_results,
draw_gt=False,
out_file=pt_out_file,
show=True,
wait_time=0)
url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
with open(args.img, 'rb') as image:
response = requests.post(url, image)
metainfo = pytorch_results.pred_instances.metainfo
ts_results = align_ts_output(response.json(), metainfo, args.device)
visualizer.add_datasample(
'torchserve_result',
img,
data_sample=ts_results,
draw_gt=False,
out_file=ts_out_file,
show=True,
wait_time=0)
assert torch.allclose(pytorch_results.pred_instances.bboxes,
ts_results.pred_instances.bboxes)
assert torch.allclose(pytorch_results.pred_instances.labels,
ts_results.pred_instances.labels)
assert torch.allclose(pytorch_results.pred_instances.scores,
ts_results.pred_instances.scores)
if __name__ == '__main__':
args = parse_args()
main(args)
|
from argparse import ArgumentParser
import numpy as np
import requests
from mmdet.apis import inference_detector, init_detector, show_result_pyplot
from mmdet.core import bbox2result
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument('model_name', help='The model name in the server')
parser.add_argument(
'--inference-addr',
default='127.0.0.1:8080',
help='Address and port of the inference server')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.5, help='bbox score threshold')
args = parser.parse_args()
return args
def parse_result(input, model_class):
bbox = []
label = []
score = []
for anchor in input:
bbox.append(anchor['bbox'])
label.append(model_class.index(anchor['class_name']))
score.append([anchor['score']])
bboxes = np.append(bbox, score, axis=1)
labels = np.array(label)
result = bbox2result(bboxes, labels, len(model_class))
return result
def main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
model_result = inference_detector(model, args.img)
for i, anchor_set in enumerate(model_result):
anchor_set = anchor_set[anchor_set[:, 4] >= 0.5]
model_result[i] = anchor_set
# show the results
show_result_pyplot(
model,
args.img,
model_result,
score_thr=args.score_thr,
title='pytorch_result')
url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
with open(args.img, 'rb') as image:
response = requests.post(url, image)
server_result = parse_result(response.json(), model.CLASSES)
show_result_pyplot(
model,
args.img,
server_result,
score_thr=args.score_thr,
title='server_result')
for i in range(len(model.CLASSES)):
assert np.allclose(model_result[i], server_result[i])
if __name__ == '__main__':
args = parse_args()
main(args)
|
from typing import Optional
import numpy as np
import pytest
from pydantic import BaseModel
from typing_extensions import TypedDict
from docarray import BaseDocument, DocumentArray
from docarray.documents import AudioDoc, ImageDoc, TextDoc
from docarray.documents.helper import create_doc, create_from_typeddict
from docarray.typing import AudioNdArray
def test_multi_modal_doc():
class MyMultiModalDoc(BaseDocument):
image: ImageDoc
text: TextDoc
doc = MyMultiModalDoc(
image=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(text='hello')
)
assert isinstance(doc.image, BaseDocument)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.text, TextDoc)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
def test_nested_chunks_document():
class ChunksDocument(BaseDocument):
text: str
images: DocumentArray[ImageDoc]
doc = ChunksDocument(
text='hello',
images=DocumentArray[ImageDoc]([ImageDoc() for _ in range(10)]),
)
assert isinstance(doc.images, DocumentArray)
def test_create_doc():
with pytest.raises(ValueError):
_ = create_doc(
'MyMultiModalDoc',
__base__=BaseModel,
image=(ImageDoc, ...),
text=(TextDoc, ...),
)
MyMultiModalDoc = create_doc(
'MyMultiModalDoc', image=(ImageDoc, ...), text=(TextDoc, ...)
)
assert issubclass(MyMultiModalDoc, BaseDocument)
doc = MyMultiModalDoc(
image=ImageDoc(tensor=np.zeros((3, 224, 224))), text=TextDoc(text='hello')
)
assert isinstance(doc.image, BaseDocument)
assert isinstance(doc.image, ImageDoc)
assert isinstance(doc.text, TextDoc)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
MyAudio = create_doc(
'MyAudio',
__base__=AudioDoc,
title=(str, ...),
tensor=(Optional[AudioNdArray], ...),
)
assert issubclass(MyAudio, BaseDocument)
assert issubclass(MyAudio, AudioDoc)
def test_create_from_typeddict():
class MyMultiModalDoc(TypedDict):
image: ImageDoc
text: TextDoc
with pytest.raises(ValueError):
_ = create_from_typeddict(MyMultiModalDoc, __base__=BaseModel)
Doc = create_from_typeddict(MyMultiModalDoc)
assert issubclass(Doc, BaseDocument)
class MyAudio(TypedDict):
title: str
tensor: Optional[AudioNdArray]
Doc = create_from_typeddict(MyAudio, __base__=AudioDoc)
assert issubclass(Doc, BaseDocument)
assert issubclass(Doc, AudioDoc)
|
from typing import Optional
import numpy as np
import pytest
from pydantic import BaseModel
from typing_extensions import TypedDict
from docarray import BaseDocument, DocumentArray
from docarray.documents import Audio, Image, Text
from docarray.documents.helper import create_doc, create_from_typeddict
from docarray.typing.tensor.audio import AudioNdArray
def test_multi_modal_doc():
class MyMultiModalDoc(BaseDocument):
image: Image
text: Text
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
assert isinstance(doc.image, BaseDocument)
assert isinstance(doc.image, Image)
assert isinstance(doc.text, Text)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
def test_nested_chunks_document():
class ChunksDocument(BaseDocument):
text: str
images: DocumentArray[Image]
doc = ChunksDocument(
text='hello',
images=DocumentArray[Image]([Image() for _ in range(10)]),
)
assert isinstance(doc.images, DocumentArray)
def test_create_doc():
with pytest.raises(ValueError):
_ = create_doc(
'MyMultiModalDoc', __base__=BaseModel, image=(Image, ...), text=(Text, ...)
)
MyMultiModalDoc = create_doc(
'MyMultiModalDoc', image=(Image, ...), text=(Text, ...)
)
assert issubclass(MyMultiModalDoc, BaseDocument)
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
assert isinstance(doc.image, BaseDocument)
assert isinstance(doc.image, Image)
assert isinstance(doc.text, Text)
assert doc.text.text == 'hello'
assert (doc.image.tensor == np.zeros((3, 224, 224))).all()
MyAudio = create_doc(
'MyAudio',
__base__=Audio,
title=(str, ...),
tensor=(Optional[AudioNdArray], ...),
)
assert issubclass(MyAudio, BaseDocument)
assert issubclass(MyAudio, Audio)
def test_create_from_typeddict():
class MyMultiModalDoc(TypedDict):
image: Image
text: Text
with pytest.raises(ValueError):
_ = create_from_typeddict(MyMultiModalDoc, __base__=BaseModel)
Doc = create_from_typeddict(MyMultiModalDoc)
assert issubclass(Doc, BaseDocument)
class MyAudio(TypedDict):
title: str
tensor: Optional[AudioNdArray]
Doc = create_from_typeddict(MyAudio, __base__=Audio)
assert issubclass(Doc, BaseDocument)
assert issubclass(Doc, Audio)
|
import os
# When using jax.experimental.enable_x64 in unit test, we want to keep the
# default dtype with 32 bits, aligning it with Keras's default.
os.environ["JAX_DEFAULT_DTYPE_BITS"] = "32"
try:
# When using torch and tensorflow, torch needs to be imported first,
# otherwise it will segfault upon import. This should force the torch
# import to happen first for all tests.
import torch # noqa: F401
except ImportError:
pass
import pytest # noqa: E402
from keras.src.backend import backend # noqa: E402
def pytest_configure(config):
config.addinivalue_line(
"markers",
"requires_trainable_backend: mark test for trainable backend only",
)
def pytest_collection_modifyitems(config, items):
openvino_skipped_tests = []
if backend() == "openvino":
with open(
"keras/src/backend/openvino/excluded_concrete_tests.txt", "r"
) as file:
openvino_skipped_tests = file.readlines()
# strip surrounding whitespace and drop empty lines
openvino_skipped_tests = [
line.strip() for line in openvino_skipped_tests if line.strip()
]
requires_trainable_backend = pytest.mark.skipif(
backend() == "numpy" or backend() == "openvino",
reason="Trainer not implemented for NumPy and OpenVINO backend.",
)
for item in items:
if "requires_trainable_backend" in item.keywords:
item.add_marker(requires_trainable_backend)
# also skip the concrete openvino tests listed in the special file;
# this is a more granular mechanism for excluding tests than the --ignore option
for skipped_test in openvino_skipped_tests:
if skipped_test in item.nodeid:
item.add_marker(
skip_if_backend(
"openvino",
"Not supported operation by openvino backend",
)
)
def skip_if_backend(given_backend, reason):
return pytest.mark.skipif(backend() == given_backend, reason=reason)
|
import os
# When using jax.experimental.enable_x64 in unit test, we want to keep the
# default dtype with 32 bits, aligning it with Keras's default.
os.environ["JAX_DEFAULT_DTYPE_BITS"] = "32"
try:
# When using torch and tensorflow, torch needs to be imported first,
# otherwise it will segfault upon import. This should force the torch
# import to happen first for all tests.
import torch # noqa: F401
except ImportError:
pass
import pytest # noqa: E402
from keras.src.backend import backend # noqa: E402
def pytest_configure(config):
config.addinivalue_line(
"markers",
"requires_trainable_backend: mark test for trainable backend only",
)
def pytest_collection_modifyitems(config, items):
with open(
"keras/src/backend/openvino/excluded_concrete_tests.txt", "r"
) as file:
openvino_skipped_tests = file.readlines()
# strip surrounding whitespace and drop empty lines
openvino_skipped_tests = [
line.strip() for line in openvino_skipped_tests if line.strip()
]
requires_trainable_backend = pytest.mark.skipif(
backend() == "numpy" or backend() == "openvino",
reason="Trainer not implemented for NumPy and OpenVINO backend.",
)
for item in items:
if "requires_trainable_backend" in item.keywords:
item.add_marker(requires_trainable_backend)
# also skip the concrete openvino tests listed in the special file;
# this is a more granular mechanism for excluding tests than the --ignore option
for skipped_test in openvino_skipped_tests:
if skipped_test in item.nodeid:
item.add_marker(
skip_if_backend(
"openvino",
"Not supported operation by openvino backend",
)
)
def skip_if_backend(given_backend, reason):
return pytest.mark.skipif(backend() == given_backend, reason=reason)
|
import importlib
import threading
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
fsspec.register_implementation(fs_class.protocol, fs_class)
def extract_path_from_uri(dataset_path: str) -> str:
"""
Preprocesses `dataset_path` and removes remote filesystem (e.g. removing `s3://`).
Args:
dataset_path (`str`):
Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory.
"""
if "://" in dataset_path:
dataset_path = dataset_path.split("://")[1]
return dataset_path
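# Illustrative usage of the helper above (not part of the original module; the paths
# are made up for the example):
#   extract_path_from_uri("s3://my-bucket/dataset/train")  ->  "my-bucket/dataset/train"
#   extract_path_from_uri("dataset/train")                 ->  "dataset/train"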
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
Validates if filesystem has remote protocol.
Args:
fs (`fsspec.spec.AbstractFileSystem`):
An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`].
"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
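# Likewise, an illustrative check of the helper above (assumes the optional s3fs
# package is installed for the "s3" protocol; the local "file" protocol ships with fsspec):
#   is_remote_filesystem(fsspec.filesystem("file"))  ->  False
#   is_remote_filesystem(fsspec.filesystem("s3"))    ->  True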
def _reset_fsspec_lock() -> None:
"""
Clear reference to the loop and thread.
This is necessary otherwise HTTPFileSystem hangs in the ML training loop.
Only required for fsspec >= 0.9.0
See https://github.com/fsspec/gcsfs/issues/379
"""
if hasattr(fsspec.asyn, "reset_lock"):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
fsspec.asyn.iothread[0] = None
fsspec.asyn.loop[0] = None
fsspec.asyn.lock = threading.Lock()
|
import importlib
from typing import List
import fsspec
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
fsspec.register_implementation(fs_class.protocol, fs_class)
def extract_path_from_uri(dataset_path: str) -> str:
"""
Preprocesses `dataset_path` and removes remote filesystem (e.g. removing `s3://`).
Args:
dataset_path (`str`):
Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory.
"""
if "://" in dataset_path:
dataset_path = dataset_path.split("://")[1]
return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
Validates if filesystem has remote protocol.
Args:
fs (`fsspec.spec.AbstractFileSystem`):
An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`].
"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
|
"""
==================================================
Principal Component Analysis (PCA) on Iris Dataset
==================================================
This example applies a well-known decomposition technique, Principal Component
Analysis (PCA), to the
`Iris dataset <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_.
This dataset consists of 4 features: sepal length, sepal width, petal length, and petal
width. We use PCA to project this 4-dimensional feature space into a 3-dimensional space.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Loading the Iris dataset
# ------------------------
#
# The Iris dataset is directly available as part of scikit-learn. It can be loaded
# using the :func:`~sklearn.datasets.load_iris` function. With the default parameters,
# a :class:`~sklearn.utils.Bunch` object is returned, containing the data, the
# target values, the feature names, and the target names.
from sklearn.datasets import load_iris
iris = load_iris(as_frame=True)
print(iris.keys())
# %%
# Plot of pairs of features of the Iris dataset
# ---------------------------------------------
#
# Let's first plot the pairs of features of the Iris dataset.
import seaborn as sns
# Rename classes using the iris target names
iris.frame["target"] = iris.target_names[iris.target]
_ = sns.pairplot(iris.frame, hue="target")
# %%
# Each data point on each scatter plot refers to one of the 150 iris flowers
# in the dataset, with the color indicating their respective type
# (Setosa, Versicolor, and Virginica).
#
# You can already see a pattern regarding the Setosa type, which is
# easily identifiable based on its short and wide sepal. Only
# considering these two dimensions, sepal width and length, there's still
# overlap between the Versicolor and Virginica types.
#
# The diagonal of the plot shows the distribution of each feature. We observe
# that the petal width and the petal length are the most discriminant features
# for the three types.
#
# Plot a PCA representation
# -------------------------
# Let's apply a Principal Component Analysis (PCA) to the iris dataset
# and then plot the irises across the first three PCA dimensions.
# This will allow us to better differentiate among the three types!
import matplotlib.pyplot as plt
# unused but required import for doing 3d projections with matplotlib < 3.2
import mpl_toolkits.mplot3d # noqa: F401
from sklearn.decomposition import PCA
fig = plt.figure(1, figsize=(8, 6))
ax = fig.add_subplot(111, projection="3d", elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
scatter = ax.scatter(
X_reduced[:, 0],
X_reduced[:, 1],
X_reduced[:, 2],
c=iris.target,
s=40,
)
ax.set(
title="First three PCA dimensions",
xlabel="1st Eigenvector",
ylabel="2nd Eigenvector",
zlabel="3rd Eigenvector",
)
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.zaxis.set_ticklabels([])
# Add a legend
legend1 = ax.legend(
scatter.legend_elements()[0],
iris.target_names.tolist(),
loc="upper right",
title="Classes",
)
ax.add_artist(legend1)
plt.show()
# %%
# PCA will create 3 new features that are a linear combination of the 4 original
# features. The components are chosen so that they capture as much of the variance in
# the data as possible. With this transformation, we see that we can identify each
# species using only the first feature (i.e., the first eigenvector).
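# %%
# A quick way to quantify how much information each component keeps is to inspect the
# explained variance ratio of a fitted PCA:
pca = PCA(n_components=3).fit(iris.data)
print(pca.explained_variance_ratio_)
# %%
# On this dataset the first component alone accounts for roughly 92% of the variance,
# which is why it is enough to separate the species reasonably well.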
|
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
# unused but required import for doing 3d projections with matplotlib < 3.2
import mpl_toolkits.mplot3d # noqa: F401
import numpy as np
from sklearn import datasets, decomposition
np.random.seed(5)
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = fig.add_subplot(111, projection="3d", elev=48, azim=134)
ax.set_position([0, 0, 0.95, 1])
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [("Setosa", 0), ("Versicolour", 1), ("Virginica", 2)]:
ax.text3D(
X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(),
name,
horizontalalignment="center",
bbox=dict(alpha=0.5, edgecolor="w", facecolor="w"),
)
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.nipy_spectral, edgecolor="k")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.zaxis.set_ticklabels([])
plt.show()
|
import collections
import json
import logging
import os
import string
from typing import Iterable, List
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
logger = logging.getLogger(__name__)
class PhraseTokenizer(WordTokenizer):
"""Tokenizes the text with respect to existent phrases in the vocab.
This tokenizer respects phrases that are in the vocab. Phrases are separated with 'ngram_separator'; for example,
in the Google News word2vec file, ngrams are separated with a _ like New_York. These phrases are detected in the text and merged into one special token. (New York is the ... => [New_York, is, the]) See the usage sketch after this class.
"""
def __init__(
self,
vocab: Iterable[str] = [],
stop_words: Iterable[str] = ENGLISH_STOP_WORDS,
do_lower_case: bool = False,
ngram_separator: str = "_",
max_ngram_length: int = 5,
):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.ngram_separator = ngram_separator
self.max_ngram_length = max_ngram_length
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
# Check for ngram in vocab
self.ngram_lookup = set()
self.ngram_lengths = set()
for word in vocab:
if self.ngram_separator is not None and self.ngram_separator in word:
# Some words might be malformed in e.g. the Google News word2vec file, containing two or more _ after each other
ngram_count = word.count(self.ngram_separator) + 1
if self.ngram_separator + self.ngram_separator not in word and ngram_count <= self.max_ngram_length:
self.ngram_lookup.add(word)
self.ngram_lengths.add(ngram_count)
if len(vocab) > 0:
logger.info("PhraseTokenizer - Phrase ngram lengths: {}".format(self.ngram_lengths))
logger.info("PhraseTokenizer - Num phrases: {}".format(len(self.ngram_lookup)))
def tokenize(self, text: str, **kwargs) -> List[int]:
from nltk import word_tokenize
tokens = word_tokenize(text, preserve_line=True)
# phrase detection
for ngram_len in sorted(self.ngram_lengths, reverse=True):
idx = 0
while idx <= len(tokens) - ngram_len:
ngram = self.ngram_separator.join(tokens[idx : idx + ngram_len])
if ngram in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram]
elif ngram.lower() in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram.lower()]
idx += 1
# Map tokens to idx, filter stop words
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "phrasetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
"ngram_separator": self.ngram_separator,
"max_ngram_length": self.max_ngram_length,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "phrasetokenizer_config.json"), "r") as fIn:
config = json.load(fIn)
return PhraseTokenizer(**config)
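# A minimal usage sketch with a toy vocabulary (the words below are illustrative and
# nltk must be installed; the returned indices refer to positions in `vocab`):
#
#   vocab = ["new", "york", "New_York", "is", "the", "big", "apple"]
#   tokenizer = PhraseTokenizer(vocab, stop_words=[])
#   tokenizer.tokenize("New York is the big apple")
#   # -> [2, 3, 4, 5, 6], i.e. ["New_York", "is", "the", "big", "apple"] mapped to vocab indices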
|
from typing import Union, Tuple, List, Iterable, Dict
import collections
import string
import os
import json
import logging
from .WordTokenizer import WordTokenizer, ENGLISH_STOP_WORDS
import nltk
logger = logging.getLogger(__name__)
class PhraseTokenizer(WordTokenizer):
"""Tokenizes the text with respect to existent phrases in the vocab.
This tokenizer respects phrases that are in the vocab. Phrases are separated with 'ngram_separator'; for example,
in the Google News word2vec file, ngrams are separated with a _ like New_York. These phrases are detected in the text and merged into one special token. (New York is the ... => [New_York, is, the])
"""
def __init__(self, vocab: Iterable[str] = [], stop_words: Iterable[str] = ENGLISH_STOP_WORDS, do_lower_case: bool = False, ngram_separator: str = "_", max_ngram_length: int = 5):
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.ngram_separator = ngram_separator
self.max_ngram_length = max_ngram_length
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
# Check for ngram in vocab
self.ngram_lookup = set()
self.ngram_lengths = set()
for word in vocab:
if self.ngram_separator is not None and self.ngram_separator in word:
# Some words might be malformed in e.g. the Google News word2vec file, containing two or more _ after each other
ngram_count = word.count(self.ngram_separator) + 1
if self.ngram_separator + self.ngram_separator not in word and ngram_count <= self.max_ngram_length:
self.ngram_lookup.add(word)
self.ngram_lengths.add(ngram_count)
if len(vocab) > 0:
logger.info("PhraseTokenizer - Phrase ngram lengths: {}".format(self.ngram_lengths))
logger.info("PhraseTokenizer - Num phrases: {}".format(len(self.ngram_lookup)))
def tokenize(self, text: str) -> List[int]:
tokens = nltk.word_tokenize(text, preserve_line=True)
#phrase detection
for ngram_len in sorted(self.ngram_lengths, reverse=True):
idx = 0
while idx <= len(tokens) - ngram_len:
ngram = self.ngram_separator.join(tokens[idx:idx + ngram_len])
if ngram in self.ngram_lookup:
tokens[idx:idx + ngram_len] = [ngram]
elif ngram.lower() in self.ngram_lookup:
tokens[idx:idx + ngram_len] = [ngram.lower()]
idx += 1
#Map tokens to idx, filter stop words
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, 'phrasetokenizer_config.json'), 'w') as fOut:
json.dump({'vocab': list(self.word2idx.keys()), 'stop_words': list(self.stop_words), 'do_lower_case': self.do_lower_case, 'ngram_separator': self.ngram_separator, 'max_ngram_length': self.max_ngram_length}, fOut)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, 'phrasetokenizer_config.json'), 'r') as fIn:
config = json.load(fIn)
return PhraseTokenizer(**config)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
def moco_convert(src, dst):
"""Convert keys in pycls pretrained moco models to mmdet style."""
# load caffe model
moco_model = torch.load(src)
blobs = moco_model['state_dict']
# convert to pytorch style
state_dict = OrderedDict()
for k, v in blobs.items():
if not k.startswith('module.encoder_q.'):
continue
old_k = k
k = k.replace('module.encoder_q.', '')
state_dict[k] = v
print(old_k, '->', k)
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
parser.add_argument('src', help='src pretrained model path')
parser.add_argument('dst', help='save path')
parser.add_argument(
'--selfsup', type=str, choices=['moco', 'swav'], help='self-supervised pretraining method')
args = parser.parse_args()
if args.selfsup == 'moco':
moco_convert(args.src, args.dst)
elif args.selfsup == 'swav':
print('SWAV does not need to convert the keys')
if __name__ == '__main__':
main()
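# Example invocation (the script and checkpoint file names are illustrative):
#   python selfsup2mmdet.py moco_pretrained.pth moco_for_mmdet.pth --selfsup moco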
|
import argparse
from collections import OrderedDict
import torch
def moco_convert(src, dst):
"""Convert keys in pycls pretrained moco models to mmdet style."""
# load caffe model
moco_model = torch.load(src)
blobs = moco_model['state_dict']
# convert to pytorch style
state_dict = OrderedDict()
for k, v in blobs.items():
if not k.startswith('module.encoder_q.'):
continue
old_k = k
k = k.replace('module.encoder_q.', '')
state_dict[k] = v
print(old_k, '->', k)
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
parser.add_argument('src', help='src pretrained model path')
parser.add_argument('dst', help='save path')
parser.add_argument(
'--selfsup', type=str, choices=['moco', 'swav'], help='self-supervised pretraining method')
args = parser.parse_args()
if args.selfsup == 'moco':
moco_convert(args.src, args.dst)
elif args.selfsup == 'swav':
print('SWAV does not need to convert the keys')
if __name__ == '__main__':
main()
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Real
import numpy as np
from ..base import BaseEstimator, _fit_context
from ..utils._param_validation import Interval
from ..utils.sparsefuncs import mean_variance_axis, min_max_axis
from ..utils.validation import check_is_fitted, validate_data
from ._base import SelectorMixin
class VarianceThreshold(SelectorMixin, BaseEstimator):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, default=0
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
SelectFromModel: Meta-transformer for selecting features based on
importance weights.
SelectPercentile : Select features according to a percentile of the highest
scores.
SequentialFeatureSelector : Transformer that performs Sequential Feature
Selection.
Notes
-----
Allows NaN in the input.
Raises ValueError if no feature in X meets the variance threshold.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> from sklearn.feature_selection import VarianceThreshold
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
_parameter_constraints: dict = {
"threshold": [Interval(Real, 0, None, closed="left")]
}
def __init__(self, threshold=0.0):
self.threshold = threshold
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data from which to compute variances, where `n_samples` is
the number of samples and `n_features` is the number of features.
y : any, default=None
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self : object
Returns the instance itself.
"""
X = validate_data(
self,
X,
accept_sparse=("csr", "csc"),
dtype=np.float64,
ensure_all_finite="allow-nan",
)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
if self.threshold == 0:
mins, maxes = min_max_axis(X, axis=0)
peak_to_peaks = maxes - mins
else:
self.variances_ = np.nanvar(X, axis=0)
if self.threshold == 0:
peak_to_peaks = np.ptp(X, axis=0)
if self.threshold == 0:
# Use peak-to-peak to avoid numeric precision issues
# for constant features
compare_arr = np.array([self.variances_, peak_to_peaks])
self.variances_ = np.nanmin(compare_arr, axis=0)
if np.all(~np.isfinite(self.variances_) | (self.variances_ <= self.threshold)):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self)
return self.variances_ > self.threshold
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.allow_nan = True
return tags
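# A small sketch of the NaN handling and a non-zero threshold (the data is illustrative):
#
#   import numpy as np
#   X = np.array([[0.0, 2.0, np.nan],
#                 [0.0, 1.0, np.nan],
#                 [0.1, 1.0, np.nan]])
#   VarianceThreshold(threshold=0.01).fit_transform(X)
#   # keeps only the middle column: the all-NaN column has undefined variance and the
#   # first column's variance (~0.0022) falls below the threshold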
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Real
import numpy as np
from ..base import BaseEstimator, _fit_context
from ..utils._param_validation import Interval
from ..utils.sparsefuncs import mean_variance_axis, min_max_axis
from ..utils.validation import check_is_fitted
from ._base import SelectorMixin
class VarianceThreshold(SelectorMixin, BaseEstimator):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, default=0
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
SelectFromModel: Meta-transformer for selecting features based on
importance weights.
SelectPercentile : Select features according to a percentile of the highest
scores.
SequentialFeatureSelector : Transformer that performs Sequential Feature
Selection.
Notes
-----
Allows NaN in the input.
Raises ValueError if no feature in X meets the variance threshold.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> from sklearn.feature_selection import VarianceThreshold
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
_parameter_constraints: dict = {
"threshold": [Interval(Real, 0, None, closed="left")]
}
def __init__(self, threshold=0.0):
self.threshold = threshold
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data from which to compute variances, where `n_samples` is
the number of samples and `n_features` is the number of features.
y : any, default=None
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self : object
Returns the instance itself.
"""
X = self._validate_data(
X,
accept_sparse=("csr", "csc"),
dtype=np.float64,
ensure_all_finite="allow-nan",
)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
if self.threshold == 0:
mins, maxes = min_max_axis(X, axis=0)
peak_to_peaks = maxes - mins
else:
self.variances_ = np.nanvar(X, axis=0)
if self.threshold == 0:
peak_to_peaks = np.ptp(X, axis=0)
if self.threshold == 0:
# Use peak-to-peak to avoid numeric precision issues
# for constant features
compare_arr = np.array([self.variances_, peak_to_peaks])
self.variances_ = np.nanmin(compare_arr, axis=0)
if np.all(~np.isfinite(self.variances_) | (self.variances_ <= self.threshold)):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self)
return self.variances_ > self.threshold
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.allow_nan = True
return tags
|
"""This module checks if the given python files can be imported without error."""
import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
SourceFileLoader("x", file).load_module()
except Exception: # noqa: PERF203
has_failure = True
print(file) # noqa: T201
traceback.print_exc()
print() # noqa: T201
sys.exit(1 if has_failure else 0)
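# Typical invocation (the script and file names are illustrative):
#   python check_imports.py src/module_a.py src/module_b.py
# The exit status is non-zero if at least one file fails to import.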
|
"""This module checks if the given python files can be imported without error."""
import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
SourceFileLoader("x", file).load_module()
except Exception:
has_failure = True
print(file) # noqa: T201
traceback.print_exc()
print() # noqa: T201
sys.exit(1 if has_failure else 0)
|
from __future__ import annotations
from typing import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import SentenceTransformer
class MSELoss(nn.Module):
def __init__(self, model: SentenceTransformer) -> None:
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../examples/training/distillation/README.html>`_ on extending language models to new languages.
Args:
model: SentenceTransformerModel
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../examples/training/distillation/README.html>`_
- `Training > Multilingual Models <../../examples/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Relations:
- :class:`MarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Input:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
student_model = SentenceTransformer("microsoft/mpnet-base")
teacher_model = SentenceTransformer("all-mpnet-base-v2")
train_dataset = Dataset.from_dict({
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
})
def compute_labels(batch):
return {
"label": teacher_model.encode(batch["english"])
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = SentenceTransformerTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.loss_fct = nn.MSELoss()
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
# Concatenate multiple inputs on the batch dimension
if len(sentence_features) > 1:
embeddings = torch.cat([self.model(inputs)["sentence_embedding"] for inputs in sentence_features], dim=0)
# Repeat the labels for each input
return self.loss_fct(embeddings, labels.repeat(len(sentence_features), 1))
embeddings = self.model(sentence_features[0])["sentence_embedding"]
return self.loss_fct(embeddings, labels)
@property
def citation(self) -> str:
return """
@inproceedings{reimers-2020-multilingual-sentence-bert,
title = "Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2020",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/2004.09813",
}
"""
|
from typing import Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import SentenceTransformer
class MSELoss(nn.Module):
def __init__(self, model: SentenceTransformer) -> None:
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../examples/training/distillation/README.html>`_ on extending language models to new languages.
Args:
model: SentenceTransformerModel
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../examples/training/distillation/README.html>`_
- `Training > Multilingual Models <../../examples/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Relations:
- :class:`MarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Input:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
student_model = SentenceTransformer("microsoft/mpnet-base")
teacher_model = SentenceTransformer("all-mpnet-base-v2")
train_dataset = Dataset.from_dict({
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
})
def compute_labels(batch):
return {
"label": teacher_model.encode(batch["english"])
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = SentenceTransformerTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(MSELoss, self).__init__()
self.model = model
self.loss_fct = nn.MSELoss()
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor) -> Tensor:
# Concatenate multiple inputs on the batch dimension
if len(sentence_features) > 1:
embeddings = torch.cat([self.model(inputs)["sentence_embedding"] for inputs in sentence_features], dim=0)
# Repeat the labels for each input
return self.loss_fct(embeddings, labels.repeat(len(sentence_features), 1))
embeddings = self.model(sentence_features[0])["sentence_embedding"]
return self.loss_fct(embeddings, labels)
@property
def citation(self) -> str:
return """
@inproceedings{reimers-2020-multilingual-sentence-bert,
title = "Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2020",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/2004.09813",
}
"""
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='VFNet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output', # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='VFNetHead',
num_classes=80,
in_channels=256,
stacked_convs=3,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
center_sampling=False,
dcn_on_last_conv=False,
use_atss=True,
use_vfl=True,
loss_cls=dict(
type='VarifocalLoss',
use_sigmoid=True,
alpha=0.75,
gamma=2.0,
iou_weighted=True,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.5),
loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# data setting
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.01),
paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.),
clip_grad=None)
# learning rate
max_epochs = 12
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='VFNet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output', # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='VFNetHead',
num_classes=80,
in_channels=256,
stacked_convs=3,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
center_sampling=False,
dcn_on_last_conv=False,
use_atss=True,
use_vfl=True,
loss_cls=dict(
type='VarifocalLoss',
use_sigmoid=True,
alpha=0.75,
gamma=2.0,
iou_weighted=True,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.5),
loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# data setting
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.01),
paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.),
clip_grad=None)
# learning rate
max_epochs = 12
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
|
from .document import DocumentArray
from .storage.weaviate import StorageMixins, WeaviateConfig
__all__ = ['DocumentArrayWeaviate', 'WeaviateConfig']
class DocumentArrayWeaviate(StorageMixins, DocumentArray):
"""
DocumentArray that stores Documents in a `Weaviate <https://weaviate.io/>`_ vector search engine.
.. note::
This DocumentArray requires `weaviate-client`. You can install it via `pip install "docarray[weaviate]"`.
To use Weaviate as storage backend, a Weaviate service needs to be running on your machine.
With this implementation, :meth:`match` and :meth:`find` perform fast (approximate) vector search.
Additionally, search with filters is supported.
Example usage:
.. code-block:: python
from docarray import DocumentArray
# connect to running Weaviate service with default configuration (address: http://localhost:8080)
da = DocumentArray(storage='weaviate')
# connect to a previously persisted DocumentArrayWeaviate by specifying name, host, and port
da = DocumentArray(
storage='weaviate', config={'name': 'Persisted', 'host': 'localhost', 'port': 1234}
)
.. seealso::
For further details, see our :ref:`user guide <weaviate>`.
"""
def __new__(cls, *args, **kwargs):
"""``__new__`` method for :class:`DocumentArrayWeaviate`
:param *args: list of args to instantiate the object
:param **kwargs: dict of args to instantiate the object
:return: the instantiated :class:`DocumentArrayWeaviate` object
"""
return super().__new__(cls)
|
from .document import DocumentArray
from .storage.weaviate import StorageMixins, WeaviateConfig
__all__ = ['DocumentArrayWeaviate', 'WeaviateConfig']
class DocumentArrayWeaviate(StorageMixins, DocumentArray):
"""This is a :class:`DocumentArray` that uses Weaviate as
vector search engine and storage.
"""
def __new__(cls, *args, **kwargs):
"""``__new__`` method for :class:`DocumentArrayWeaviate`
:param *args: list of args to instantiate the object
:param **kwargs: dict of args to instantiate the object
:return: the instantiated :class:`DocumentArrayWeaviate` object
"""
return super().__new__(cls)
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for docs in self.streamer.stream_docs(
docs=DocumentArray([Document(text=text)]),
exec_endpoint='/debug',
):
doc = docs[0]
return {'text': doc.text, 'tags': doc.tags}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for docs in self.streamer.stream_docs(
docs=DocumentArray([Document(text=text)]),
exec_endpoint='/debug',
):
doc = docs[0]
return {'text': doc.text, 'tags': doc.tags}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py', '../common/mstrain_3x_coco.py'
]
# optimizer
model = dict(
pretrained='open-mmlab://resnext101_64x4d',
backbone=dict(type='ResNeXt', depth=101, groups=64, base_width=4))
optim_wrapper = dict(optimizer=dict(type='SGD', lr=0.01))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py', '../common/mstrain_3x_coco.py'
]
# optimizer
model = dict(
pretrained='open-mmlab://resnext101_64x4d',
backbone=dict(type='ResNeXt', depth=101, groups=64, base_width=4))
optimizer = dict(type='SGD', lr=0.01)
|
import itertools
from typing import (
TYPE_CHECKING,
Union,
Sequence,
overload,
Any,
List,
)
import numpy as np
from docarray import Document
from docarray.helper import typename
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import (
DocumentArrayIndexType,
DocumentArraySingletonIndexType,
DocumentArrayMultipleIndexType,
DocumentArrayMultipleAttributeType,
DocumentArraySingleAttributeType,
)
from docarray import DocumentArray
class GetItemMixin:
"""Provide helper functions to enable advance indexing in `__getitem__`"""
@overload
def __getitem__(self, index: 'DocumentArraySingletonIndexType') -> 'Document':
...
@overload
def __getitem__(self, index: 'DocumentArrayMultipleIndexType') -> 'DocumentArray':
...
@overload
def __getitem__(self, index: 'DocumentArraySingleAttributeType') -> List[Any]:
...
@overload
def __getitem__(
self, index: 'DocumentArrayMultipleAttributeType'
) -> List[List[Any]]:
...
def __getitem__(
self, index: 'DocumentArrayIndexType'
) -> Union['Document', 'DocumentArray']:
if isinstance(index, (int, np.generic)) and not isinstance(index, bool):
return self._get_doc_by_offset(int(index))
elif isinstance(index, str):
is_access_path = index.startswith('@')
if (
is_access_path
and getattr(self, '_subindices', None) is not None
and index in self._subindices
):
return self._subindices[index]
elif is_access_path:
return self.traverse_flat(index[1:])
else:
return self._get_doc_by_id(index)
elif isinstance(index, slice):
from docarray import DocumentArray
return DocumentArray(self._get_docs_by_slice(index))
elif index is Ellipsis:
return self.flatten()
elif isinstance(index, Sequence):
from docarray import DocumentArray
if (
isinstance(index, tuple)
and len(index) == 2
and (
isinstance(index[0], (slice, Sequence, str, int))
or index[0] is Ellipsis
)
and isinstance(index[1], (str, Sequence))
):
# TODO: add support for cases such as da[1, ['text', 'id']]?
if isinstance(index[0], (str, int)) and isinstance(index[1], str):
# ambiguity only comes from the second string
if index[1] in self:
return DocumentArray([self[index[0]], self[index[1]]])
else:
_docs = self[index[0]]
if not _docs:
return []
if isinstance(_docs, Document):
return getattr(_docs, index[1])
return _docs._get_attributes(index[1])
elif isinstance(index[0], (slice, Sequence)) or index[0] is Ellipsis:
_docs = self[index[0]]
_attrs = index[1]
if isinstance(_attrs, str):
_attrs = (index[1],)
return _docs._get_attributes(*_attrs)
elif isinstance(index[0], bool):
return DocumentArray(itertools.compress(self, index))
elif isinstance(index[0], int):
return DocumentArray(self._get_docs_by_offsets(index))
elif isinstance(index[0], str):
return DocumentArray(self._get_docs_by_ids(index))
elif isinstance(index, np.ndarray):
index = index.squeeze()
if index.ndim == 1:
return self[index.tolist()]
else:
raise IndexError(
f'When using np.ndarray as index, its `ndim` must be 1. However, received ndim={index.ndim}'
)
raise IndexError(f'Unsupported index type {typename(index)}: {index}')
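# A rough sketch of the index forms handled above, for a DocumentArray `da`
# (the offsets, ids and attributes below are illustrative):
#
#   da[0]                    # Document by integer offset
#   da['doc_id']             # Document by id
#   da[1:3]                  # DocumentArray slice
#   da[[0, 2, 5]]            # DocumentArray from a list of offsets
#   da[[True, False, True]]  # boolean mask over the array
#   da[:, 'text']            # list of the `text` attribute of every Document
#   da['@c']                 # traversal path: flattened chunks of every Document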
|
import itertools
from typing import (
TYPE_CHECKING,
Union,
Sequence,
overload,
Any,
List,
)
import numpy as np
from docarray import Document
from docarray.helper import typename
if TYPE_CHECKING:
from docarray.typing import (
DocumentArrayIndexType,
DocumentArraySingletonIndexType,
DocumentArrayMultipleIndexType,
DocumentArrayMultipleAttributeType,
DocumentArraySingleAttributeType,
)
from docarray import DocumentArray
class GetItemMixin:
"""Provide helper functions to enable advance indexing in `__getitem__`"""
@overload
def __getitem__(self, index: 'DocumentArraySingletonIndexType') -> 'Document':
...
@overload
def __getitem__(self, index: 'DocumentArrayMultipleIndexType') -> 'DocumentArray':
...
@overload
def __getitem__(self, index: 'DocumentArraySingleAttributeType') -> List[Any]:
...
@overload
def __getitem__(
self, index: 'DocumentArrayMultipleAttributeType'
) -> List[List[Any]]:
...
def __getitem__(
self, index: 'DocumentArrayIndexType'
) -> Union['Document', 'DocumentArray']:
if isinstance(index, (int, np.generic)) and not isinstance(index, bool):
return self._get_doc_by_offset(int(index))
elif isinstance(index, str):
is_access_path = index.startswith('@')
if (
is_access_path
and getattr(self, '_subindices', None) is not None
and index in self._subindices
):
return self._subindices[index]
elif is_access_path:
return self.traverse_flat(index[1:])
else:
return self._get_doc_by_id(index)
elif isinstance(index, slice):
from docarray import DocumentArray
return DocumentArray(self._get_docs_by_slice(index))
elif index is Ellipsis:
return self.flatten()
elif isinstance(index, Sequence):
from docarray import DocumentArray
if (
isinstance(index, tuple)
and len(index) == 2
and (
isinstance(index[0], (slice, Sequence, str, int))
or index[0] is Ellipsis
)
and isinstance(index[1], (str, Sequence))
):
# TODO: add support for cases such as da[1, ['text', 'id']]?
if isinstance(index[0], (str, int)) and isinstance(index[1], str):
# ambiguity only comes from the second string
if index[1] in self:
return DocumentArray([self[index[0]], self[index[1]]])
else:
_docs = self[index[0]]
if not _docs:
return []
if isinstance(_docs, Document):
return getattr(_docs, index[1])
return _docs._get_attributes(index[1])
elif isinstance(index[0], (slice, Sequence)) or index[0] is Ellipsis:
_docs = self[index[0]]
_attrs = index[1]
if isinstance(_attrs, str):
_attrs = (index[1],)
return _docs._get_attributes(*_attrs)
elif isinstance(index[0], bool):
return DocumentArray(itertools.compress(self, index))
elif isinstance(index[0], int):
return DocumentArray(self._get_docs_by_offsets(index))
elif isinstance(index[0], str):
return DocumentArray(self._get_docs_by_ids(index))
elif isinstance(index, np.ndarray):
index = index.squeeze()
if index.ndim == 1:
return self[index.tolist()]
else:
raise IndexError(
f'When using np.ndarray as index, its `ndim` must be 1. However, received ndim={index.ndim}'
)
raise IndexError(f'Unsupported index type {typename(index)}: {index}')
|
import os
import pytest
from jina import Document, Flow
from jinahub.indexers.searcher.compound.FaissPostgresIndexer import FaissPostgresIndexer
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.join(cur_dir, 'docker-compose.yml')
# fixes issue #208 https://github.com/jina-ai/executors/issues/208
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_shards_str(docker_compose):
with Flow().load_config(
"""
jtype: Flow
executors:
- name: text_indexer
shards: 1
uses: FaissPostgresIndexer
uses_with:
startup_sync_args:
only_delta: True
total_shards: 1
"""
) as f:
f.search([Document() for _ in range(20)])
|
import os
import pytest
from jina import Document, Flow
from jinahub.indexers.searcher.compound.FaissPostgresSearcher import (
FaissPostgresSearcher,
)
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.join(cur_dir, 'docker-compose.yml')
# fixes issue #208 https://github.com/jina-ai/executors/issues/208
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_shards_str(docker_compose):
with Flow().load_config(
"""
jtype: Flow
executors:
- name: text_indexer
shards: 1
uses: FaissPostgresSearcher
uses_with:
startup_sync_args:
only_delta: True
total_shards: 1
"""
) as f:
f.search([Document() for _ in range(20)])
|
import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
SourceFileLoader("x", file).load_module()
except Exception:
has_failure = True
print(file) # noqa: T201
traceback.print_exc()
print() # noqa: T201
sys.exit(1 if has_failure else 0)
|
import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
SourceFileLoader("x", file).load_module()
except Exception:
has_failure = True
print(file) # noqa: T201
traceback.print_exc()
print() # noqa: T201
sys.exit(1 if has_failure else 0)
|
# mypy: ignore-errors
"""
This module provides the TorchInductor backend integration for TorchDynamo.
TorchInductor is a compiler backend that generates optimized code for both CPU and GPU.
This module lazily imports and registers the TorchInductor compiler to avoid loading it
into memory when it is not being used. This helps reduce memory overhead when using
other backends.
The inductor backend can be used with torch.compile():
model = torch.compile(model, backend="inductor")
"""
from torch._dynamo import register_backend
from torch._dynamo.utils import dynamo_timed
@register_backend
def inductor(*args, **kwargs):
with dynamo_timed("inductor_import", log_pt2_compile_event=True):
# do import here to avoid loading inductor into memory when it is not used
# The AsyncCompile subproc pool can be slow to start, so warm it up as early
# as possible.
from torch._inductor.async_compile import maybe_warm_pool
maybe_warm_pool()
from torch._inductor.compile_fx import compile_fx
return compile_fx(*args, **kwargs)
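# A minimal end-to-end sketch of selecting this backend (the model and input shape are
# arbitrary):
#
#   import torch
#   model = torch.nn.Linear(8, 8)
#   compiled = torch.compile(model, backend="inductor")
#   out = compiled(torch.randn(2, 8))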
|
# mypy: ignore-errors
"""
This module provides the TorchInductor backend integration for TorchDynamo.
TorchInductor is a compiler backend that generates optimized code for both CPU and GPU.
This module lazily imports and registers the TorchInductor compiler to avoid loading it
into memory when it is not being used. This helps reduce memory overhead when using
other backends.
The inductor backend can be used with torch.compile():
model = torch.compile(model, backend="inductor")
"""
from torch._dynamo import register_backend
from torch._dynamo.utils import dynamo_timed
@register_backend
def inductor(*args, **kwargs):
with dynamo_timed("inductor_import", log_pt2_compile_event=True):
# do import here to avoid loading inductor into memory when it is not used
from torch._inductor.compile_fx import compile_fx
return compile_fx(*args, **kwargs)
|