input | output
---|---
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional, Set, Tuple, Union
import pytest
from docarray.typing import NdArray, TorchTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal._typing import (
is_tensor_union,
is_type_tensor,
safe_issubclass,
)
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
from docarray.typing import TensorFlowTensor
else:
TensorFlowTensor = None
@pytest.mark.parametrize(
'type_, is_tensor',
[
(int, False),
(TorchTensor, True),
(NdArray, True),
(AbstractTensor, True),
(Optional[TorchTensor], False),
(Union[TorchTensor, NdArray], False),
(None, False),
(Dict, False),
],
)
def test_is_type_tensor(type_, is_tensor):
assert is_type_tensor(type_) == is_tensor
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'type_, is_tensor',
[
(TensorFlowTensor, True),
(Optional[TensorFlowTensor], False),
],
)
def test_is_type_tensor_with_tf(type_, is_tensor):
assert is_type_tensor(type_) == is_tensor
@pytest.mark.parametrize(
'type_, is_union_tensor',
[
(int, False),
(TorchTensor, False),
(NdArray, False),
(Optional[TorchTensor], True),
(Optional[NdArray], True),
(Union[NdArray, TorchTensor], True),
(Union[NdArray, TorchTensor, AbstractTensor], True),
(Union[NdArray, TorchTensor, Optional[TorchTensor]], True),
(Union[NdArray, TorchTensor, None], True),
],
)
def test_is_union_type_tensor(type_, is_union_tensor):
assert is_tensor_union(type_) == is_union_tensor
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'type_, is_union_tensor',
[
(TensorFlowTensor, False),
(Optional[TensorFlowTensor], True),
(Union[NdArray, TorchTensor, TensorFlowTensor], True),
(Union[NdArray, TorchTensor, Optional[TensorFlowTensor]], True),
],
)
def test_is_union_type_tensor_with_tf(type_, is_union_tensor):
assert is_tensor_union(type_) == is_union_tensor
@pytest.mark.parametrize(
'type_, cls, is_subclass',
[
(List[str], object, False),
(List[List[int]], object, False),
(Set[str], object, False),
(Dict, object, False),
(Tuple[int, int], object, False),
],
)
def test_safe_issubclass(type_, cls, is_subclass):
assert safe_issubclass(type_, cls) == is_subclass
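# For reference, the semantics exercised above can be approximated with the
# standard ``typing`` helpers. This is a hedged sketch of what the two
# predicates are expected to return, not docarray's actual implementation.
from typing import get_args, get_origin


def _sketch_is_type_tensor(type_) -> bool:
    # A bare class that subclasses AbstractTensor; Unions and None do not count.
    return isinstance(type_, type) and issubclass(type_, AbstractTensor)


def _sketch_is_tensor_union(type_) -> bool:
    # A Union whose members are all tensor types or NoneType.
    if get_origin(type_) is not Union:
        return False
    return all(
        arg is type(None) or _sketch_is_type_tensor(arg) for arg in get_args(type_)
    )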
|
from typing import Dict, List, Optional, Set, Tuple, Union
import pytest
from docarray.typing import NdArray, TorchTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal._typing import (
is_tensor_union,
is_type_tensor,
safe_issubclass,
)
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
from docarray.typing import TensorFlowTensor
else:
TensorFlowTensor = None
@pytest.mark.parametrize(
'type_, is_tensor',
[
(int, False),
(TorchTensor, True),
(NdArray, True),
(AbstractTensor, True),
(Optional[TorchTensor], False),
(Union[TorchTensor, NdArray], False),
(None, False),
(Dict, False),
],
)
def test_is_type_tensor(type_, is_tensor):
assert is_type_tensor(type_) == is_tensor
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'type_, is_tensor',
[
(TensorFlowTensor, True),
(Optional[TensorFlowTensor], False),
],
)
def test_is_type_tensor_with_tf(type_, is_tensor):
assert is_type_tensor(type_) == is_tensor
@pytest.mark.parametrize(
'type_, is_union_tensor',
[
(int, False),
(TorchTensor, False),
(NdArray, False),
(Optional[TorchTensor], True),
(Optional[NdArray], True),
(Union[NdArray, TorchTensor], True),
(Union[NdArray, TorchTensor, AbstractTensor], True),
(Union[NdArray, TorchTensor, Optional[TorchTensor]], True),
(Union[NdArray, TorchTensor, None], True),
],
)
def test_is_union_type_tensor(type_, is_union_tensor):
assert is_tensor_union(type_) == is_union_tensor
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'type_, is_union_tensor',
[
(TensorFlowTensor, False),
(Optional[TensorFlowTensor], True),
(Union[NdArray, TorchTensor, TensorFlowTensor], True),
(Union[NdArray, TorchTensor, Optional[TensorFlowTensor]], True),
],
)
def test_is_union_type_tensor_with_tf(type_, is_union_tensor):
assert is_tensor_union(type_) == is_union_tensor
@pytest.mark.parametrize(
'type_, cls, is_subclass',
[
(List[str], object, False),
(List[List[int]], object, False),
(Set[str], object, False),
(Dict, object, False),
(Tuple[int, int], object, False),
],
)
def test_safe_issubclass(type_, cls, is_subclass):
assert safe_issubclass(type_, cls) == is_subclass
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--amp',
action='store_true',
default=False,
help='enable automatic-mixed-precision training')
parser.add_argument(
'--auto-scale-lr',
action='store_true',
help='enable automatically scaling LR.')
parser.add_argument(
'--resume',
nargs='?',
type=str,
const='auto',
        help='If a checkpoint path is specified, resume from it; otherwise, '
        'try to automatically resume from the latest checkpoint in the '
        'work directory.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config; key-value pairs '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or key=a,b. '
        'Nested list/tuple values are also allowed, e.g. key="[(a,b),(c,d)]". '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.type
if optim_wrapper == 'AmpOptimWrapper':
print_log(
'AMP training is already enabled in your config.',
logger='current',
level=logging.WARNING)
else:
assert optim_wrapper == 'OptimWrapper', (
'`--amp` is only supported when the optimizer wrapper type is '
f'`OptimWrapper` but got {optim_wrapper}.')
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# enable automatically scaling LR
if args.auto_scale_lr:
if 'auto_scale_lr' in cfg and \
'enable' in cfg.auto_scale_lr and \
'base_batch_size' in cfg.auto_scale_lr:
cfg.auto_scale_lr.enable = True
else:
            raise RuntimeError('Cannot find "auto_scale_lr" or '
'"auto_scale_lr.enable" or '
'"auto_scale_lr.base_batch_size" in your'
' configuration file.')
# resume is determined in this priority: resume from > auto_resume
if args.resume == 'auto':
cfg.resume = True
cfg.load_from = None
elif args.resume is not None:
cfg.resume = True
cfg.load_from = args.resume
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
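# Example invocations (config path and GPU count are placeholders, not files
# shipped with this script):
#   python train.py configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py --amp
#   torchrun --nproc_per_node=8 train.py <config> --launcher pytorch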
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--amp',
action='store_true',
default=False,
help='enable automatic-mixed-precision training')
parser.add_argument(
'--auto-scale-lr',
action='store_true',
help='enable automatically scaling LR.')
parser.add_argument(
'--resume',
nargs='?',
type=str,
const='auto',
        help='If a checkpoint path is specified, resume from it; otherwise, '
        'try to automatically resume from the latest checkpoint in the '
        'work directory.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config; key-value pairs '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or key=a,b. '
        'Nested list/tuple values are also allowed, e.g. key="[(a,b),(c,d)]". '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# register all modules in mmdet into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.type
if optim_wrapper == 'AmpOptimWrapper':
print_log(
'AMP training is already enabled in your config.',
logger='current',
level=logging.WARNING)
else:
assert optim_wrapper == 'OptimWrapper', (
'`--amp` is only supported when the optimizer wrapper type is '
f'`OptimWrapper` but got {optim_wrapper}.')
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# enable automatically scaling LR
if args.auto_scale_lr:
if 'auto_scale_lr' in cfg and \
'enable' in cfg.auto_scale_lr and \
'base_batch_size' in cfg.auto_scale_lr:
cfg.auto_scale_lr.enable = True
else:
            raise RuntimeError('Cannot find "auto_scale_lr" or '
'"auto_scale_lr.enable" or '
'"auto_scale_lr.base_batch_size" in your'
' configuration file.')
# resume is determined in this priority: resume from > auto_resume
if args.resume == 'auto':
cfg.resume = True
cfg.load_from = None
elif args.resume is not None:
cfg.resume = True
cfg.load_from = args.resume
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for Bros.
"""
from typing import Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BrosProcessor(ProcessorMixin):
r"""
Constructs a Bros processor which wraps a BERT tokenizer.
[`BrosProcessor`] offers all the functionalities of [`BertTokenizerFast`]. See the docstring of
[`~BrosProcessor.__call__`] and [`~BrosProcessor.decode`] for more information.
Args:
tokenizer (`BertTokenizerFast`, *optional*):
            An instance of [`BertTokenizerFast`]. The tokenizer is a required input.
"""
attributes = ["tokenizer"]
tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
def __init__(self, tokenizer=None, **kwargs):
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(tokenizer)
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs,
) -> BatchEncoding:
"""
This method uses [`BertTokenizerFast.__call__`] to prepare text for the model.
        Please refer to its docstring for more information.
"""
encoding = self.tokenizer(
text=text,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
return_tensors=return_tensors,
**kwargs,
)
return encoding
def batch_decode(self, *args, **kwargs):
"""
This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
refer to the docstring of this method for more information.
"""
return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
"""
This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
the docstring of this method for more information.
"""
return self.tokenizer.decode(*args, **kwargs)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
return list(dict.fromkeys(tokenizer_input_names))
__all__ = ["BrosProcessor"]
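# Minimal usage sketch (the checkpoint id below is an assumption for
# illustration; any Bros checkpoint with a fast BERT tokenizer works):
#
#     from transformers import BrosProcessor
#
#     processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
#     encoding = processor("Hello world", padding=True, return_tensors="pt")
#     print(processor.model_input_names)  # names forwarded from the tokenizer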
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for Bros.
"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BrosProcessor(ProcessorMixin):
r"""
Constructs a Bros processor which wraps a BERT tokenizer.
[`BrosProcessor`] offers all the functionalities of [`BertTokenizerFast`]. See the docstring of
[`~BrosProcessor.__call__`] and [`~BrosProcessor.decode`] for more information.
Args:
tokenizer (`BertTokenizerFast`, *optional*):
            An instance of [`BertTokenizerFast`]. The tokenizer is a required input.
"""
attributes = ["tokenizer"]
tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
def __init__(self, tokenizer=None, **kwargs):
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(tokenizer)
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs,
) -> BatchEncoding:
"""
This method uses [`BertTokenizerFast.__call__`] to prepare text for the model.
        Please refer to its docstring for more information.
"""
encoding = self.tokenizer(
text=text,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
return_tensors=return_tensors,
**kwargs,
)
return encoding
def batch_decode(self, *args, **kwargs):
"""
This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
refer to the docstring of this method for more information.
"""
return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
"""
This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
the docstring of this method for more information.
"""
return self.tokenizer.decode(*args, **kwargs)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
return list(dict.fromkeys(tokenizer_input_names))
__all__ = ["BrosProcessor"]
|
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
preprocess_cfg=preprocess_cfg,
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
type='RPNHead',
in_channels=1024,
feat_channels=1024,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
shared_head=dict(
type='ResLayer',
depth=50,
stage=3,
stride=2,
dilation=1,
style='caffe',
norm_cfg=norm_cfg,
norm_eval=True,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=1024,
featmap_strides=[16]),
bbox_head=dict(
type='BBoxHead',
with_avg_pool=True,
roi_feat_size=7,
in_channels=2048,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=6000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
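# Note: this is a C4-style Faster R-CNN. The backbone stops after stage 3
# (out_indices=(2, ), stride 16), the RPN and RoI extractor operate on that
# single 1024-channel map (featmap_strides=[16]), and the shared ResLayer
# (stage index 3, i.e. conv5) serves as the trunk of the bbox head.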
|
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
type='RPNHead',
in_channels=1024,
feat_channels=1024,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
shared_head=dict(
type='ResLayer',
depth=50,
stage=3,
stride=2,
dilation=1,
style='caffe',
norm_cfg=norm_cfg,
norm_eval=True,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=1024,
featmap_strides=[16]),
bbox_head=dict(
type='BBoxHead',
with_avg_pool=True,
roi_feat_size=7,
in_channels=2048,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=6000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
|
import os
from pathlib import Path
from typing import List, Tuple, Union
import torch
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _load_waveform
_TASKS_TO_MIXTURE = {
"sep_clean": "mix_clean",
"enh_single": "mix_single",
"enh_both": "mix_both",
"sep_noisy": "mix_both",
}
class LibriMix(Dataset):
r"""*LibriMix* :cite:`cosentino2020librimix` dataset.
Args:
root (str or Path): The path to the directory where the directory ``Libri2Mix`` or
``Libri3Mix`` is stored.
subset (str, optional): The subset to use. Options: [``"train-360"``, ``"train-100"``,
``"dev"``, and ``"test"``] (Default: ``"train-360"``).
num_speakers (int, optional): The number of speakers, which determines the directories
to traverse. The Dataset will traverse ``s1`` to ``sN`` directories to collect
N source audios. (Default: 2)
        sample_rate (int, optional): Sample rate of audio files. The ``sample_rate`` determines
            from which subdirectory the audio files are fetched. If any audio file has a different
            sample rate, a ``ValueError`` is raised. Options: [8000, 16000] (Default: 8000)
task (str, optional): The task of LibriMix.
Options: [``"enh_single"``, ``"enh_both"``, ``"sep_clean"``, ``"sep_noisy"``]
(Default: ``"sep_clean"``)
mode (str, optional): The mode when creating the mixture. If set to ``"min"``, the lengths of mixture
and sources are the minimum length of all sources. If set to ``"max"``, the lengths of mixture and
sources are zero padded to the maximum length of all sources.
Options: [``"min"``, ``"max"``]
(Default: ``"min"``)
Note:
The LibriMix dataset needs to be manually generated. Please check https://github.com/JorisCos/LibriMix
"""
def __init__(
self,
root: Union[str, Path],
subset: str = "train-360",
num_speakers: int = 2,
sample_rate: int = 8000,
task: str = "sep_clean",
mode: str = "min",
):
self.root = Path(root) / f"Libri{num_speakers}Mix"
if mode not in ["max", "min"]:
            raise ValueError(f'Expect ``mode`` to be one of ["min", "max"]. Found {mode}.')
if sample_rate == 8000:
mix_dir = self.root / "wav8k" / mode / subset
elif sample_rate == 16000:
mix_dir = self.root / "wav16k" / mode / subset
else:
raise ValueError(f"Unsupported sample rate. Found {sample_rate}.")
self.sample_rate = sample_rate
self.task = task
self.mix_dir = mix_dir / _TASKS_TO_MIXTURE[task]
if task == "enh_both":
self.src_dirs = [(mix_dir / "mix_clean")]
else:
self.src_dirs = [(mix_dir / f"s{i+1}") for i in range(num_speakers)]
self.files = [p.name for p in self.mix_dir.glob("*.wav")]
self.files.sort()
def _load_sample(self, key) -> Tuple[int, torch.Tensor, List[torch.Tensor]]:
metadata = self.get_metadata(key)
mixed = _load_waveform(self.root, metadata[1], metadata[0])
srcs = []
for i, path_ in enumerate(metadata[2]):
src = _load_waveform(self.root, path_, metadata[0])
if mixed.shape != src.shape:
raise ValueError(f"Different waveform shapes. mixed: {mixed.shape}, src[{i}]: {src.shape}")
srcs.append(src)
return self.sample_rate, mixed, srcs
def get_metadata(self, key: int) -> Tuple[int, str, List[str]]:
"""Get metadata for the n-th sample from the dataset.
Args:
key (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
int:
Sample rate
str:
Path to mixed audio
List of str:
List of paths to source audios
"""
filename = self.files[key]
mixed_path = os.path.relpath(self.mix_dir / filename, self.root)
srcs_paths = []
for dir_ in self.src_dirs:
src = os.path.relpath(dir_ / filename, self.root)
srcs_paths.append(src)
return self.sample_rate, mixed_path, srcs_paths
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, key: int) -> Tuple[int, torch.Tensor, List[torch.Tensor]]:
"""Load the n-th sample from the dataset.
Args:
key (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
int:
Sample rate
Tensor:
Mixture waveform
List of Tensors:
List of source waveforms
"""
return self._load_sample(key)
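# Hedged usage sketch (the root path is a placeholder for a locally generated
# LibriMix tree; the dataset must be created with the scripts linked in the
# class docstring):
#
#     dataset = LibriMix("/data", subset="dev", num_speakers=2, sample_rate=8000)
#     sample_rate, mixture, sources = dataset[0]
#     assert len(sources) == 2 and mixture.shape == sources[0].shape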
|
from pathlib import Path
from typing import List, Tuple, Union
import torch
import torchaudio
from torch.utils.data import Dataset
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
_TASKS_TO_MIXTURE = {
"sep_clean": "mix_clean",
"enh_single": "mix_single",
"enh_both": "mix_both",
"sep_noisy": "mix_both",
}
class LibriMix(Dataset):
r"""*LibriMix* :cite:`cosentino2020librimix` dataset.
Args:
root (str or Path): The path to the directory where the directory ``Libri2Mix`` or
``Libri3Mix`` is stored.
subset (str, optional): The subset to use. Options: [``"train-360"``, ``"train-100"``,
``"dev"``, and ``"test"``] (Default: ``"train-360"``).
num_speakers (int, optional): The number of speakers, which determines the directories
to traverse. The Dataset will traverse ``s1`` to ``sN`` directories to collect
N source audios. (Default: 2)
        sample_rate (int, optional): Sample rate of audio files. The ``sample_rate`` determines
            from which subdirectory the audio files are fetched. If any audio file has a different
            sample rate, a ``ValueError`` is raised. Options: [8000, 16000] (Default: 8000)
task (str, optional): The task of LibriMix.
Options: [``"enh_single"``, ``"enh_both"``, ``"sep_clean"``, ``"sep_noisy"``]
(Default: ``"sep_clean"``)
mode (str, optional): The mode when creating the mixture. If set to ``"min"``, the lengths of mixture
and sources are the minimum length of all sources. If set to ``"max"``, the lengths of mixture and
sources are zero padded to the maximum length of all sources.
Options: [``"min"``, ``"max"``]
(Default: ``"min"``)
Note:
The LibriMix dataset needs to be manually generated. Please check https://github.com/JorisCos/LibriMix
"""
def __init__(
self,
root: Union[str, Path],
subset: str = "train-360",
num_speakers: int = 2,
sample_rate: int = 8000,
task: str = "sep_clean",
mode: str = "min",
):
self.root = Path(root) / f"Libri{num_speakers}Mix"
if mode not in ["max", "min"]:
            raise ValueError(f'Expect ``mode`` to be one of ["min", "max"]. Found {mode}.')
if sample_rate == 8000:
self.root = self.root / "wav8k" / mode / subset
elif sample_rate == 16000:
self.root = self.root / "wav16k" / mode / subset
else:
raise ValueError(f"Unsupported sample rate. Found {sample_rate}.")
self.sample_rate = sample_rate
self.task = task
self.mix_dir = (self.root / _TASKS_TO_MIXTURE[task]).resolve()
if task == "enh_both":
self.src_dirs = [(self.root / "mix_clean")]
else:
self.src_dirs = [(self.root / f"s{i+1}").resolve() for i in range(num_speakers)]
self.files = [p.name for p in self.mix_dir.glob("*.wav")]
self.files.sort()
def _load_audio(self, path) -> torch.Tensor:
waveform, sample_rate = torchaudio.load(path)
if sample_rate != self.sample_rate:
raise ValueError(
f"The dataset contains audio file of sample rate {sample_rate}, "
f"but the requested sample rate is {self.sample_rate}."
)
return waveform
def _load_sample(self, filename) -> SampleType:
mixed = self._load_audio(str(self.mix_dir / filename))
srcs = []
for i, dir_ in enumerate(self.src_dirs):
src = self._load_audio(str(dir_ / filename))
if mixed.shape != src.shape:
raise ValueError(f"Different waveform shapes. mixed: {mixed.shape}, src[{i}]: {src.shape}")
srcs.append(src)
return self.sample_rate, mixed, srcs
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, key: int) -> SampleType:
"""Load the n-th sample from the dataset.
Args:
key (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
int:
Sample rate
Tensor:
Mixture waveform
list of Tensors:
List of source waveforms
"""
return self._load_sample(self.files[key])
|
# coding: utf-8
"""Helper script for checking versions in the dynamic symbol table.
This script checks that the LightGBM library is linked against the appropriate symbol versions.
"""
import re
import sys
from pathlib import Path
def check_dependencies(objdump_string: str) -> None:
"""Check the dynamic symbol versions.
Parameters
----------
objdump_string : str
The dynamic symbol table entries of the file (result of `objdump -T` command).
"""
GLIBC_version = re.compile(r'0{16}[ \t]+GLIBC_(\d{1,2})[.](\d{1,3})[.]?\d{,3}[ \t]+')
versions = GLIBC_version.findall(objdump_string)
assert len(versions) > 1
for major, minor in versions:
assert int(major) <= 2
assert int(minor) <= 28
GLIBCXX_version = re.compile(r'0{16}[ \t]+GLIBCXX_(\d{1,2})[.](\d{1,2})[.]?(\d{,3})[ \t]+')
versions = GLIBCXX_version.findall(objdump_string)
assert len(versions) > 1
for major, minor, patch in versions:
assert int(major) == 3
assert int(minor) == 4
assert patch == '' or int(patch) <= 22
GOMP_version = re.compile(r'0{16}[ \t]+G?OMP_(\d{1,2})[.](\d{1,2})[.]?\d{,3}[ \t]+')
versions = GOMP_version.findall(objdump_string)
assert len(versions) > 1
for major, minor in versions:
assert int(major) <= 4
assert int(minor) <= 5
if __name__ == "__main__":
    check_dependencies(Path(sys.argv[1]).read_text(encoding='utf-8'))
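# Example usage (file names are illustrative):
#   objdump -T lib_lightgbm.so > objdump.log
#   python check_dynamic_dependencies.py objdump.log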
|
# coding: utf-8
"""Helper script for checking versions in the dynamic symbol table.
This script checks that the LightGBM library is linked against the appropriate symbol versions.
"""
import re
import sys
from pathlib import Path
def check_dependencies(objdump_string: str) -> None:
"""Check the dynamic symbol versions.
Parameters
----------
objdump_string : str
The dynamic symbol table entries of the file (result of `objdump -T` command).
"""
GLIBC_version = re.compile(r'0{16}[ \t]+GLIBC_(\d{1,2})[.](\d{1,3})[.]?\d{,3}[ \t]+')
versions = GLIBC_version.findall(objdump_string)
assert len(versions) > 1
for major, minor in versions:
assert int(major) <= 2
assert int(minor) <= 14
GLIBCXX_version = re.compile(r'0{16}[ \t]+GLIBCXX_(\d{1,2})[.](\d{1,2})[.]?(\d{,3})[ \t]+')
versions = GLIBCXX_version.findall(objdump_string)
assert len(versions) > 1
for major, minor, patch in versions:
assert int(major) == 3
assert int(minor) == 4
assert patch == '' or int(patch) <= 19
GOMP_version = re.compile(r'0{16}[ \t]+G?OMP_(\d{1,2})[.](\d{1,2})[.]?\d{,3}[ \t]+')
versions = GOMP_version.findall(objdump_string)
assert len(versions) > 1
for major, minor in versions:
assert int(major) == 1
assert int(minor) == 0
if __name__ == "__main__":
    check_dependencies(Path(sys.argv[1]).read_text(encoding='utf-8'))
|
import logging
import pathlib
from postmarker.core import PostmarkClient
from postmarker.models.emails import EmailManager
from prisma.enums import NotificationType
from pydantic import BaseModel
from backend.data.notifications import (
NotificationEventModel,
NotificationTypeOverride,
T_co,
)
from backend.util.settings import Settings
from backend.util.text import TextFormatter
logger = logging.getLogger(__name__)
settings = Settings()
# The following is a workaround to get the type checker to recognize the EmailManager type
# This is a temporary solution and should be removed once the Postmark library is updated
# to support type annotations.
class TypedPostmarkClient(PostmarkClient):
emails: EmailManager
class Template(BaseModel):
subject_template: str
body_template: str
base_template: str
class EmailSender:
def __init__(self):
if settings.secrets.postmark_server_api_token:
self.postmark = TypedPostmarkClient(
server_token=settings.secrets.postmark_server_api_token
)
else:
logger.warning(
"Postmark server API token not found, email sending disabled"
)
self.postmark = None
self.formatter = TextFormatter()
def send_templated(
self,
notification: NotificationType,
user_email: str,
data: NotificationEventModel[T_co] | list[NotificationEventModel[T_co]],
user_unsub_link: str | None = None,
):
"""Send an email to a user using a template pulled from the notification type"""
if not self.postmark:
logger.warning("Postmark client not initialized, email not sent")
return
template = self._get_template(notification)
base_url = (
settings.config.frontend_base_url or settings.config.platform_base_url
)
try:
subject, full_message = self.formatter.format_email(
base_template=template.base_template,
subject_template=template.subject_template,
content_template=template.body_template,
data=data,
unsubscribe_link=f"{base_url}/profile/settings",
)
except Exception as e:
logger.error(f"Error formatting full message: {e}")
raise e
self._send_email(
user_email=user_email,
user_unsubscribe_link=user_unsub_link,
subject=subject,
body=full_message,
)
def _get_template(self, notification: NotificationType):
# convert the notification type to a notification type override
notification_type_override = NotificationTypeOverride(notification)
        # resolve the template file at templates/<name>.jinja2
        # (NotificationTypeOverride.template provides the file name)
template_path = f"templates/{notification_type_override.template}.jinja2"
logger.debug(
f"Template full path: {pathlib.Path(__file__).parent / template_path}"
)
base_template_path = "templates/base.html.jinja2"
with open(pathlib.Path(__file__).parent / base_template_path, "r") as file:
base_template = file.read()
with open(pathlib.Path(__file__).parent / template_path, "r") as file:
template = file.read()
return Template(
subject_template=notification_type_override.subject,
body_template=template,
base_template=base_template,
)
def _send_email(
self,
user_email: str,
subject: str,
body: str,
user_unsubscribe_link: str | None = None,
):
if not self.postmark:
logger.warning("Email tried to send without postmark configured")
return
logger.debug(f"Sending email to {user_email} with subject {subject}")
self.postmark.emails.send(
From=settings.config.postmark_sender_email,
To=user_email,
Subject=subject,
HtmlBody=body,
# Headers default to None internally so this is fine
Headers=(
{
"List-Unsubscribe-Post": "List-Unsubscribe=One-Click",
"List-Unsubscribe": f"<{user_unsubscribe_link}>",
}
if user_unsubscribe_link
else None
),
)
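# Hedged usage sketch (the enum member and event payload are illustrative,
# not guaranteed names in this codebase):
#
#     sender = EmailSender()
#     sender.send_templated(
#         notification=NotificationType.AGENT_RUN,
#         user_email="user@example.com",
#         data=event,  # a NotificationEventModel instance
#         user_unsub_link="https://example.com/unsubscribe?token=abc",
#     )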
|
import logging
import pathlib
from postmarker.core import PostmarkClient
from postmarker.models.emails import EmailManager
from prisma.enums import NotificationType
from pydantic import BaseModel
from backend.data.notifications import (
NotificationEventModel,
NotificationTypeOverride,
T_co,
)
from backend.util.settings import Settings
from backend.util.text import TextFormatter
logger = logging.getLogger(__name__)
settings = Settings()
# The following is a workaround to get the type checker to recognize the EmailManager type
# This is a temporary solution and should be removed once the Postmark library is updated
# to support type annotations.
class TypedPostmarkClient(PostmarkClient):
emails: EmailManager
class Template(BaseModel):
subject_template: str
body_template: str
base_template: str
class EmailSender:
def __init__(self):
if settings.secrets.postmark_server_api_token:
self.postmark = TypedPostmarkClient(
server_token=settings.secrets.postmark_server_api_token
)
else:
logger.warning(
"Postmark server API token not found, email sending disabled"
)
self.postmark = None
self.formatter = TextFormatter()
def send_templated(
self,
notification: NotificationType,
user_email: str,
data: NotificationEventModel[T_co] | list[NotificationEventModel[T_co]],
):
"""Send an email to a user using a template pulled from the notification type"""
if not self.postmark:
logger.warning("Postmark client not initialized, email not sent")
return
template = self._get_template(notification)
try:
subject, full_message = self.formatter.format_email(
base_template=template.base_template,
subject_template=template.subject_template,
content_template=template.body_template,
data=data,
unsubscribe_link="https://platform.agpt.co/profile/settings",
)
except Exception as e:
logger.error(f"Error formatting full message: {e}")
raise e
self._send_email(user_email, subject, full_message)
def _get_template(self, notification: NotificationType):
# convert the notification type to a notification type override
notification_type_override = NotificationTypeOverride(notification)
        # resolve the template file at templates/<name>.jinja2
        # (NotificationTypeOverride.template provides the file name)
template_path = f"templates/{notification_type_override.template}.jinja2"
logger.debug(
f"Template full path: {pathlib.Path(__file__).parent / template_path}"
)
base_template_path = "templates/base.html.jinja2"
with open(pathlib.Path(__file__).parent / base_template_path, "r") as file:
base_template = file.read()
with open(pathlib.Path(__file__).parent / template_path, "r") as file:
template = file.read()
return Template(
subject_template=notification_type_override.subject,
body_template=template,
base_template=base_template,
)
def _send_email(self, user_email: str, subject: str, body: str):
if not self.postmark:
logger.warning("Email tried to send without postmark configured")
return
logger.debug(f"Sending email to {user_email} with subject {subject}")
self.postmark.emails.send(
From=settings.config.postmark_sender_email,
To=user_email,
Subject=subject,
HtmlBody=body,
)
|
import collections
import itertools
import numpy as np
from absl.testing import parameterized
from torch.utils.data import Dataset as TorchDataset
from keras.src.testing import test_case
from keras.src.testing.test_utils import named_product
from keras.src.utils.dataset_utils import split_dataset
from keras.src.utils.module_utils import tensorflow as tf
class MyTorchDataset(TorchDataset):
def __init__(self, x, y):
self.x = x
self.y = y
def __len__(self):
return len(self.x)
def __getitem__(self, index):
return self.x[index], self.y[index]
class DatasetUtilsTest(test_case.TestCase):
@parameterized.named_parameters(
named_product(
dataset_type=["list", "tuple", "tensorflow", "torch"],
features_shape=[(2,), (100, 2), (10, 10, 2)],
)
)
def test_split_dataset(self, dataset_type, features_shape):
n_sample, left_size, right_size = 100, 0.2, 0.8
features = np.random.sample((n_sample,) + features_shape)
labels = np.random.sample((n_sample, 1))
if dataset_type == "list":
dataset = [features, labels]
elif dataset_type == "tuple":
dataset = (features, labels)
elif dataset_type == "tensorflow":
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
elif dataset_type == "torch":
dataset = MyTorchDataset(features, labels)
dataset_left, dataset_right = split_dataset(
dataset, left_size=left_size, right_size=right_size
)
self.assertEqual(
int(dataset_left.cardinality()), int(n_sample * left_size)
)
self.assertEqual(
int(dataset_right.cardinality()), int(n_sample * right_size)
)
for sample in itertools.chain(dataset_left, dataset_right):
self.assertEqual(sample[0].shape, features_shape)
self.assertEqual(sample[1].shape, (1,))
@parameterized.named_parameters(
named_product(structure_type=["tuple", "dict", "OrderedDict"])
)
def test_split_dataset_nested_structures(self, structure_type):
n_sample, left_size, right_size = 100, 0.2, 0.8
features1 = np.random.sample((n_sample, 2))
features2 = np.random.sample((n_sample, 10, 2))
labels = np.random.sample((n_sample, 1))
if structure_type == "tuple":
dataset = tf.data.Dataset.from_tensor_slices(
((features1, features2), labels)
)
if structure_type == "dict":
dataset = tf.data.Dataset.from_tensor_slices(
{"y": features2, "x": features1, "labels": labels}
)
if structure_type == "OrderedDict":
dataset = tf.data.Dataset.from_tensor_slices(
collections.OrderedDict(
[("y", features2), ("x", features1), ("labels", labels)]
)
)
dataset_left, dataset_right = split_dataset(
dataset, left_size=left_size, right_size=right_size
)
self.assertEqual(
int(dataset_left.cardinality()), int(n_sample * left_size)
)
self.assertEqual(
int(dataset_right.cardinality()), int(n_sample * right_size)
)
for sample in itertools.chain(dataset_left, dataset_right):
if structure_type in ("dict", "OrderedDict"):
x, y, labels = sample["x"], sample["y"], sample["labels"]
elif structure_type == "tuple":
(x, y), labels = sample
self.assertEqual(x.shape, (2,))
self.assertEqual(y.shape, (10, 2))
self.assertEqual(labels.shape, (1,))
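# For context, ``split_dataset`` (also exposed as ``keras.utils.split_dataset``)
# returns two ``tf.data.Dataset`` objects whose cardinalities follow the
# requested fractions, e.g.:
#
#     left, right = split_dataset(tf.data.Dataset.range(10), left_size=0.2)
#     assert int(left.cardinality()) == 2 and int(right.cardinality()) == 8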
|
import itertools
import numpy as np
from absl.testing import parameterized
from torch.utils.data import Dataset as TorchDataset
from keras.src.testing import test_case
from keras.src.testing.test_utils import named_product
from keras.src.utils.dataset_utils import split_dataset
from keras.src.utils.module_utils import tensorflow as tf
class MyTorchDataset(TorchDataset):
def __init__(self, x, y):
self.x = x
self.y = y
def __len__(self):
return len(self.x)
def __getitem__(self, index):
return self.x[index], self.y[index]
class DatasetUtilsTest(test_case.TestCase):
@parameterized.named_parameters(
named_product(
dataset_type=["list", "tuple", "tensorflow", "torch"],
features_shape=[(2,), (100, 2), (10, 10, 2)],
)
)
def test_split_dataset(self, dataset_type, features_shape):
n_sample, left_size, right_size = 100, 0.2, 0.8
features = np.random.sample((n_sample,) + features_shape)
labels = np.random.sample((n_sample, 1))
if dataset_type == "list":
dataset = [features, labels]
elif dataset_type == "tuple":
dataset = (features, labels)
elif dataset_type == "tensorflow":
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
elif dataset_type == "torch":
dataset = MyTorchDataset(features, labels)
dataset_left, dataset_right = split_dataset(
dataset, left_size=left_size, right_size=right_size
)
self.assertEqual(
int(dataset_left.cardinality()), int(n_sample * left_size)
)
self.assertEqual(
int(dataset_right.cardinality()), int(n_sample * right_size)
)
for sample in itertools.chain(dataset_left, dataset_right):
self.assertEqual(sample[0].shape, features_shape)
self.assertEqual(sample[1].shape, (1,))
@parameterized.named_parameters(
named_product(structure_type=["dict", "tuple"])
)
def test_split_dataset_nested_structures(self, structure_type):
n_sample, left_size, right_size = 100, 0.2, 0.8
features1 = np.random.sample((n_sample, 2))
features2 = np.random.sample((n_sample, 10, 2))
labels = np.random.sample((n_sample, 1))
if structure_type == "dict":
dataset = tf.data.Dataset.from_tensor_slices(
{"x1": features1, "x2": features2, "labels": labels}
)
elif structure_type == "tuple":
dataset = tf.data.Dataset.from_tensor_slices(
((features1, features2), labels)
)
dataset_left, dataset_right = split_dataset(
dataset, left_size=left_size, right_size=right_size
)
self.assertEqual(
int(dataset_left.cardinality()), int(n_sample * left_size)
)
self.assertEqual(
int(dataset_right.cardinality()), int(n_sample * right_size)
)
for sample in itertools.chain(dataset_left, dataset_right):
if structure_type == "dict":
x1, x2, labels = sample["x1"], sample["x2"], sample["labels"]
elif structure_type == "tuple":
(x1, x2), labels = sample
self.assertEqual(x1.shape, (2,))
self.assertEqual(x2.shape, (10, 2))
self.assertEqual(labels.shape, (1,))
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(pad_size_divisor=64),
neck=dict(
type='FPN_CARAFE',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
start_level=0,
end_level=-1,
norm_cfg=None,
act_cfg=None,
order=('conv', 'norm', 'act'),
upsample_cfg=dict(
type='carafe',
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1,
compressed_channels=64)),
roi_head=dict(
mask_head=dict(
upsample_cfg=dict(
type='carafe',
scale_factor=2,
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1,
compressed_channels=64))))
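# CARAFE ("Content-Aware ReAssembly of FEatures") replaces the default
# nearest-neighbor upsampling in both the FPN and the mask head with learned
# reassembly kernels (up_kernel=5, predicted from 64 compressed channels here).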
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
neck=dict(
type='FPN_CARAFE',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
start_level=0,
end_level=-1,
norm_cfg=None,
act_cfg=None,
order=('conv', 'norm', 'act'),
upsample_cfg=dict(
type='carafe',
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1,
compressed_channels=64)),
roi_head=dict(
mask_head=dict(
upsample_cfg=dict(
type='carafe',
scale_factor=2,
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1,
compressed_channels=64))))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
from textwrap import dedent
from types import SimpleNamespace
from unittest.mock import patch
from urllib.parse import quote
import pytest
from huggingface_hub import CommitOperationAdd, CommitOperationDelete
import datasets
from datasets.config import METADATA_CONFIGS_FIELD
from datasets.hub import delete_from_hub
from datasets.utils.hub import hf_dataset_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("filename", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_dataset_url(repo_id, filename, revision):
url = hf_dataset_url(repo_id=repo_id, filename=filename, revision=revision)
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(filename)}"
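# For example (values illustrative), the URL built above resolves to:
#   hf_dataset_url("org-name/dataset-name", "file.csv", revision=None)
#   -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/file.csv"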
def test_delete_from_hub(temporary_repo, hf_api, hf_token, csv_path, ci_hub_config) -> None:
with temporary_repo() as repo_id:
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
hf_api.upload_file(
path_or_fileobj=str(csv_path),
path_in_repo="cats/train/0000.csv",
repo_id=repo_id,
repo_type="dataset",
token=hf_token,
)
hf_api.upload_file(
path_or_fileobj=str(csv_path),
path_in_repo="dogs/train/0000.csv",
repo_id=repo_id,
repo_type="dataset",
token=hf_token,
)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=dedent(f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: cats
data_files:
- split: train
path: cats/train/*
- config_name: dogs
data_files:
- split: train
path: dogs/train/*
---
""").encode(),
path_in_repo="README.md",
repo_id=repo_id,
repo_type="dataset",
)
commit_info = SimpleNamespace(
pr_url="https:///hub-ci.huggingface.co/datasets/__DUMMY_USER__/__DUMMY_DATASET__/refs%2Fpr%2F1"
)
with patch.object(datasets.hub.HfApi, "create_commit", return_value=commit_info) as mock_method:
_ = delete_from_hub(repo_id, "dogs")
assert mock_method.called
assert mock_method.call_args.kwargs.get("commit_message") == "Delete 'dogs' config"
assert mock_method.call_args.kwargs.get("create_pr")
expected_operations = [
CommitOperationDelete(path_in_repo="dogs/train/0000.csv", is_folder=False),
CommitOperationAdd(
path_in_repo="README.md",
path_or_fileobj=dedent(f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: cats
data_files:
- split: train
path: cats/train/*
---
""").encode(),
),
]
assert mock_method.call_args.kwargs.get("operations") == expected_operations
|
from textwrap import dedent
from types import SimpleNamespace
from unittest.mock import patch
from urllib.parse import quote
import pytest
from huggingface_hub import CommitOperationAdd, CommitOperationDelete
import datasets
from datasets.config import METADATA_CONFIGS_FIELD
from datasets.hub import delete_from_hub
from datasets.utils.hub import hf_dataset_url
DUMMY_DATASET_SCRIPT = dedent("""\
import datasets
class NewDataset(datasets.GeneratorBasedBuilder):
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="first"),
datasets.BuilderConfig(name="second"),
]
DEFAULT_CONFIG_NAME = "first"
def _info(self):
return datasets.DatasetInfo(
features=datasets.Features({"text": datasets.Value("string")}),
)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
def _generate_examples(self):
for key in range(5):
yield key, {"text": f"{self.config.name}-{key}"}
""")
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("filename", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_dataset_url(repo_id, filename, revision):
url = hf_dataset_url(repo_id=repo_id, filename=filename, revision=revision)
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(filename)}"
def test_delete_from_hub(temporary_repo, hf_api, hf_token, csv_path, ci_hub_config) -> None:
with temporary_repo() as repo_id:
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
hf_api.upload_file(
path_or_fileobj=str(csv_path),
path_in_repo="cats/train/0000.csv",
repo_id=repo_id,
repo_type="dataset",
token=hf_token,
)
hf_api.upload_file(
path_or_fileobj=str(csv_path),
path_in_repo="dogs/train/0000.csv",
repo_id=repo_id,
repo_type="dataset",
token=hf_token,
)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=dedent(f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: cats
data_files:
- split: train
path: cats/train/*
- config_name: dogs
data_files:
- split: train
path: dogs/train/*
---
""").encode(),
path_in_repo="README.md",
repo_id=repo_id,
repo_type="dataset",
)
commit_info = SimpleNamespace(
pr_url="https:///hub-ci.huggingface.co/datasets/__DUMMY_USER__/__DUMMY_DATASET__/refs%2Fpr%2F1"
)
with patch.object(datasets.hub.HfApi, "create_commit", return_value=commit_info) as mock_method:
_ = delete_from_hub(repo_id, "dogs")
assert mock_method.called
assert mock_method.call_args.kwargs.get("commit_message") == "Delete 'dogs' config"
assert mock_method.call_args.kwargs.get("create_pr")
expected_operations = [
CommitOperationDelete(path_in_repo="dogs/train/0000.csv", is_folder=False),
CommitOperationAdd(
path_in_repo="README.md",
path_or_fileobj=dedent(f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: cats
data_files:
- split: train
path: cats/train/*
---
""").encode(),
),
]
assert mock_method.call_args.kwargs.get("operations") == expected_operations
|
"""Example of parametrized tests for analytics endpoints."""
import json
from unittest.mock import AsyncMock, Mock
import fastapi
import fastapi.testclient
import pytest
import pytest_mock
from pytest_snapshot.plugin import Snapshot
import backend.server.routers.analytics as analytics_routes
from backend.server.conftest import TEST_USER_ID
from backend.server.utils import get_user_id
app = fastapi.FastAPI()
app.include_router(analytics_routes.router)
client = fastapi.testclient.TestClient(app)
def override_get_user_id() -> str:
"""Override get_user_id for testing"""
return TEST_USER_ID
app.dependency_overrides[get_user_id] = override_get_user_id
@pytest.mark.parametrize(
"metric_value,metric_name,data_string,test_id",
[
(100, "api_calls_count", "external_api", "integer_value"),
(0, "error_count", "no_errors", "zero_value"),
(-5.2, "temperature_delta", "cooling", "negative_value"),
(1.23456789, "precision_test", "float_precision", "float_precision"),
(999999999, "large_number", "max_value", "large_number"),
(0.0000001, "tiny_number", "min_value", "tiny_number"),
],
)
def test_log_raw_metric_values_parametrized(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
metric_value: float,
metric_name: str,
data_string: str,
test_id: str,
) -> None:
"""Test raw metric logging with various metric values using parametrize."""
# Mock the analytics function
mock_result = Mock(id=f"metric-{test_id}-uuid")
mocker.patch(
"backend.data.analytics.log_raw_metric",
new_callable=AsyncMock,
return_value=mock_result,
)
request_data = {
"metric_name": metric_name,
"metric_value": metric_value,
"data_string": data_string,
}
response = client.post("/log_raw_metric", json=request_data)
# Better error handling
assert response.status_code == 200, f"Failed for {test_id}: {response.text}"
response_data = response.json()
# Snapshot test the response
configured_snapshot.assert_match(
json.dumps(
{"metric_id": response_data, "test_case": test_id}, indent=2, sort_keys=True
),
f"analytics_metric_{test_id}",
)
@pytest.mark.parametrize(
"invalid_data,expected_error",
[
({}, "Field required"), # Missing all fields
({"metric_name": "test"}, "Field required"), # Missing metric_value
(
{"metric_name": "test", "metric_value": "not_a_number"},
"Input should be a valid number",
), # Invalid type
(
{"metric_name": "", "metric_value": 1.0, "data_string": "test"},
"String should have at least 1 character",
), # Empty name
],
)
def test_log_raw_metric_invalid_requests_parametrized(
mocker: pytest_mock.MockFixture,
invalid_data: dict,
expected_error: str,
) -> None:
"""Test invalid metric requests with parametrize."""
# Mock the analytics function to avoid event loop issues
mocker.patch(
"backend.data.analytics.log_raw_metric",
new_callable=AsyncMock,
return_value=Mock(id="test-id"),
)
response = client.post("/log_raw_metric", json=invalid_data)
assert response.status_code == 422
error_detail = response.json()
assert "detail" in error_detail
# Verify error message contains expected error
error_text = json.dumps(error_detail)
assert expected_error in error_text or expected_error.lower() in error_text.lower()
|
"""Example of parametrized tests for analytics endpoints."""
import json
from unittest.mock import AsyncMock, Mock
import fastapi
import fastapi.testclient
import pytest
import pytest_mock
from pytest_snapshot.plugin import Snapshot
import backend.server.routers.analytics as analytics_routes
from backend.server.conftest import TEST_USER_ID
from backend.server.utils import get_user_id
app = fastapi.FastAPI()
app.include_router(analytics_routes.router)
client = fastapi.testclient.TestClient(app)
def override_get_user_id() -> str:
"""Override get_user_id for testing"""
return TEST_USER_ID
app.dependency_overrides[get_user_id] = override_get_user_id
@pytest.mark.parametrize(
"metric_value,metric_name,data_string,test_id",
[
(100, "api_calls_count", "external_api", "integer_value"),
(0, "error_count", "no_errors", "zero_value"),
(-5.2, "temperature_delta", "cooling", "negative_value"),
(1.23456789, "precision_test", "float_precision", "float_precision"),
(999999999, "large_number", "max_value", "large_number"),
(0.0000001, "tiny_number", "min_value", "tiny_number"),
],
)
def test_log_raw_metric_values_parametrized(
mocker: pytest_mock.MockFixture,
configured_snapshot: Snapshot,
metric_value: float,
metric_name: str,
data_string: str,
test_id: str,
) -> None:
"""Test raw metric logging with various metric values using parametrize."""
# Mock the analytics function
mock_result = Mock(id=f"metric-{test_id}-uuid")
mocker.patch(
"backend.data.analytics.log_raw_metric",
new_callable=AsyncMock,
return_value=mock_result,
)
request_data = {
"metric_name": metric_name,
"metric_value": metric_value,
"data_string": data_string,
}
response = client.post("/log_raw_metric", json=request_data)
    # Assert success, surfacing the response body in the failure message
assert response.status_code == 200, f"Failed for {test_id}: {response.text}"
response_data = response.json()
# Snapshot test the response
configured_snapshot.assert_match(
json.dumps(
{"metric_id": response_data, "test_case": test_id}, indent=2, sort_keys=True
),
f"analytics_metric_{test_id}",
)
@pytest.mark.parametrize(
"invalid_data,expected_error",
[
({}, "Field required"), # Missing all fields
({"metric_name": "test"}, "Field required"), # Missing metric_value
(
{"metric_name": "test", "metric_value": "not_a_number"},
"Input should be a valid number",
), # Invalid type
(
{"metric_name": "", "metric_value": 1.0, "data_string": "test"},
"String should have at least 1 character",
), # Empty name
],
)
def test_log_raw_metric_invalid_requests_parametrized(
invalid_data: dict,
expected_error: str,
) -> None:
"""Test invalid metric requests with parametrize."""
response = client.post("/log_raw_metric", json=invalid_data)
assert response.status_code == 422
error_detail = response.json()
assert "detail" in error_detail
# Verify error message contains expected error
error_text = json.dumps(error_detail)
assert expected_error in error_text or expected_error.lower() in error_text.lower()
|
from __future__ import annotations
from typing import Any, cast, List, Optional, Tuple, Union
import torch
from torchvision.transforms import InterpolationMode
from ._feature import _Feature, FillTypeJIT
class Mask(_Feature):
@classmethod
def _wrap(cls, tensor: torch.Tensor) -> Mask:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: bool = False,
) -> Mask:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor)
@classmethod
def wrap_like(
cls,
other: Mask,
tensor: torch.Tensor,
) -> Mask:
return cls._wrap(tensor)
@property
def spatial_size(self) -> Tuple[int, int]:
return cast(Tuple[int, int], tuple(self.shape[-2:]))
def horizontal_flip(self) -> Mask:
output = self._F.horizontal_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def vertical_flip(self) -> Mask:
output = self._F.vertical_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def resize( # type: ignore[override]
self,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
max_size: Optional[int] = None,
antialias: bool = False,
) -> Mask:
output = self._F.resize_mask(self.as_subclass(torch.Tensor), size, max_size=max_size)
return Mask.wrap_like(self, output)
def crop(self, top: int, left: int, height: int, width: int) -> Mask:
output = self._F.crop_mask(self.as_subclass(torch.Tensor), top, left, height, width)
return Mask.wrap_like(self, output)
def center_crop(self, output_size: List[int]) -> Mask:
output = self._F.center_crop_mask(self.as_subclass(torch.Tensor), output_size=output_size)
return Mask.wrap_like(self, output)
def resized_crop(
self,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
antialias: bool = False,
) -> Mask:
output = self._F.resized_crop_mask(self.as_subclass(torch.Tensor), top, left, height, width, size=size)
return Mask.wrap_like(self, output)
def pad(
self,
padding: Union[int, List[int]],
fill: FillTypeJIT = None,
padding_mode: str = "constant",
) -> Mask:
output = self._F.pad_mask(self.as_subclass(torch.Tensor), padding, padding_mode=padding_mode, fill=fill)
return Mask.wrap_like(self, output)
def rotate(
self,
angle: float,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
expand: bool = False,
fill: FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Mask:
output = self._F.rotate_mask(self.as_subclass(torch.Tensor), angle, expand=expand, center=center, fill=fill)
return Mask.wrap_like(self, output)
def affine(
self,
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Mask:
output = self._F.affine_mask(
self.as_subclass(torch.Tensor),
angle,
translate=translate,
scale=scale,
shear=shear,
fill=fill,
center=center,
)
return Mask.wrap_like(self, output)
def perspective(
self,
perspective_coeffs: List[float],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.perspective_mask(self.as_subclass(torch.Tensor), perspective_coeffs, fill=fill)
return Mask.wrap_like(self, output)
def elastic(
self,
displacement: torch.Tensor,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.elastic_mask(self.as_subclass(torch.Tensor), displacement, fill=fill)
return Mask.wrap_like(self, output)
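# Minimal usage sketch (shapes assumed for illustration; not part of the module):
#   mask = Mask(torch.zeros(1, 16, 16, dtype=torch.uint8))
#   flipped = mask.horizontal_flip()  # unwraps to a plain Tensor for the kernel
#   isinstance(flipped, Mask)         # True: wrap_like re-applies the subclass
#   mask.spatial_size                 # (16, 16), the trailing (H, W) dimensions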
|
from __future__ import annotations
from typing import Any, cast, List, Optional, Tuple, Union
import torch
from torchvision.transforms import InterpolationMode
from ._feature import _Feature, FillTypeJIT
class Mask(_Feature):
@classmethod
def _wrap(cls, tensor: torch.Tensor) -> Mask:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: bool = False,
) -> Mask:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor)
@classmethod
def wrap_like(
cls,
other: Mask,
tensor: torch.Tensor,
) -> Mask:
return cls._wrap(tensor)
@property
def spatial_size(self) -> Tuple[int, int]:
return cast(Tuple[int, int], tuple(self.shape[-2:]))
def horizontal_flip(self) -> Mask:
output = self._F.horizontal_flip_mask(self)
return Mask.wrap_like(self, output)
def vertical_flip(self) -> Mask:
output = self._F.vertical_flip_mask(self)
return Mask.wrap_like(self, output)
def resize( # type: ignore[override]
self,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
max_size: Optional[int] = None,
antialias: bool = False,
) -> Mask:
output = self._F.resize_mask(self, size, max_size=max_size)
return Mask.wrap_like(self, output)
def crop(self, top: int, left: int, height: int, width: int) -> Mask:
output = self._F.crop_mask(self, top, left, height, width)
return Mask.wrap_like(self, output)
def center_crop(self, output_size: List[int]) -> Mask:
output = self._F.center_crop_mask(self, output_size=output_size)
return Mask.wrap_like(self, output)
def resized_crop(
self,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
antialias: bool = False,
) -> Mask:
output = self._F.resized_crop_mask(self, top, left, height, width, size=size)
return Mask.wrap_like(self, output)
def pad(
self,
padding: Union[int, List[int]],
fill: FillTypeJIT = None,
padding_mode: str = "constant",
) -> Mask:
output = self._F.pad_mask(self, padding, padding_mode=padding_mode, fill=fill)
return Mask.wrap_like(self, output)
def rotate(
self,
angle: float,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
expand: bool = False,
fill: FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Mask:
output = self._F.rotate_mask(self, angle, expand=expand, center=center, fill=fill)
return Mask.wrap_like(self, output)
def affine(
self,
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Mask:
output = self._F.affine_mask(
self,
angle,
translate=translate,
scale=scale,
shear=shear,
fill=fill,
center=center,
)
return Mask.wrap_like(self, output)
def perspective(
self,
perspective_coeffs: List[float],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.perspective_mask(self, perspective_coeffs, fill=fill)
return Mask.wrap_like(self, output)
def elastic(
self,
displacement: torch.Tensor,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.elastic_mask(self, displacement, fill=fill)
return Mask.wrap_like(self, output)
|
import warnings
from typing import Any, Dict, List, Union
import numpy as np
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.prototype.transforms import Transform
from torchvision.transforms import functional as _F
from typing_extensions import Literal
from ._transform import _RandomApplyTransform
from ._utils import query_chw
class ToTensor(Transform):
_transformed_types = (PIL.Image.Image, np.ndarray)
def __init__(self) -> None:
warnings.warn(
"The transform `ToTensor()` is deprecated and will be removed in a future release. "
"Instead, please use `transforms.Compose([transforms.ToImageTensor(), transforms.ConvertImageDtype()])`."
)
super().__init__()
def _transform(self, inpt: Union[PIL.Image.Image, np.ndarray], params: Dict[str, Any]) -> torch.Tensor:
return _F.to_tensor(inpt)
class Grayscale(Transform):
_transformed_types = (features.Image, PIL.Image.Image, features.is_simple_tensor, features.Video)
def __init__(self, num_output_channels: Literal[1, 3] = 1) -> None:
deprecation_msg = (
f"The transform `Grayscale(num_output_channels={num_output_channels})` "
f"is deprecated and will be removed in a future release."
)
if num_output_channels == 1:
replacement_msg = (
"transforms.ConvertImageColorSpace(old_color_space=ColorSpace.RGB, color_space=ColorSpace.GRAY)"
)
else:
replacement_msg = (
"transforms.Compose(\n"
" transforms.ConvertImageColorSpace(old_color_space=ColorSpace.RGB, color_space=ColorSpace.GRAY),\n"
" transforms.ConvertImageColorSpace(old_color_space=ColorSpace.GRAY, color_space=ColorSpace.RGB),\n"
")"
)
warnings.warn(f"{deprecation_msg} Instead, please use\n\n{replacement_msg}")
super().__init__()
self.num_output_channels = num_output_channels
def _transform(
self, inpt: Union[features.ImageType, features.VideoType], params: Dict[str, Any]
) -> Union[features.ImageType, features.VideoType]:
output = _F.rgb_to_grayscale(inpt, num_output_channels=self.num_output_channels)
if isinstance(inpt, (features.Image, features.Video)):
output = inpt.wrap_like(inpt, output, color_space=features.ColorSpace.GRAY) # type: ignore[arg-type]
return output
class RandomGrayscale(_RandomApplyTransform):
_transformed_types = (features.Image, PIL.Image.Image, features.is_simple_tensor, features.Video)
def __init__(self, p: float = 0.1) -> None:
warnings.warn(
"The transform `RandomGrayscale(p=...)` is deprecated and will be removed in a future release. "
"Instead, please use\n\n"
"transforms.RandomApply(\n"
" transforms.Compose(\n"
" transforms.ConvertImageColorSpace(old_color_space=ColorSpace.RGB, color_space=ColorSpace.GRAY),\n"
" transforms.ConvertImageColorSpace(old_color_space=ColorSpace.GRAY, color_space=ColorSpace.RGB),\n"
" )\n"
" p=...,\n"
")"
)
super().__init__(p=p)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
num_input_channels, *_ = query_chw(flat_inputs)
return dict(num_input_channels=num_input_channels)
def _transform(
self, inpt: Union[features.ImageType, features.VideoType], params: Dict[str, Any]
) -> Union[features.ImageType, features.VideoType]:
output = _F.rgb_to_grayscale(inpt, num_output_channels=params["num_input_channels"])
if isinstance(inpt, (features.Image, features.Video)):
output = inpt.wrap_like(inpt, output, color_space=features.ColorSpace.GRAY) # type: ignore[arg-type]
return output
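# Note on RandomGrayscale above: _transform requests num_output_channels equal to
# the sampled num_input_channels, so the output keeps the input's channel count
# and pipelines expecting RGB-shaped tensors continue to work unchanged.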
|
import warnings
from typing import Any, Dict, Union
import numpy as np
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.prototype.transforms import Transform
from torchvision.transforms import functional as _F
from typing_extensions import Literal
from ._transform import _RandomApplyTransform
from ._utils import query_chw
class ToTensor(Transform):
_transformed_types = (PIL.Image.Image, np.ndarray)
def __init__(self) -> None:
warnings.warn(
"The transform `ToTensor()` is deprecated and will be removed in a future release. "
"Instead, please use `transforms.Compose([transforms.ToImageTensor(), transforms.ConvertImageDtype()])`."
)
super().__init__()
def _transform(self, inpt: Union[PIL.Image.Image, np.ndarray], params: Dict[str, Any]) -> torch.Tensor:
return _F.to_tensor(inpt)
class Grayscale(Transform):
_transformed_types = (features.Image, PIL.Image.Image, features.is_simple_tensor, features.Video)
def __init__(self, num_output_channels: Literal[1, 3] = 1) -> None:
deprecation_msg = (
f"The transform `Grayscale(num_output_channels={num_output_channels})` "
f"is deprecated and will be removed in a future release."
)
if num_output_channels == 1:
replacement_msg = (
"transforms.ConvertImageColorSpace(old_color_space=ColorSpace.RGB, color_space=ColorSpace.GRAY)"
)
else:
replacement_msg = (
"transforms.Compose(\n"
" transforms.ConvertImageColorSpace(old_color_space=ColorSpace.RGB, color_space=ColorSpace.GRAY),\n"
" transforms.ConvertImageColorSpace(old_color_space=ColorSpace.GRAY, color_space=ColorSpace.RGB),\n"
")"
)
warnings.warn(f"{deprecation_msg} Instead, please use\n\n{replacement_msg}")
super().__init__()
self.num_output_channels = num_output_channels
def _transform(
self, inpt: Union[features.ImageType, features.VideoType], params: Dict[str, Any]
) -> Union[features.ImageType, features.VideoType]:
output = _F.rgb_to_grayscale(inpt, num_output_channels=self.num_output_channels)
if isinstance(inpt, (features.Image, features.Video)):
output = inpt.wrap_like(inpt, output, color_space=features.ColorSpace.GRAY) # type: ignore[arg-type]
return output
class RandomGrayscale(_RandomApplyTransform):
_transformed_types = (features.Image, PIL.Image.Image, features.is_simple_tensor, features.Video)
def __init__(self, p: float = 0.1) -> None:
warnings.warn(
"The transform `RandomGrayscale(p=...)` is deprecated and will be removed in a future release. "
"Instead, please use\n\n"
"transforms.RandomApply(\n"
" transforms.Compose(\n"
" transforms.ConvertImageColorSpace(old_color_space=ColorSpace.RGB, color_space=ColorSpace.GRAY),\n"
" transforms.ConvertImageColorSpace(old_color_space=ColorSpace.GRAY, color_space=ColorSpace.RGB),\n"
" )\n"
" p=...,\n"
")"
)
super().__init__(p=p)
def _get_params(self, sample: Any) -> Dict[str, Any]:
num_input_channels, *_ = query_chw(sample)
return dict(num_input_channels=num_input_channels)
def _transform(
self, inpt: Union[features.ImageType, features.VideoType], params: Dict[str, Any]
) -> Union[features.ImageType, features.VideoType]:
output = _F.rgb_to_grayscale(inpt, num_output_channels=params["num_input_channels"])
if isinstance(inpt, (features.Image, features.Video)):
output = inpt.wrap_like(inpt, output, color_space=features.ColorSpace.GRAY) # type: ignore[arg-type]
return output
|
# Copyright (c) OpenMMLab. All rights reserved.
import importlib
import os.path as osp
from mmengine.config import Config
from mmengine.config.utils import (_get_cfg_metainfo,
_get_external_cfg_base_path,
_get_package_and_cfg_path)
from mmengine.registry import MODELS, DefaultScope
from mmengine.runner import load_checkpoint
from mmengine.utils import get_installed_path, install_package
def get_config(cfg_path: str, pretrained: bool = False) -> Config:
"""Get config from external package.
Args:
cfg_path (str): External relative config path.
        pretrained (bool): Whether to save the pretrained model path. If
            ``pretrained==True``, the URL of the pretrained model can be
            accessed via ``cfg.model_path``. Defaults to False.
Examples:
>>> cfg = get_config('mmdet::faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py', pretrained=True)
>>> # Equivalent to
>>> # cfg = Config.fromfile('/path/to/faster_rcnn_r50_fpn_1x_coco.py')
>>> cfg.model_path
https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth
Returns:
Config: A `Config` parsed from external package.
""" # noqa E301
# Get package name and relative config path.
package, cfg_path = _get_package_and_cfg_path(cfg_path)
# Install package if it's not installed.
install_package(package)
package_path = get_installed_path(package)
try:
# Use `cfg_path` to search target config file.
cfg_meta = _get_cfg_metainfo(package_path, cfg_path)
cfg_path = osp.join(package_path, '.mim', cfg_meta['Config'])
cfg = Config.fromfile(cfg_path)
if pretrained:
assert 'Weights' in cfg_meta, ('Cannot find `Weights` in cfg_file'
'.metafile.yml, please check the'
'metafile')
cfg.model_path = cfg_meta['Weights']
except ValueError:
# Since the base config does not contain a metafile, the absolute
# config is `osp.join(package_path, cfg_path_prefix, cfg_name)`
cfg_path = _get_external_cfg_base_path(package_path, cfg_path)
cfg = Config.fromfile(cfg_path)
except Exception as e:
raise e
return cfg
def get_model(cfg_path: str, pretrained: bool = False, **kwargs):
"""Get built model from external package.
Args:
cfg_path (str): External relative config path with prefix
'package::' and without suffix.
pretrained (bool): Whether to load pretrained model. Defaults to False.
kwargs (dict): Default arguments to build model.
Examples:
>>> model = get_model('mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py', pretrained=True)
>>> type(model)
<class 'mmdet.models.detectors.faster_rcnn.FasterRCNN'>
Returns:
nn.Module: Built model.
""" # noqa E301
package = cfg_path.split('::')[0]
with DefaultScope.overwrite_default_scope(package): # type: ignore
cfg = get_config(cfg_path, pretrained)
models_module = importlib.import_module(f'{package}.utils')
models_module.register_all_modules() # type: ignore
model = MODELS.build(cfg.model, default_args=kwargs)
if pretrained:
load_checkpoint(model, cfg.model_path)
# Hack to use pretrained weights.
# If we do not set _is_init here, Runner will call
# `model.init_weights()` to overwrite the pretrained model.
model._is_init = True
return model
|
# Copyright (c) OpenMMLab. All rights reserved.
import importlib
import os.path as osp
from mmengine.config import Config
from mmengine.config.utils import (_get_cfg_metainfo,
_get_external_cfg_base_path,
_get_package_and_cfg_path)
from mmengine.registry import MODELS, DefaultScope
from mmengine.runner import load_checkpoint
from mmengine.utils import get_installed_path, install_package
def get_config(cfg_path: str, pretrained: bool = False) -> Config:
"""Get config from external package.
Args:
cfg_path (str): External relative config path.
        pretrained (bool): Whether to save the pretrained model path. If
            ``pretrained==True``, the URL of the pretrained model can be
            accessed via ``cfg.model_path``. Defaults to False.
Examples:
>>> cfg = get_config('mmdet::faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py', pretrained=True)
>>> # Equivalent to
>>> # cfg = Config.fromfile('/path/to/faster_rcnn_r50_fpn_1x_coco.py')
>>> cfg.model_path
https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth
Returns:
Config: A `Config` parsed from external package.
""" # noqa E301
# Get package name and relative config path.
package, cfg_path = _get_package_and_cfg_path(cfg_path)
# Install package if it's not installed.
install_package(package)
package_path = get_installed_path(package)
try:
# Use `cfg_path` to search target config file.
cfg_meta = _get_cfg_metainfo(package_path, cfg_path)
cfg_path = osp.join(package_path, '.mim', cfg_meta['Config'])
cfg = Config.fromfile(cfg_path)
if pretrained:
assert 'Weights' in cfg_meta, ('Cannot find `Weights` in cfg_file'
'.metafile.yml, please check the'
'metafile')
cfg.model_path = cfg_meta['Weights']
except ValueError:
# Since the base config does not contain a metafile, the absolute
# config is `osp.join(package_path, cfg_path_prefix, cfg_name)`
cfg_path = _get_external_cfg_base_path(package_path, cfg_path)
cfg = Config.fromfile(cfg_path)
except Exception as e:
raise e
return cfg
def get_model(cfg_path: str, pretrained: bool = False, **kwargs):
"""Get built model from external package.
Args:
cfg_path (str): External relative config path with prefix
'package::' and without suffix.
pretrained (bool): Whether to load pretrained model. Defaults to False.
kwargs (dict): Default arguments to build model.
Examples:
>>> model = get_model('mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py', pretrained=True)
>>> type(model)
<class 'mmdet.models.detectors.faster_rcnn.FasterRCNN'>
Returns:
nn.Module: Built model.
""" # noqa E301
package = cfg_path.split('::')[0]
with DefaultScope.overwrite_default_scope(package): # type: ignore
cfg = get_config(cfg_path, pretrained)
models_module = importlib.import_module(f'{package}.utils')
models_module.register_all_modules() # type: ignore
model = MODELS.build(cfg.model, default_args=kwargs)
if pretrained:
load_checkpoint(model, cfg.model_path)
return model
|
"""
This script demonstrates how to train a Sparse Encoder model for Information Retrieval.
As dataset, we use the triplet subset of sentence-transformers/quora-duplicates, i.e. (anchor, positive, negative) triplets.
As loss function, we use SparseMultipleNegativesRankingLoss wrapped in SpladeLoss.
"""
import logging
import traceback
from datasets import load_dataset
from sentence_transformers import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.sparse_encoder import evaluation, losses
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
model_name = "distilbert/distilbert-base-uncased"
train_batch_size = 12
num_epochs = 1
lambda_query = 5e-5
lambda_corpus = 3e-5
learning_rate = 2e-5
# 1. Define our SparseEncoder model
model = SparseEncoder(
model_name,
model_card_data=SparseEncoderModelCardData(
language="en",
license="apache-2.0",
model_name="splade-distilbert-base-uncased trained on Quora Duplicates Questions",
),
)
    model.max_seq_length = 256  # Set the max sequence length to 256 for training
print("Model max length:", model.max_seq_length)
    # 2. Load the Quora Duplicates dataset: https://huggingface.co/datasets/sentence-transformers/quora-duplicates
    logging.info("Read the Quora Duplicates training dataset")
full_dataset = load_dataset("sentence-transformers/quora-duplicates", "triplet", split="train").select(
range(100000)
)
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Define our training loss
loss = losses.SpladeLoss(
model=model,
loss=losses.SparseMultipleNegativesRankingLoss(model=model),
lambda_query=lambda_query, # Weight for query loss
lambda_corpus=lambda_corpus, # Weight for document loss
)
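    # SpladeLoss pairs the ranking loss above with sparsity (FLOPS-style)
    # regularization of the sparse representations; lambda_query and lambda_corpus
    # scale the query- and document-side penalties, and the small values keep the
    # ranking objective dominant.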
# 4. Define the evaluator. We use the SparseNanoBEIREvaluator, which is a light-weight evaluator for English
evaluator = evaluation.SparseNanoBEIREvaluator(
dataset_names=["msmarco", "nfcorpus", "nq"], batch_size=train_batch_size
)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"splade-{short_model_name}-msmarco-mrl"
args = SparseEncoderTrainingArguments(
# Required parameter:
output_dir=f"models/{run_name}",
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
learning_rate=learning_rate,
load_best_model_at_end=True,
metric_for_best_model="eval_NanoBEIR_mean_dot_ndcg@10",
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=1650,
save_strategy="steps",
save_steps=1650,
save_total_limit=2,
logging_steps=200,
run_name=run_name, # Will be used in W&B if `wandb` is installed
seed=42,
)
# 6. Create the trainer & start training
trainer = SparseEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=evaluator,
)
trainer.train()
# 7. Evaluate the final model, using the complete NanoBEIR dataset
test_evaluator = evaluation.SparseNanoBEIREvaluator(show_progress_bar=True, batch_size=train_batch_size)
test_evaluator(model)
# 8. Save the final model
final_output_dir = f"models/{run_name}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SparseEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
if __name__ == "__main__":
main()
|
"""
This script demonstrates how to train a Sparse Encoder model for Information Retrieval.
As dataset, we use the triplet subset of sentence-transformers/quora-duplicates, i.e. (anchor, positive, negative) triplets.
As loss function, we use SparseMultipleNegativesRankingLoss wrapped in SpladeLoss.
"""
import logging
import traceback
from datasets import load_dataset
from sentence_transformers import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.sparse_encoder import evaluation, losses
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
model_name = "distilbert/distilbert-base-uncased"
train_batch_size = 12
num_epochs = 1
lambda_query = 5e-5
lambda_corpus = 3e-5
learning_rate = 2e-5
# 1. Define our SparseEncoder model
model = SparseEncoder(
model_name,
model_card_data=SparseEncoderModelCardData(
language="en",
license="apache-2.0",
model_name="splade-distilbert-base-uncased trained on Quora Duplicates Questions",
),
)
    model.max_seq_length = 256  # Set the max sequence length to 256 for training
print("Model max length:", model.max_seq_length)
    # 2. Load the Quora Duplicates dataset: https://huggingface.co/datasets/sentence-transformers/quora-duplicates
    logging.info("Read the Quora Duplicates training dataset")
full_dataset = load_dataset("sentence-transformers/quora-duplicates", "triplet", split="train").select(
range(100000)
)
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Define our training loss
loss = losses.SpladeLoss(
model=model,
loss=losses.SparseMultipleNegativesRankingLoss(model=model),
lambda_query=lambda_query, # Weight for query loss
lambda_corpus=lambda_corpus, # Weight for document loss
)
# 4. Define the evaluator. We use the SparseNanoBEIREvaluator, which is a light-weight evaluator for English
evaluator = evaluation.SparseNanoBEIREvaluator(
dataset_names=["msmarco", "nfcorpus", "nq"], batch_size=train_batch_size
)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"{short_model_name}-msmarco-mrl"
args = SparseEncoderTrainingArguments(
# Required parameter:
output_dir=f"models/{run_name}",
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
learning_rate=learning_rate,
load_best_model_at_end=True,
metric_for_best_model="eval_NanoBEIR_mean_dot_ndcg@10",
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=1650,
save_strategy="steps",
save_steps=1650,
save_total_limit=2,
logging_steps=200,
run_name=run_name, # Will be used in W&B if `wandb` is installed
seed=42,
)
# 6. Create the trainer & start training
trainer = SparseEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=evaluator,
)
trainer.train()
# 7. Evaluate the final model, using the complete NanoBEIR dataset
test_evaluator = evaluation.SparseNanoBEIREvaluator(show_progress_bar=True, batch_size=train_batch_size)
test_evaluator(model)
# 8. Save the final model
final_output_dir = f"models/{run_name}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SparseEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
if __name__ == "__main__":
main()
|
from functools import partial
from huggingface_hub import hf_hub_url
from huggingface_hub.utils import get_session, hf_raise_for_status
hf_dataset_url = partial(hf_hub_url, repo_type="dataset")
def check_auth(hf_api, repo_id, token=None):
headers = hf_api._build_hf_headers(token=token)
path = f"{hf_api.endpoint}/api/datasets/{repo_id}/auth-check"
r = get_session().get(path, headers=headers)
hf_raise_for_status(r)
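# Hedged usage sketch (repo and token values hypothetical):
#   from huggingface_hub import HfApi
#   check_auth(HfApi(), repo_id="user/private-dataset", token="hf_xxx")
# hf_raise_for_status raises if the token cannot access the gated/private repo.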
|
from functools import partial
from huggingface_hub import hf_hub_url
hf_dataset_url = partial(hf_hub_url, repo_type="dataset")
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src.testing import test_case
class SpatialDropoutTest(test_case.TestCase):
@pytest.mark.requires_trainable_backend
def test_spatial_dropout_1d(self):
self.run_layer_test(
layers.SpatialDropout1D,
init_kwargs={"rate": 0.5},
call_kwargs={"training": True},
input_shape=(2, 3, 4),
assert_built_after_instantiation=True,
)
self.run_layer_test(
layers.SpatialDropout1D,
init_kwargs={"rate": 0.5},
call_kwargs={"training": False},
input_shape=(2, 3, 4),
assert_built_after_instantiation=True,
)
@pytest.mark.requires_trainable_backend
def test_spatial_dropout_2d(self):
self.run_layer_test(
layers.SpatialDropout2D,
init_kwargs={"rate": 0.5},
call_kwargs={"training": True},
input_shape=(2, 3, 4, 5),
assert_built_after_instantiation=True,
)
self.run_layer_test(
layers.SpatialDropout2D,
init_kwargs={"rate": 0.5, "data_format": "channels_first"},
call_kwargs={"training": True},
input_shape=(2, 3, 4, 5),
assert_built_after_instantiation=True,
)
@pytest.mark.requires_trainable_backend
def test_spatial_dropout_3d(self):
self.run_layer_test(
layers.SpatialDropout3D,
init_kwargs={"rate": 0.5},
call_kwargs={"training": True},
input_shape=(2, 3, 4, 4, 5),
assert_built_after_instantiation=True,
)
self.run_layer_test(
layers.SpatialDropout3D,
init_kwargs={"rate": 0.5, "data_format": "channels_first"},
call_kwargs={"training": True},
input_shape=(2, 3, 4, 4, 5),
assert_built_after_instantiation=True,
)
def test_spatial_dropout_1D_dynamic(self):
inputs = layers.Input((3, 2))
layer = layers.SpatialDropout1D(0.5)
layer(inputs, training=True)
def test_spatial_dropout_1D_correctness(self):
inputs = np.ones((10, 3, 10))
layer = layers.SpatialDropout1D(0.5)
outputs = layer(inputs, training=True)
self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
def test_spatial_dropout_2D_dynamic(self):
inputs = layers.Input((3, 2, 4))
layer = layers.SpatialDropout2D(0.5)
layer(inputs, training=True)
def test_spatial_dropout_2D_correctness(self):
if backend.config.image_data_format() == "channels_last":
inputs = np.ones((10, 3, 3, 10))
else:
inputs = np.ones((10, 10, 3, 3))
layer = layers.SpatialDropout2D(0.5)
outputs = layer(inputs, training=True)
if backend.config.image_data_format() == "channels_last":
self.assertAllClose(outputs[:, 0, 0, :], outputs[:, 1, 1, :])
else:
self.assertAllClose(outputs[:, :, 0, 0], outputs[:, :, 1, 1])
def test_spatial_dropout_3D_dynamic(self):
inputs = layers.Input((3, 2, 4, 2))
layer = layers.SpatialDropout3D(0.5)
layer(inputs, training=True)
def test_spatial_dropout_3D_correctness(self):
if backend.config.image_data_format() == "channels_last":
inputs = np.ones((10, 3, 3, 3, 10))
else:
inputs = np.ones((10, 10, 3, 3, 3))
layer = layers.SpatialDropout3D(0.5)
outputs = layer(inputs, training=True)
if backend.config.image_data_format() == "channels_last":
self.assertAllClose(outputs[:, 0, 0, 0, :], outputs[:, 1, 1, 1, :])
else:
self.assertAllClose(outputs[:, :, 0, 0, 0], outputs[:, :, 1, 1, 1])
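# Why the correctness checks above hold: SpatialDropout zeroes entire feature
# maps (channels) per sample rather than individual activations, so every
# spatial position within a sample shares one mask and any two spatial slices
# of the output must be identical.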
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src.testing import test_case
class SpatialDropoutTest(test_case.TestCase):
@pytest.mark.requires_trainable_backend
def test_spatial_dropout_1d(self):
self.run_layer_test(
layers.SpatialDropout1D,
init_kwargs={"rate": 0.5},
call_kwargs={"training": True},
input_shape=(2, 3, 4),
)
self.run_layer_test(
layers.SpatialDropout1D,
init_kwargs={"rate": 0.5},
call_kwargs={"training": False},
input_shape=(2, 3, 4),
)
@pytest.mark.requires_trainable_backend
def test_spatial_dropout_2d(self):
self.run_layer_test(
layers.SpatialDropout2D,
init_kwargs={"rate": 0.5},
call_kwargs={"training": True},
input_shape=(2, 3, 4, 5),
)
self.run_layer_test(
layers.SpatialDropout2D,
init_kwargs={"rate": 0.5, "data_format": "channels_first"},
call_kwargs={"training": True},
input_shape=(2, 3, 4, 5),
)
@pytest.mark.requires_trainable_backend
def test_spatial_dropout_3d(self):
self.run_layer_test(
layers.SpatialDropout3D,
init_kwargs={"rate": 0.5},
call_kwargs={"training": True},
input_shape=(2, 3, 4, 4, 5),
)
self.run_layer_test(
layers.SpatialDropout3D,
init_kwargs={"rate": 0.5, "data_format": "channels_first"},
call_kwargs={"training": True},
input_shape=(2, 3, 4, 4, 5),
)
def test_spatial_dropout_1D_dynamic(self):
inputs = layers.Input((3, 2))
layer = layers.SpatialDropout1D(0.5)
layer(inputs, training=True)
def test_spatial_dropout_1D_correctness(self):
inputs = np.ones((10, 3, 10))
layer = layers.SpatialDropout1D(0.5)
outputs = layer(inputs, training=True)
self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
def test_spatial_dropout_2D_dynamic(self):
inputs = layers.Input((3, 2, 4))
layer = layers.SpatialDropout2D(0.5)
layer(inputs, training=True)
def test_spatial_dropout_2D_correctness(self):
if backend.config.image_data_format() == "channels_last":
inputs = np.ones((10, 3, 3, 10))
else:
inputs = np.ones((10, 10, 3, 3))
layer = layers.SpatialDropout2D(0.5)
outputs = layer(inputs, training=True)
if backend.config.image_data_format() == "channels_last":
self.assertAllClose(outputs[:, 0, 0, :], outputs[:, 1, 1, :])
else:
self.assertAllClose(outputs[:, :, 0, 0], outputs[:, :, 1, 1])
def test_spatial_dropout_3D_dynamic(self):
inputs = layers.Input((3, 2, 4, 2))
layer = layers.SpatialDropout3D(0.5)
layer(inputs, training=True)
def test_spatial_dropout_3D_correctness(self):
if backend.config.image_data_format() == "channels_last":
inputs = np.ones((10, 3, 3, 3, 10))
else:
inputs = np.ones((10, 10, 3, 3, 3))
layer = layers.SpatialDropout3D(0.5)
outputs = layer(inputs, training=True)
if backend.config.image_data_format() == "channels_last":
self.assertAllClose(outputs[:, 0, 0, 0, :], outputs[:, 1, 1, 1, :])
else:
self.assertAllClose(outputs[:, :, 0, 0, 0], outputs[:, :, 1, 1, 1])
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
file_client_args = dict(backend='disk')
# uncomment the code below to use a different file client
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomResize', scale=image_size, ratio_range=(0.1, 2.0)),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=4, # simply change this from 2 to 16 for 50e - 400e training.
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=25, val_interval=5)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# optimizer assumes bs=64
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
        end=25,  # match max_epochs so the milestones at epochs 22 and 24 take effect
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
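# Schedule note: RepeatDataset(times=4) with max_epochs=25 traverses the raw
# COCO train split ~100 times, i.e. the large-scale-jitter "100e" schedule
# expressed as 25 loop epochs.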
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
image_size = (1024, 1024)
file_client_args = dict(backend='disk')
# uncomment the code below to use a different file client
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=image_size,
ratio_range=(0.1, 2.0),
multiscale_mode='range',
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=image_size), # padding to image_size leads 0.5+ mAP
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
# Use RepeatDataset to speed up training
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type='RepeatDataset',
times=4, # simply change this from 2 to 16 for 50e - 400e training.
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=5, metric=['bbox', 'segm'])
# optimizer assumes bs=64
optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.067,
step=[22, 24])
runner = dict(type='EpochBasedRunner', max_epochs=25)
|
from ._conformer_wav2vec2 import (
conformer_wav2vec2_base,
conformer_wav2vec2_model,
conformer_wav2vec2_pretrain_base,
conformer_wav2vec2_pretrain_large,
conformer_wav2vec2_pretrain_model,
ConformerWav2Vec2PretrainModel,
)
from ._emformer_hubert import emformer_hubert_base, emformer_hubert_model
from .conv_emformer import ConvEmformer
from .hifi_gan import hifigan_vocoder, hifigan_vocoder_v1, hifigan_vocoder_v2, hifigan_vocoder_v3, HiFiGANVocoder
from .rnnt import conformer_rnnt_base, conformer_rnnt_biasing, conformer_rnnt_biasing_base, conformer_rnnt_model
from .rnnt_decoder import Hypothesis, RNNTBeamSearchBiasing
from .squim import (
squim_objective_base,
squim_objective_model,
squim_subjective_base,
squim_subjective_model,
SquimObjective,
SquimSubjective,
)
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"conformer_rnnt_biasing",
"conformer_rnnt_biasing_base",
"conv_tasnet_base",
"ConvEmformer",
"conformer_wav2vec2_model",
"conformer_wav2vec2_base",
"conformer_wav2vec2_pretrain_model",
"conformer_wav2vec2_pretrain_base",
"conformer_wav2vec2_pretrain_large",
"ConformerWav2Vec2PretrainModel",
"emformer_hubert_base",
"emformer_hubert_model",
"Hypothesis",
"RNNTBeamSearchBiasing",
"HiFiGANVocoder",
"hifigan_vocoder_v1",
"hifigan_vocoder_v2",
"hifigan_vocoder_v3",
"hifigan_vocoder",
"squim_objective_base",
"squim_objective_model",
"squim_subjective_base",
"squim_subjective_model",
"SquimObjective",
"SquimSubjective",
]
|
from ._conformer_wav2vec2 import (
conformer_wav2vec2_base,
conformer_wav2vec2_model,
conformer_wav2vec2_pretrain_base,
conformer_wav2vec2_pretrain_large,
conformer_wav2vec2_pretrain_model,
ConformerWav2Vec2PretrainModel,
)
from ._emformer_hubert import emformer_hubert_base, emformer_hubert_model
from .conv_emformer import ConvEmformer
from .hifi_gan import hifigan_vocoder, hifigan_vocoder_v1, hifigan_vocoder_v2, hifigan_vocoder_v3, HiFiGANVocoder
from .rnnt import conformer_rnnt_base, conformer_rnnt_biasing, conformer_rnnt_biasing_base, conformer_rnnt_model
from .rnnt_decoder import Hypothesis, RNNTBeamSearchBiasing
from .squim import squim_objective_base, squim_objective_model, SquimObjective
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"conformer_rnnt_biasing",
"conformer_rnnt_biasing_base",
"conv_tasnet_base",
"ConvEmformer",
"conformer_wav2vec2_model",
"conformer_wav2vec2_base",
"conformer_wav2vec2_pretrain_model",
"conformer_wav2vec2_pretrain_base",
"conformer_wav2vec2_pretrain_large",
"ConformerWav2Vec2PretrainModel",
"emformer_hubert_base",
"emformer_hubert_model",
"Hypothesis",
"RNNTBeamSearchBiasing",
"HiFiGANVocoder",
"hifigan_vocoder_v1",
"hifigan_vocoder_v2",
"hifigan_vocoder_v3",
"hifigan_vocoder",
"squim_objective_base",
"squim_objective_model",
"SquimObjective",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from mmengine.structures import InstanceData
from mmengine.testing import assert_allclose
from mmdet.models.task_modules.assigners import PointAssigner
class TestPointAssigner(unittest.TestCase):
def test_point_assigner(self):
assigner = PointAssigner()
pred_instances = InstanceData()
pred_instances.priors = torch.FloatTensor([
# [x, y, stride]
[0, 0, 1],
[10, 10, 1],
[5, 5, 1],
[32, 32, 1],
])
gt_instances = InstanceData()
gt_instances.bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_instances.labels = torch.LongTensor([0, 1])
assign_result = assigner.assign(pred_instances, gt_instances)
expected_gt_inds = torch.LongTensor([1, 2, 1, 0])
assert_allclose(assign_result.gt_inds, expected_gt_inds)
def test_point_assigner_with_empty_gt(self):
"""Test corner case where an image might have no true detections."""
assigner = PointAssigner()
pred_instances = InstanceData()
pred_instances.priors = torch.FloatTensor([
# [x, y, stride]
[0, 0, 1],
[10, 10, 1],
[5, 5, 1],
[32, 32, 1],
])
gt_instances = InstanceData()
gt_instances.bboxes = torch.FloatTensor([])
gt_instances.labels = torch.LongTensor([])
assign_result = assigner.assign(pred_instances, gt_instances)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
assert_allclose(assign_result.gt_inds, expected_gt_inds)
def test_point_assigner_with_empty_boxes_and_gt(self):
"""Test corner case where an image might predict no points and no
gt."""
assigner = PointAssigner()
pred_instances = InstanceData()
pred_instances.priors = torch.FloatTensor([])
gt_instances = InstanceData()
gt_instances.bboxes = torch.FloatTensor([])
gt_instances.labels = torch.LongTensor([])
assign_result = assigner.assign(pred_instances, gt_instances)
self.assertEqual(len(assign_result.gt_inds), 0)
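# Reading expected_gt_inds in the tests above: in AssignResult.gt_inds, 0 marks
# an unassigned (background) prior and a positive value k means the prior was
# matched to ground-truth box k-1, so the point at (32, 32), far from both
# boxes, receives 0.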
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from mmengine.data import InstanceData
from mmengine.testing import assert_allclose
from mmdet.models.task_modules.assigners import PointAssigner
class TestPointAssigner(unittest.TestCase):
def test_point_assigner(self):
assigner = PointAssigner()
pred_instances = InstanceData()
pred_instances.priors = torch.FloatTensor([
# [x, y, stride]
[0, 0, 1],
[10, 10, 1],
[5, 5, 1],
[32, 32, 1],
])
gt_instances = InstanceData()
gt_instances.bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_instances.labels = torch.LongTensor([0, 1])
assign_result = assigner.assign(pred_instances, gt_instances)
expected_gt_inds = torch.LongTensor([1, 2, 1, 0])
assert_allclose(assign_result.gt_inds, expected_gt_inds)
def test_point_assigner_with_empty_gt(self):
"""Test corner case where an image might have no true detections."""
assigner = PointAssigner()
pred_instances = InstanceData()
pred_instances.priors = torch.FloatTensor([
# [x, y, stride]
[0, 0, 1],
[10, 10, 1],
[5, 5, 1],
[32, 32, 1],
])
gt_instances = InstanceData()
gt_instances.bboxes = torch.FloatTensor([])
gt_instances.labels = torch.LongTensor([])
assign_result = assigner.assign(pred_instances, gt_instances)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
assert_allclose(assign_result.gt_inds, expected_gt_inds)
def test_point_assigner_with_empty_boxes_and_gt(self):
"""Test corner case where an image might predict no points and no
gt."""
assigner = PointAssigner()
pred_instances = InstanceData()
pred_instances.priors = torch.FloatTensor([])
gt_instances = InstanceData()
gt_instances.bboxes = torch.FloatTensor([])
gt_instances.labels = torch.LongTensor([])
assign_result = assigner.assign(pred_instances, gt_instances)
self.assertEqual(len(assign_result.gt_inds), 0)
|
# coding: utf-8
import pytest
import lightgbm as lgb
from .utils import SERIALIZERS, pickle_and_unpickle_object
def reset_feature_fraction(boosting_round):
return 0.6 if boosting_round < 15 else 0.8
@pytest.mark.parametrize('serializer', SERIALIZERS)
def test_early_stopping_callback_is_picklable(serializer):
rounds = 5
callback = lgb.early_stopping(stopping_rounds=rounds)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 30
assert callback_from_disk.before_iteration is False
assert callback.stopping_rounds == callback_from_disk.stopping_rounds
assert callback.stopping_rounds == rounds
@pytest.mark.parametrize('serializer', SERIALIZERS)
def test_log_evaluation_callback_is_picklable(serializer):
periods = 42
callback = lgb.log_evaluation(period=periods)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 10
assert callback_from_disk.before_iteration is False
assert callback.period == callback_from_disk.period
assert callback.period == periods
@pytest.mark.parametrize('serializer', SERIALIZERS)
def test_record_evaluation_callback_is_picklable(serializer):
results = {}
callback = lgb.record_evaluation(eval_result=results)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 20
assert callback_from_disk.before_iteration is False
assert callback.eval_result == callback_from_disk.eval_result
assert callback.eval_result is results
@pytest.mark.parametrize('serializer', SERIALIZERS)
def test_reset_parameter_callback_is_picklable(serializer):
params = {
'bagging_fraction': [0.7] * 5 + [0.6] * 5,
'feature_fraction': reset_feature_fraction
}
callback = lgb.reset_parameter(**params)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 10
assert callback_from_disk.before_iteration is True
assert callback.kwargs == callback_from_disk.kwargs
assert callback.kwargs == params
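# Assumption about the shared helper: pickle_and_unpickle_object round-trips the
# callback through the given serializer (SERIALIZERS typically covers pickle,
# joblib and cloudpickle), so these tests check that callback state survives a
# full serialize/deserialize cycle.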
|
# coding: utf-8
import pytest
import lightgbm as lgb
from .utils import SERIALIZERS, pickle_and_unpickle_object, pickle_obj, unpickle_obj
def reset_feature_fraction(boosting_round):
return 0.6 if boosting_round < 15 else 0.8
@pytest.mark.parametrize('serializer', SERIALIZERS)
def test_early_stopping_callback_is_picklable(serializer):
rounds = 5
callback = lgb.early_stopping(stopping_rounds=rounds)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 30
assert callback_from_disk.before_iteration is False
assert callback.stopping_rounds == callback_from_disk.stopping_rounds
assert callback.stopping_rounds == rounds
@pytest.mark.parametrize('serializer', SERIALIZERS)
def test_log_evaluation_callback_is_picklable(serializer):
periods = 42
callback = lgb.log_evaluation(period=periods)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 10
assert callback_from_disk.before_iteration is False
assert callback.period == callback_from_disk.period
assert callback.period == periods
@pytest.mark.parametrize('serializer', SERIALIZERS)
def test_record_evaluation_callback_is_picklable(serializer):
results = {}
callback = lgb.record_evaluation(eval_result=results)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 20
assert callback_from_disk.before_iteration is False
assert callback.eval_result == callback_from_disk.eval_result
assert callback.eval_result is results
@pytest.mark.parametrize('serializer', SERIALIZERS)
def test_reset_parameter_callback_is_picklable(serializer):
params = {
'bagging_fraction': [0.7] * 5 + [0.6] * 5,
'feature_fraction': reset_feature_fraction
}
callback = lgb.reset_parameter(**params)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 10
assert callback_from_disk.before_iteration is True
assert callback.kwargs == callback_from_disk.kwargs
assert callback.kwargs == params
|
"""Loads rich text files."""
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredRTFLoader(UnstructuredFileLoader):
"""Load `RTF` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredRTFLoader
loader = UnstructuredRTFLoader(
"example.rtf", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-rtf
"""
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""
Initialize with a file path.
Args:
file_path: The path to the file to load.
mode: The mode to use for partitioning. See unstructured for details.
Defaults to "single".
**unstructured_kwargs: Additional keyword arguments to pass
to unstructured.
"""
file_path = str(file_path)
validate_unstructured_version("0.5.12")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.rtf import partition_rtf
return partition_rtf(filename=self.file_path, **self.unstructured_kwargs)
|
"""Loads rich text files."""
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredRTFLoader(UnstructuredFileLoader):
"""Load `RTF` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredRTFLoader
loader = UnstructuredRTFLoader(
"example.rtf", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-rtf
"""
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""
Initialize with a file path.
Args:
file_path: The path to the file to load.
mode: The mode to use for partitioning. See unstructured for details.
Defaults to "single".
**unstructured_kwargs: Additional keyword arguments to pass
to unstructured.
"""
file_path = str(file_path)
validate_unstructured_version("0.5.12")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.rtf import partition_rtf
return partition_rtf(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
|
import re
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS
class ConvoOutputParser(AgentOutputParser):
"""Output parser for the conversational agent."""
ai_prefix: str = "AI"
"""Prefix to use before AI output."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Parse the output from the agent into
an AgentAction or AgentFinish object.
Args:
text: The text to parse.
Returns:
An AgentAction or AgentFinish object.
"""
if f"{self.ai_prefix}:" in text:
return AgentFinish(
{"output": text.split(f"{self.ai_prefix}:")[-1].strip()}, text
)
regex = r"Action: (.*?)[\n]*Action Input: ([\s\S]*)"
match = re.search(regex, text, re.DOTALL)
if not match:
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(msg)
action = match.group(1)
action_input = match.group(2)
return AgentAction(action.strip(), action_input.strip(" ").strip('"'), text)
@property
def _type(self) -> str:
return "conversational"
|
import re
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS
class ConvoOutputParser(AgentOutputParser):
"""Output parser for the conversational agent."""
ai_prefix: str = "AI"
"""Prefix to use before AI output."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Parse the output from the agent into
an AgentAction or AgentFinish object.
Args:
text: The text to parse.
Returns:
An AgentAction or AgentFinish object.
"""
if f"{self.ai_prefix}:" in text:
return AgentFinish(
{"output": text.split(f"{self.ai_prefix}:")[-1].strip()}, text
)
regex = r"Action: (.*?)[\n]*Action Input: ([\s\S]*)"
match = re.search(regex, text, re.DOTALL)
if not match:
raise OutputParserException(f"Could not parse LLM output: `{text}`")
action = match.group(1)
action_input = match.group(2)
return AgentAction(action.strip(), action_input.strip(" ").strip('"'), text)
@property
def _type(self) -> str:
return "conversational"
|
from typing import Any, Dict, Iterator
import torch
from ..utils import _log_api_usage_once
try:
from ._load_gpu_decoder import _HAS_GPU_VIDEO_DECODER
except ModuleNotFoundError:
_HAS_GPU_VIDEO_DECODER = False
from ._video_opt import (
_HAS_VIDEO_OPT,
_probe_video_from_file,
_probe_video_from_memory,
_read_video_from_file,
_read_video_from_memory,
_read_video_timestamps_from_file,
_read_video_timestamps_from_memory,
Timebase,
VideoMetaData,
)
from .image import (
decode_image,
decode_jpeg,
decode_png,
encode_jpeg,
encode_png,
ImageReadMode,
read_file,
read_image,
write_file,
write_jpeg,
write_png,
)
from .video import read_video, read_video_timestamps, write_video
from .video_reader import VideoReader
__all__ = [
"write_video",
"read_video",
"read_video_timestamps",
"_read_video_from_file",
"_read_video_timestamps_from_file",
"_probe_video_from_file",
"_read_video_from_memory",
"_read_video_timestamps_from_memory",
"_probe_video_from_memory",
"_HAS_VIDEO_OPT",
"_HAS_GPU_VIDEO_DECODER",
"_read_video_clip_from_memory",
"_read_video_meta_data",
"VideoMetaData",
"Timebase",
"ImageReadMode",
"decode_image",
"decode_jpeg",
"decode_png",
"encode_jpeg",
"encode_png",
"read_file",
"read_image",
"write_file",
"write_jpeg",
"write_png",
"Video",
"VideoReader",
]
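# Hedged usage sketch for the re-exported read/write helpers; "sample.mp4" is
# a placeholder path and a working ffmpeg/PyAV backend is assumed.
if __name__ == "__main__":
    import torch
    from torchvision.io import read_video, write_video

    frames = torch.randint(0, 256, (16, 64, 64, 3), dtype=torch.uint8)  # (T, H, W, C)
    write_video("sample.mp4", frames, fps=8)
    video, audio, info = read_video("sample.mp4", pts_unit="sec")
    print(video.shape, info)  # uint8 video tensor plus metadata such as video_fps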
|
from typing import Any, Dict, Iterator
import torch
from ..utils import _log_api_usage_once
from ._video_opt import (
_HAS_VIDEO_OPT,
_probe_video_from_file,
_probe_video_from_memory,
_read_video_from_file,
_read_video_from_memory,
_read_video_timestamps_from_file,
_read_video_timestamps_from_memory,
Timebase,
VideoMetaData,
)
from .image import (
decode_image,
decode_jpeg,
decode_png,
encode_jpeg,
encode_png,
ImageReadMode,
read_file,
read_image,
write_file,
write_jpeg,
write_png,
)
from .video import _HAS_GPU_VIDEO_DECODER, read_video, read_video_timestamps, write_video
from .video_reader import VideoReader
__all__ = [
"write_video",
"read_video",
"read_video_timestamps",
"_read_video_from_file",
"_read_video_timestamps_from_file",
"_probe_video_from_file",
"_read_video_from_memory",
"_read_video_timestamps_from_memory",
"_probe_video_from_memory",
"_HAS_VIDEO_OPT",
"_HAS_GPU_VIDEO_DECODER",
"_read_video_clip_from_memory",
"_read_video_meta_data",
"VideoMetaData",
"Timebase",
"ImageReadMode",
"decode_image",
"decode_jpeg",
"decode_png",
"encode_jpeg",
"encode_png",
"read_file",
"read_image",
"write_file",
"write_jpeg",
"write_png",
"Video",
"VideoReader",
]
|
import abc
import argparse
import functools
import inspect
from typing import TYPE_CHECKING, Callable, Optional
from jina.helper import convert_tuple_to_list
from jina.jaml import JAMLCompatible
from jina.logging.logger import JinaLogger
from jina.serve.helper import store_init_kwargs, wrap_func
from jina.serve.streamer import GatewayStreamer
__all__ = ['BaseGateway']
if TYPE_CHECKING:
from prometheus_client import CollectorRegistry
class GatewayType(type(JAMLCompatible), type):
"""The class of Gateway type, which is the metaclass of :class:`BaseGateway`."""
def __new__(cls, *args, **kwargs):
"""
# noqa: DAR101
# noqa: DAR102
:return: Gateway class
"""
_cls = super().__new__(cls, *args, **kwargs)
return cls.register_class(_cls)
@staticmethod
def register_class(cls):
"""
Register a class.
:param cls: The class.
:return: The class, after being registered.
"""
reg_cls_set = getattr(cls, '_registered_class', set())
cls_id = f'{cls.__module__}.{cls.__name__}'
if cls_id not in reg_cls_set:
reg_cls_set.add(cls_id)
setattr(cls, '_registered_class', reg_cls_set)
wrap_func(
cls, ['__init__'], store_init_kwargs, taboo={'self', 'args', 'kwargs'}
)
return cls
class BaseGateway(JAMLCompatible, metaclass=GatewayType):
"""
    The base class of all custom Gateways. It can be used to build a custom interface
    to a Jina Flow that supports gateway logic.
    :class:`jina.Gateway` is an alias for this class.
"""
def __init__(
self,
name: Optional[str] = 'gateway',
**kwargs,
):
"""
:param name: Gateway pod name
        :param kwargs: additional keyword arguments to avoid failing when unexpected extra params are passed
"""
self.streamer = None
self.name = name
# TODO: original implementation also passes args, maybe move this to a setter/initializer func
self.logger = JinaLogger(self.name)
def set_streamer(
self,
args: 'argparse.Namespace' = None,
timeout_send: Optional[float] = None,
metrics_registry: Optional['CollectorRegistry'] = None,
runtime_name: Optional[str] = None,
):
"""
Set streamer object by providing runtime parameters.
:param args: runtime args
:param timeout_send: grpc connection timeout
:param metrics_registry: metric registry when monitoring is enabled
:param runtime_name: name of the runtime providing the streamer
"""
import json
from jina.serve.streamer import GatewayStreamer
graph_description = json.loads(args.graph_description)
graph_conditions = json.loads(args.graph_conditions)
deployments_addresses = json.loads(args.deployments_addresses)
deployments_disable_reduce = json.loads(args.deployments_disable_reduce)
self.streamer = GatewayStreamer(
graph_representation=graph_description,
executor_addresses=deployments_addresses,
graph_conditions=graph_conditions,
deployments_disable_reduce=deployments_disable_reduce,
timeout_send=timeout_send,
retries=args.retries,
compression=args.compression,
runtime_name=runtime_name,
prefetch=args.prefetch,
logger=self.logger,
metrics_registry=metrics_registry,
)
@abc.abstractmethod
async def setup_server(self):
"""Setup server"""
...
@abc.abstractmethod
async def run_server(self):
"""Run server forever"""
...
async def teardown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
await self.streamer.close()
@abc.abstractmethod
async def stop_server(self):
"""Stop server"""
...
# some servers need to set a flag useful in handling termination signals
    # e.g., HTTPGateway / WebSocketGateway
@property
def should_exit(self) -> bool:
"""
Boolean flag that indicates whether the gateway server should exit or not
:return: boolean flag
"""
return False
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
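# A minimal custom-gateway sketch under the contract above: subclasses only
# need to provide the three abstract server hooks. The logging bodies are
# placeholders, not a real server implementation.
class NoOpGateway(BaseGateway):
    async def setup_server(self):
        self.logger.info('allocating server resources')

    async def run_server(self):
        self.logger.info('serving until stop_server() is called')

    async def stop_server(self):
        self.logger.info('releasing the server')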
|
import abc
import argparse
from typing import TYPE_CHECKING, Optional
from jina.jaml import JAMLCompatible
from jina.logging.logger import JinaLogger
from jina.serve.streamer import GatewayStreamer
__all__ = ['BaseGateway']
if TYPE_CHECKING:
from prometheus_client import CollectorRegistry
class BaseGateway(JAMLCompatible):
"""
    The base class of all custom Gateways. It can be used to build a custom interface
    to a Jina Flow that supports gateway logic.
    :class:`jina.Gateway` is an alias for this class.
"""
def __init__(
self,
name: Optional[str] = 'gateway',
**kwargs,
):
"""
:param name: Gateway pod name
        :param kwargs: additional keyword arguments to avoid failing when unexpected extra params are passed
"""
self.streamer = None
self.name = name
# TODO: original implementation also passes args, maybe move this to a setter/initializer func
self.logger = JinaLogger(self.name)
def set_streamer(
self,
args: 'argparse.Namespace' = None,
timeout_send: Optional[float] = None,
metrics_registry: Optional['CollectorRegistry'] = None,
runtime_name: Optional[str] = None,
):
"""
Set streamer object by providing runtime parameters.
:param args: runtime args
:param timeout_send: grpc connection timeout
:param metrics_registry: metric registry when monitoring is enabled
:param runtime_name: name of the runtime providing the streamer
"""
import json
from jina.serve.streamer import GatewayStreamer
graph_description = json.loads(args.graph_description)
graph_conditions = json.loads(args.graph_conditions)
deployments_addresses = json.loads(args.deployments_addresses)
deployments_disable_reduce = json.loads(args.deployments_disable_reduce)
self.streamer = GatewayStreamer(
graph_representation=graph_description,
executor_addresses=deployments_addresses,
graph_conditions=graph_conditions,
deployments_disable_reduce=deployments_disable_reduce,
timeout_send=timeout_send,
retries=args.retries,
compression=args.compression,
runtime_name=runtime_name,
prefetch=args.prefetch,
logger=self.logger,
metrics_registry=metrics_registry,
)
@abc.abstractmethod
async def setup_server(self):
"""Setup server"""
...
@abc.abstractmethod
async def run_server(self):
"""Run server forever"""
...
async def teardown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
await self.streamer.close()
@abc.abstractmethod
async def stop_server(self):
"""Stop server"""
...
# some servers need to set a flag useful in handling termination signals
    # e.g., HTTPGateway / WebSocketGateway
@property
def should_exit(self) -> bool:
"""
Boolean flag that indicates whether the gateway server should exit or not
:return: boolean flag
"""
return False
|
"""Tool for the Serper.dev Google Search API."""
from typing import Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper
class GoogleSerperRun(BaseTool):
"""Tool that queries the Serper.dev Google search API."""
name: str = "google_serper"
description: str = (
"A low-cost Google Search API."
"Useful for when you need to answer questions about current events."
"Input should be a search query."
)
api_wrapper: GoogleSerperAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return str(self.api_wrapper.run(query))
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return (await self.api_wrapper.arun(query)).__str__()
class GoogleSerperResults(BaseTool):
"""Tool that queries the Serper.dev Google Search API
and get back json."""
name: str = "google_serper_results_json"
description: str = (
"A low-cost Google Search API."
"Useful for when you need to answer questions about current events."
"Input should be a search query. Output is a JSON object of the query results"
)
api_wrapper: GoogleSerperAPIWrapper = Field(default_factory=GoogleSerperAPIWrapper)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return str(self.api_wrapper.results(query))
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return (await self.api_wrapper.aresults(query)).__str__()
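# Hedged usage sketch: requires a valid key in the SERPER_API_KEY environment
# variable (which GoogleSerperAPIWrapper reads by default); the query is made up.
if __name__ == "__main__":
    import os

    if os.environ.get("SERPER_API_KEY"):
        tool = GoogleSerperRun(api_wrapper=GoogleSerperAPIWrapper())
        print(tool.run("latest Python release"))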
|
"""Tool for the Serper.dev Google Search API."""
from typing import Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper
class GoogleSerperRun(BaseTool): # type: ignore[override]
"""Tool that queries the Serper.dev Google search API."""
name: str = "google_serper"
description: str = (
"A low-cost Google Search API."
"Useful for when you need to answer questions about current events."
"Input should be a search query."
)
api_wrapper: GoogleSerperAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return str(self.api_wrapper.run(query))
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return (await self.api_wrapper.arun(query)).__str__()
class GoogleSerperResults(BaseTool): # type: ignore[override]
"""Tool that queries the Serper.dev Google Search API
and get back json."""
name: str = "google_serper_results_json"
description: str = (
"A low-cost Google Search API."
"Useful for when you need to answer questions about current events."
"Input should be a search query. Output is a JSON object of the query results"
)
api_wrapper: GoogleSerperAPIWrapper = Field(default_factory=GoogleSerperAPIWrapper)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return str(self.api_wrapper.results(query))
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return (await self.api_wrapper.aresults(query)).__str__()
|
from docarray.predefined_document.audio import Audio
from docarray.predefined_document.image import Image
from docarray.predefined_document.mesh import Mesh3D
from docarray.predefined_document.point_cloud import PointCloud3D
from docarray.predefined_document.text import Text
__all__ = ['Text', 'Image', 'Audio', 'Mesh3D', 'PointCloud3D']
|
from docarray.predefined_document.image import Image
from docarray.predefined_document.mesh import Mesh3D
from docarray.predefined_document.point_cloud import PointCloud3D
from docarray.predefined_document.text import Text
__all__ = ['Text', 'Image', 'Mesh3D', 'PointCloud3D']
|
"""
Integration Tests of llama-index-vector-stores-mongodb
with MongoDB Atlas Vector Datastore and OPENAI Embedding model.
As described in docs/providers/mongodb/setup.md, to run this, one must
have a running MongoDB Atlas Cluster, and
provide a valid OPENAI_API_KEY.
"""
import os
from time import sleep
from typing import List
import pytest
from llama_index.core import StorageContext, VectorStoreIndex
from llama_index.core.schema import Document
from llama_index.vector_stores.mongodb import MongoDBAtlasVectorSearch
from pymongo import MongoClient
from .conftest import lock
@pytest.mark.skipif(
os.environ.get("MONGODB_URI") is None, reason="Requires MONGODB_URI in os.environ"
)
def test_mongodb_connection(atlas_client: MongoClient) -> None:
"""Confirm that the connection to the datastore works."""
assert atlas_client.admin.command("ping")["ok"]
@pytest.mark.skipif(
os.environ.get("MONGODB_URI") is None or os.environ.get("OPENAI_API_KEY") is None,
reason="Requires MONGODB_URI and OPENAI_API_KEY in os.environ",
)
def test_index(
documents: List[Document], vector_store: MongoDBAtlasVectorSearch
) -> None:
"""
    End-to-end example from essay and query to response,
    via NodeParser, LLM Embedding, VectorStore, and Synthesizer.
"""
with lock:
vector_store._collection.delete_many({})
sleep(2)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
question = "What are LLMs useful for?"
no_response = True
response = None
retries = 5
search_limit = query_engine.retriever.similarity_top_k
while no_response and retries:
response = query_engine.query(question)
if len(response.source_nodes) == search_limit:
no_response = False
else:
retries -= 1
sleep(5)
assert retries
assert "LLM" in response.response
|
"""Integration Tests of llama-index-vector-stores-mongodb
with MongoDB Atlas Vector Datastore and OPENAI Embedding model.
As described in docs/providers/mongodb/setup.md, to run this, one must
have a running MongoDB Atlas Cluster, and
provide a valid OPENAI_API_KEY.
"""
import os
from time import sleep
from typing import List
import pytest
from llama_index.core import StorageContext, VectorStoreIndex
from llama_index.core.schema import Document
from llama_index.vector_stores.mongodb import MongoDBAtlasVectorSearch
from pymongo import MongoClient
from .conftest import lock
@pytest.mark.skipif(
os.environ.get("MONGODB_URI") is None, reason="Requires MONGODB_URI in os.environ"
)
def test_mongodb_connection(atlas_client: MongoClient) -> None:
"""Confirm that the connection to the datastore works."""
assert atlas_client.admin.command("ping")["ok"]
@pytest.mark.skipif(
os.environ.get("MONGODB_URI") is None or os.environ.get("OPENAI_API_KEY") is None,
reason="Requires MONGODB_URI and OPENAI_API_KEY in os.environ",
)
def test_index(
documents: List[Document], vector_store: MongoDBAtlasVectorSearch
) -> None:
"""End-to-end example from essay and query to response.
via NodeParser, LLM Embedding, VectorStore, and Synthesizer.
"""
with lock:
vector_store._collection.delete_many({})
sleep(2)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
question = "What are LLMs useful for?"
no_response = True
response = None
retries = 5
search_limit = query_engine.retriever.similarity_top_k
while no_response and retries:
response = query_engine.query(question)
if len(response.source_nodes) == search_limit:
no_response = False
else:
retries -= 1
sleep(5)
assert retries
assert "LLM" in response.response
|
# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from mmengine.evaluator import Evaluator
from mmengine.model import BaseModel
from mmengine.optim import OptimWrapper
from mmengine.runner import Runner
from torch.utils.data import Dataset
from mmdet.registry import DATASETS
from mmdet.utils import register_all_modules
register_all_modules()
class ToyModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, inputs, data_samples, mode='tensor'):
labels = torch.stack(data_samples)
inputs = torch.stack(inputs)
outputs = self.linear(inputs)
if mode == 'tensor':
return outputs
elif mode == 'loss':
loss = (labels - outputs).sum()
outputs = dict(loss=loss)
return outputs
else:
return outputs
class ToyModel1(BaseModel, ToyModel):
def __init__(self):
super().__init__()
def forward(self, *args, **kwargs):
return super(BaseModel, self).forward(*args, **kwargs)
class ToyModel2(BaseModel):
def __init__(self):
super().__init__()
self.teacher = ToyModel1()
self.student = ToyModel1()
self.semi_test_cfg = dict(predict_on='teacher')
def forward(self, *args, **kwargs):
return self.student(*args, **kwargs)
@DATASETS.register_module(force=True)
class DummyDataset(Dataset):
METAINFO = dict() # type: ignore
data = torch.randn(12, 2)
label = torch.ones(12)
@property
def metainfo(self):
return self.METAINFO
def __len__(self):
return self.data.size(0)
def __getitem__(self, index):
return dict(inputs=self.data[index], data_samples=self.label[index])
class TestTeacherStudentValLoop(TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_teacher_student_val_loop(self):
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = ToyModel2().to(device)
evaluator = Mock()
evaluator.evaluate = Mock(return_value=dict(acc=0.5))
evaluator.__class__ = Evaluator
runner = Runner(
model=model,
train_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
val_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=False),
batch_size=3,
num_workers=0),
val_evaluator=evaluator,
work_dir=self.temp_dir.name,
default_scope='mmdet',
optim_wrapper=OptimWrapper(
torch.optim.Adam(ToyModel().parameters())),
train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1),
val_cfg=dict(type='TeacherStudentValLoop'),
default_hooks=dict(logger=dict(type='LoggerHook', interval=1)),
experiment_name='test1')
runner.train()
|
# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from mmengine.evaluator import Evaluator
from mmengine.model import BaseModel
from mmengine.optim import OptimWrapper
from mmengine.runner import Runner
from torch.utils.data import Dataset
from mmdet.registry import DATASETS
from mmdet.utils import register_all_modules
register_all_modules()
class ToyModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, inputs, data_samples, mode='tensor'):
labels = torch.stack(data_samples)
outputs = self.linear(inputs)
if mode == 'tensor':
return outputs
elif mode == 'loss':
loss = (labels - outputs).sum()
outputs = dict(loss=loss)
return outputs
else:
return outputs
class ToyModel1(BaseModel, ToyModel):
def __init__(self):
super().__init__()
def forward(self, *args, **kwargs):
return super(BaseModel, self).forward(*args, **kwargs)
class ToyModel2(BaseModel):
def __init__(self):
super().__init__()
self.teacher = ToyModel1()
self.student = ToyModel1()
self.semi_test_cfg = dict(predict_on='teacher')
def forward(self, *args, **kwargs):
return self.student(*args, **kwargs)
@DATASETS.register_module(force=True)
class DummyDataset(Dataset):
METAINFO = dict() # type: ignore
data = torch.randn(12, 2)
label = torch.ones(12)
@property
def metainfo(self):
return self.METAINFO
def __len__(self):
return self.data.size(0)
def __getitem__(self, index):
return dict(inputs=self.data[index], data_sample=self.label[index])
class TestTeacherStudentValLoop(TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_mean_teacher_hook(self):
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = ToyModel2().to(device)
evaluator = Mock()
evaluator.evaluate = Mock(return_value=dict(acc=0.5))
evaluator.__class__ = Evaluator
runner = Runner(
model=model,
train_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
val_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=False),
batch_size=3,
num_workers=0),
val_evaluator=evaluator,
work_dir=self.temp_dir.name,
default_scope='mmdet',
optim_wrapper=OptimWrapper(
torch.optim.Adam(ToyModel().parameters())),
train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1),
val_cfg=dict(type='TeacherStudentValLoop'),
default_hooks=dict(logger=dict(type='LoggerHook', interval=1)),
experiment_name='test1')
runner.train()
|
import json
import os
import requests
import sys
import time
from typing import Dict, List, Tuple
CHECK_INTERVAL = 30
def get_environment_variables() -> Tuple[str, str, str, str, str]:
"""Retrieve and return necessary environment variables."""
try:
with open(os.environ["GITHUB_EVENT_PATH"]) as f:
event = json.load(f)
# Handle both PR and merge group events
if "pull_request" in event:
sha = event["pull_request"]["head"]["sha"]
else:
sha = os.environ["GITHUB_SHA"]
return (
os.environ["GITHUB_API_URL"],
os.environ["GITHUB_REPOSITORY"],
sha,
os.environ["GITHUB_TOKEN"],
os.environ["GITHUB_RUN_ID"],
)
except KeyError as e:
print(f"Error: Missing required environment variable or event data: {e}")
sys.exit(1)
def make_api_request(url: str, headers: Dict[str, str]) -> Dict:
"""Make an API request and return the JSON response."""
try:
print("Making API request to:", url)
response = requests.get(url, headers=headers, timeout=10)
response.raise_for_status()
return response.json()
except requests.RequestException as e:
print(f"Error: API request failed. {e}")
sys.exit(1)
def process_check_runs(check_runs: List[Dict]) -> Tuple[bool, bool]:
"""Process check runs and return their status."""
runs_in_progress = False
all_others_passed = True
for run in check_runs:
if str(run["name"]) != "Check PR Status":
status = run["status"]
conclusion = run["conclusion"]
if status == "completed":
if conclusion not in ["success", "skipped", "neutral"]:
all_others_passed = False
print(
f"Check run {run['name']} (ID: {run['id']}) has conclusion: {conclusion}"
)
else:
runs_in_progress = True
print(f"Check run {run['name']} (ID: {run['id']}) is still {status}.")
all_others_passed = False
else:
print(
f"Skipping check run {run['name']} (ID: {run['id']}) as it is the current run."
)
return runs_in_progress, all_others_passed
def main():
api_url, repo, sha, github_token, current_run_id = get_environment_variables()
endpoint = f"{api_url}/repos/{repo}/commits/{sha}/check-runs"
headers = {
"Accept": "application/vnd.github.v3+json",
}
if github_token:
headers["Authorization"] = f"token {github_token}"
print(f"Current run ID: {current_run_id}")
while True:
data = make_api_request(endpoint, headers)
check_runs = data["check_runs"]
print("Processing check runs...")
print(check_runs)
runs_in_progress, all_others_passed = process_check_runs(check_runs)
if not runs_in_progress:
break
print(
"Some check runs are still in progress. "
f"Waiting {CHECK_INTERVAL} seconds before checking again..."
)
time.sleep(CHECK_INTERVAL)
if all_others_passed:
print("All other completed check runs have passed. This check passes.")
sys.exit(0)
else:
print("Some check runs have failed or have not completed. This check fails.")
sys.exit(1)
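def _demo_process_check_runs():
    """Self-check of process_check_runs against a hypothetical payload shaped
    like the GitHub check-runs API response; handy for local testing without
    hitting the API."""
    fake_runs = [
        {"name": "Check PR Status", "id": 1, "status": "in_progress", "conclusion": None},
        {"name": "unit-tests", "id": 2, "status": "completed", "conclusion": "success"},
        {"name": "lint", "id": 3, "status": "completed", "conclusion": "failure"},
    ]
    in_progress, all_passed = process_check_runs(fake_runs)
    assert not in_progress and not all_passed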
if __name__ == "__main__":
main()
|
import json
import os
import requests
import sys
import time
from typing import Dict, List, Tuple
CHECK_INTERVAL = 30
def get_environment_variables() -> Tuple[str, str, str, str, str]:
"""Retrieve and return necessary environment variables."""
try:
with open(os.environ["GITHUB_EVENT_PATH"]) as f:
event = json.load(f)
sha = event["pull_request"]["head"]["sha"]
return (
os.environ["GITHUB_API_URL"],
os.environ["GITHUB_REPOSITORY"],
sha,
os.environ["GITHUB_TOKEN"],
os.environ["GITHUB_RUN_ID"],
)
except KeyError as e:
print(f"Error: Missing required environment variable or event data: {e}")
sys.exit(1)
def make_api_request(url: str, headers: Dict[str, str]) -> Dict:
"""Make an API request and return the JSON response."""
try:
print("Making API request to:", url)
response = requests.get(url, headers=headers, timeout=10)
response.raise_for_status()
return response.json()
except requests.RequestException as e:
print(f"Error: API request failed. {e}")
sys.exit(1)
def process_check_runs(check_runs: List[Dict]) -> Tuple[bool, bool]:
"""Process check runs and return their status."""
runs_in_progress = False
all_others_passed = True
for run in check_runs:
if str(run["name"]) != "Check PR Status":
status = run["status"]
conclusion = run["conclusion"]
if status == "completed":
if conclusion not in ["success", "skipped", "neutral"]:
all_others_passed = False
print(
f"Check run {run['name']} (ID: {run['id']}) has conclusion: {conclusion}"
)
else:
runs_in_progress = True
print(f"Check run {run['name']} (ID: {run['id']}) is still {status}.")
all_others_passed = False
else:
print(
f"Skipping check run {run['name']} (ID: {run['id']}) as it is the current run."
)
return runs_in_progress, all_others_passed
def main():
api_url, repo, sha, github_token, current_run_id = get_environment_variables()
endpoint = f"{api_url}/repos/{repo}/commits/{sha}/check-runs"
headers = {
"Accept": "application/vnd.github.v3+json",
}
if github_token:
headers["Authorization"] = f"token {github_token}"
print(f"Current run ID: {current_run_id}")
while True:
data = make_api_request(endpoint, headers)
check_runs = data["check_runs"]
print("Processing check runs...")
print(check_runs)
runs_in_progress, all_others_passed = process_check_runs(check_runs)
if not runs_in_progress:
break
print(
"Some check runs are still in progress. "
f"Waiting {CHECK_INTERVAL} seconds before checking again..."
)
time.sleep(CHECK_INTERVAL)
if all_others_passed:
print("All other completed check runs have passed. This check passes.")
sys.exit(0)
else:
print("Some check runs have failed or have not completed. This check fails.")
sys.exit(1)
if __name__ == "__main__":
main()
|
# pyre-strict
# mypy: allow-untyped-defs
import os
from concurrent.futures import Future, ThreadPoolExecutor
from typing import Optional, Union
import torch.distributed as dist
from torch.distributed.checkpoint._async_executor import _AsyncCheckpointExecutor
from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE
from torch.distributed.checkpoint.planner import SavePlanner
from torch.distributed.checkpoint.storage import StorageWriter
def save_wrapper(
staging_future_or_state_dict: Union[Future[STATE_DICT_TYPE], STATE_DICT_TYPE],
*,
checkpoint_id: Union[str, os.PathLike, None] = None,
storage_writer: Optional[StorageWriter] = None,
planner: Optional[SavePlanner] = None,
process_group: Optional[dist.ProcessGroup] = None,
) -> Future:
from torch.distributed.checkpoint.state_dict_saver import save
staged_dict = (
staging_future_or_state_dict.result()
if isinstance(staging_future_or_state_dict, Future)
else staging_future_or_state_dict
)
return save(
staged_dict,
checkpoint_id=checkpoint_id,
storage_writer=storage_writer,
planner=planner,
process_group=process_group,
)
class _ThreadBasedAsyncCheckpointExecutor(_AsyncCheckpointExecutor):
def __init__(self) -> None:
self._executor = ThreadPoolExecutor(max_workers=1)
def execute_save(
self,
staging_future_or_state_dict: Union[Future[STATE_DICT_TYPE], STATE_DICT_TYPE],
*,
checkpoint_id: Union[str, os.PathLike, None] = None,
storage_writer: Optional[StorageWriter] = None,
planner: Optional[SavePlanner] = None,
process_group: Optional[dist.ProcessGroup] = None,
) -> Future:
f: Future = self._executor.submit(
save_wrapper,
staging_future_or_state_dict=staging_future_or_state_dict,
checkpoint_id=checkpoint_id,
storage_writer=storage_writer,
planner=planner,
process_group=process_group,
)
f.add_done_callback(lambda f: self._executor.shutdown(wait=False))
return f
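# Hedged usage sketch: the executor runs the blocking checkpoint save on a
# single worker thread and returns a Future. The state dict and path below
# are placeholders; a real run needs an initialized process group / writer.
#
#   executor = _ThreadBasedAsyncCheckpointExecutor()
#   fut = executor.execute_save(
#       {"step": 0},            # or a Future resolving to a state dict
#       checkpoint_id="/tmp/ckpt",
#   )
#   fut.result()                # blocks until the checkpoint is written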
|
# pyre-strict
# mypy: allow-untyped-defs
import os
from concurrent.futures import Future, ThreadPoolExecutor
from typing import Optional, Union
import torch.distributed as dist
from torch.distributed.checkpoint._async_executor import _AsyncCheckpointExecutor
from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE
from torch.distributed.checkpoint.planner import SavePlanner
from torch.distributed.checkpoint.storage import StorageWriter
class _ThreadBasedAsyncCheckpointExecutor(_AsyncCheckpointExecutor):
def __init__(self) -> None:
self._executor = ThreadPoolExecutor(max_workers=1)
def execute_save(
self,
staged_state_dict: STATE_DICT_TYPE,
*,
checkpoint_id: Union[str, os.PathLike, None] = None,
storage_writer: Optional[StorageWriter] = None,
planner: Optional[SavePlanner] = None,
process_group: Optional[dist.ProcessGroup] = None,
) -> Future:
from torch.distributed.checkpoint.state_dict_saver import save
f: Future = self._executor.submit(
save,
staged_state_dict,
checkpoint_id=checkpoint_id,
storage_writer=storage_writer,
planner=planner,
process_group=process_group,
)
f.add_done_callback(lambda f: self._executor.shutdown(wait=False))
return f
|
from typing import Dict
from jina import Client, Document, DocumentArray, Executor, Flow, requests
ORIGINAL_PARAMS = {'param1': 50, 'param2': 60, 'exec_name': {'param1': 'changed'}}
OVERRIDEN_EXECUTOR1_PARAMS = {
'param1': 'changed',
'param2': 60,
'exec_name': {'param1': 'changed'},
}
class DummyOverrideParams(Executor):
@requests()
def bar(self, docs: 'DocumentArray', parameters: Dict, *args, **kwargs):
for doc in docs:
doc.tags = parameters
class DummyAssertNotOverrideBetweenPodsParams(Executor):
@requests()
def bar(self, parameters: Dict, *args, **kwargs):
assert parameters == ORIGINAL_PARAMS
parameters['param2'] = 'change_in_pod'
class DummyAssertIfParamsCanBeChangedInsidePods(Executor):
@requests()
def bar(self, parameters: Dict, *args, **kwargs):
        # it is unclear whether this behavior is intended, but this test documents it
assert parameters == ORIGINAL_PARAMS
def test_override_params(mocker, port_generator):
exposed_port = port_generator()
f = (
Flow(port=exposed_port)
.add(
uses={'jtype': 'DummyOverrideParams', 'metas': {'name': 'exec_name'}},
)
.add(uses=DummyAssertNotOverrideBetweenPodsParams)
.add(uses=DummyAssertIfParamsCanBeChangedInsidePods)
)
error_mock = mocker.Mock()
with f:
resp = Client(port=exposed_port).index(
inputs=DocumentArray([Document()]),
parameters={'param1': 50, 'param2': 60, 'exec_name': {'param1': 'changed'}},
on_error=error_mock,
return_responses=True,
)
error_mock.assert_not_called()
assert len(resp) == 1
assert len(resp[0].docs) == 1
for doc in resp[0].docs:
assert doc.tags == OVERRIDEN_EXECUTOR1_PARAMS
assert doc.tags['param1'] == 'changed'
assert doc.tags['param2'] == 60
assert doc.tags['exec_name']['param1'] == 'changed'
|
from typing import Dict
from jina import Flow, DocumentArray, Document, Executor, Client, requests
ORIGINAL_PARAMS = {'param1': 50, 'param2': 60, 'exec_name': {'param1': 'changed'}}
OVERRIDEN_EXECUTOR1_PARAMS = {
'param1': 'changed',
'param2': 60,
'exec_name': {'param1': 'changed'},
}
class DummyOverrideParams(Executor):
@requests()
def bar(self, docs: 'DocumentArray', parameters: Dict, *args, **kwargs):
for doc in docs:
doc.tags = parameters
class DummyAssertNotOverrideBetweenPodsParams(Executor):
@requests()
def bar(self, parameters: Dict, *args, **kwargs):
assert parameters == ORIGINAL_PARAMS
parameters['param2'] = 'change_in_pod'
class DummyAssertIfParamsCanBeChangedInsidePods(Executor):
@requests()
def bar(self, parameters: Dict, *args, **kwargs):
        # it is unclear whether this behavior is intended, but this test documents it
assert parameters == ORIGINAL_PARAMS
def test_override_params(mocker, port_generator):
exposed_port = port_generator()
f = (
Flow(port=exposed_port)
.add(
uses={'jtype': 'DummyOverrideParams', 'metas': {'name': 'exec_name'}},
)
.add(uses=DummyAssertNotOverrideBetweenPodsParams)
.add(uses=DummyAssertIfParamsCanBeChangedInsidePods)
)
error_mock = mocker.Mock()
with f:
resp = Client(port=exposed_port, return_responses=True).index(
inputs=DocumentArray([Document()]),
parameters={'param1': 50, 'param2': 60, 'exec_name': {'param1': 'changed'}},
on_error=error_mock,
)
error_mock.assert_not_called()
assert len(resp) == 1
assert len(resp[0].docs) == 1
for doc in resp[0].docs:
assert doc.tags == OVERRIDEN_EXECUTOR1_PARAMS
assert doc.tags['param1'] == 'changed'
assert doc.tags['param2'] == 60
assert doc.tags['exec_name']['param1'] == 'changed'
|
"""Functionality for loading agents."""
import json
import logging
from pathlib import Path
from typing import Any, Optional, Union
import yaml
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent
from langchain.agents.types import AGENT_TO_CLASS
from langchain.chains.loading import load_chain, load_chain_from_config
logger = logging.getLogger(__file__)
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/"
def _load_agent_from_tools(
config: dict, llm: BaseLanguageModel, tools: list[Tool], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
combined_config = {**config, **kwargs}
return agent_cls.from_llm_and_tools(llm, tools, **combined_config)
@deprecated("0.1.0", removal="1.0")
def load_agent_from_config(
config: dict,
llm: Optional[BaseLanguageModel] = None,
tools: Optional[list[Tool]] = None,
**kwargs: Any,
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from Config Dict.
Args:
config: Config dict to load agent from.
llm: Language model to use as the agent.
tools: List of tools this agent has access to.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
ValueError: If agent type is not specified in the config.
"""
if "_type" not in config:
raise ValueError("Must specify an agent Type in config")
load_from_tools = config.pop("load_from_llm_and_tools", False)
if load_from_tools:
if llm is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, then LLM must be provided"
)
if tools is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, "
"then tools must be provided"
)
return _load_agent_from_tools(config, llm, tools, **kwargs)
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
if "llm_chain" in config:
config["llm_chain"] = load_chain_from_config(config.pop("llm_chain"))
elif "llm_chain_path" in config:
config["llm_chain"] = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` and `llm_chain_path` should be specified.")
if "output_parser" in config:
logger.warning(
"Currently loading output parsers on agent is not supported, "
"will just use the default one."
)
del config["output_parser"]
combined_config = {**config, **kwargs}
return agent_cls(**combined_config) # type: ignore
@deprecated("0.1.0", removal="1.0")
def load_agent(
path: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Unified method for loading an agent from LangChainHub or local fs.
Args:
path: Path to the agent file.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
RuntimeError: If loading from the deprecated github-based
Hub is attempted.
"""
if isinstance(path, str) and path.startswith("lc://"):
raise RuntimeError(
"Loading from the deprecated github-based Hub is no longer supported. "
"Please use the new LangChain Hub at https://smith.langchain.com/hub "
"instead."
)
return _load_agent_from_file(path, **kwargs)
def _load_agent_from_file(
file: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from file."""
valid_suffixes = {"json", "yaml"}
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix[1:] == "json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix[1:] == "yaml":
with open(file_path) as f:
config = yaml.safe_load(f)
else:
raise ValueError(f"Unsupported file type, must be one of {valid_suffixes}.")
# Load the agent from the config now.
return load_agent_from_config(config, **kwargs)
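# A hedged sketch of the on-disk layout _load_agent_from_file expects; the
# field values are illustrative, not a ready-to-run agent config.
#
#   # agent.yaml
#   _type: zero-shot-react-description
#   llm_chain_path: llm_chain.yaml
#
#   agent = load_agent("agent.yaml")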
|
"""Functionality for loading agents."""
import json
import logging
from pathlib import Path
from typing import Any, List, Optional, Union
import yaml
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent
from langchain.agents.types import AGENT_TO_CLASS
from langchain.chains.loading import load_chain, load_chain_from_config
logger = logging.getLogger(__file__)
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/"
def _load_agent_from_tools(
config: dict, llm: BaseLanguageModel, tools: List[Tool], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
combined_config = {**config, **kwargs}
return agent_cls.from_llm_and_tools(llm, tools, **combined_config)
@deprecated("0.1.0", removal="1.0")
def load_agent_from_config(
config: dict,
llm: Optional[BaseLanguageModel] = None,
tools: Optional[List[Tool]] = None,
**kwargs: Any,
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from Config Dict.
Args:
config: Config dict to load agent from.
llm: Language model to use as the agent.
tools: List of tools this agent has access to.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
ValueError: If agent type is not specified in the config.
"""
if "_type" not in config:
raise ValueError("Must specify an agent Type in config")
load_from_tools = config.pop("load_from_llm_and_tools", False)
if load_from_tools:
if llm is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, then LLM must be provided"
)
if tools is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, "
"then tools must be provided"
)
return _load_agent_from_tools(config, llm, tools, **kwargs)
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
if "llm_chain" in config:
config["llm_chain"] = load_chain_from_config(config.pop("llm_chain"))
elif "llm_chain_path" in config:
config["llm_chain"] = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` and `llm_chain_path` should be specified.")
if "output_parser" in config:
logger.warning(
"Currently loading output parsers on agent is not supported, "
"will just use the default one."
)
del config["output_parser"]
combined_config = {**config, **kwargs}
return agent_cls(**combined_config) # type: ignore
@deprecated("0.1.0", removal="1.0")
def load_agent(
path: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Unified method for loading an agent from LangChainHub or local fs.
Args:
path: Path to the agent file.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
RuntimeError: If loading from the deprecated github-based
Hub is attempted.
"""
if isinstance(path, str) and path.startswith("lc://"):
raise RuntimeError(
"Loading from the deprecated github-based Hub is no longer supported. "
"Please use the new LangChain Hub at https://smith.langchain.com/hub "
"instead."
)
return _load_agent_from_file(path, **kwargs)
def _load_agent_from_file(
file: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from file."""
valid_suffixes = {"json", "yaml"}
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix[1:] == "json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix[1:] == "yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError(f"Unsupported file type, must be one of {valid_suffixes}.")
# Load the agent from the config now.
return load_agent_from_config(config, **kwargs)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import TencentCOSDirectoryLoader
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"TencentCOSDirectoryLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"TencentCOSDirectoryLoader",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import TencentCOSDirectoryLoader
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"TencentCOSDirectoryLoader": "langchain_community.document_loaders"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"TencentCOSDirectoryLoader",
]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras import _tf_keras as _tf_keras
from keras import activations as activations
from keras import applications as applications
from keras import backend as backend
from keras import callbacks as callbacks
from keras import config as config
from keras import constraints as constraints
from keras import datasets as datasets
from keras import distribution as distribution
from keras import dtype_policies as dtype_policies
from keras import export as export
from keras import initializers as initializers
from keras import layers as layers
from keras import legacy as legacy
from keras import losses as losses
from keras import metrics as metrics
from keras import mixed_precision as mixed_precision
from keras import models as models
from keras import ops as ops
from keras import optimizers as optimizers
from keras import preprocessing as preprocessing
from keras import quantizers as quantizers
from keras import random as random
from keras import regularizers as regularizers
from keras import saving as saving
from keras import tree as tree
from keras import utils as utils
from keras import visualization as visualization
from keras import wrappers as wrappers
from keras.src.backend import Variable as Variable
from keras.src.backend import device as device
from keras.src.backend import name_scope as name_scope
from keras.src.backend.common.keras_tensor import KerasTensor as KerasTensor
from keras.src.backend.common.remat import RematScope as RematScope
from keras.src.backend.common.remat import remat as remat
from keras.src.backend.common.stateless_scope import (
StatelessScope as StatelessScope,
)
from keras.src.backend.common.symbolic_scope import (
SymbolicScope as SymbolicScope,
)
from keras.src.dtype_policies.dtype_policy import DTypePolicy as DTypePolicy
from keras.src.dtype_policies.dtype_policy import (
FloatDTypePolicy as FloatDTypePolicy,
)
from keras.src.initializers.initializer import Initializer as Initializer
from keras.src.layers.core.input_layer import Input as Input
from keras.src.layers.input_spec import InputSpec as InputSpec
from keras.src.layers.layer import Layer as Layer
from keras.src.losses.loss import Loss as Loss
from keras.src.metrics.metric import Metric as Metric
from keras.src.models.model import Model as Model
from keras.src.models.sequential import Sequential as Sequential
from keras.src.ops.function import Function as Function
from keras.src.ops.operation import Operation as Operation
from keras.src.optimizers.optimizer import Optimizer as Optimizer
from keras.src.quantizers.quantizers import Quantizer as Quantizer
from keras.src.regularizers.regularizers import Regularizer as Regularizer
from keras.src.version import __version__ as __version__
from keras.src.version import version as version
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import _tf_keras
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.api import visualization
from keras.api import wrappers
from keras.src.backend import Variable
from keras.src.backend import device
from keras.src.backend import name_scope
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.remat import RematScope
from keras.src.backend.common.remat import remat
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
_base_ = '../retinanet/retinanet_x101-32x4d_fpn_1x_coco.py'
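# PISA adds importance-based sample reweighting (isr) and the classification-
# aware regression loss (carl) on top of the RetinaNet-X101 baseline.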
model = dict(
bbox_head=dict(
type='PISARetinaHead',
loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)),
train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
|
_base_ = '../retinanet/retinanet_x101_32x4d_fpn_1x_coco.py'
model = dict(
bbox_head=dict(
type='PISARetinaHead',
loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)),
train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
|
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
from unittest.mock import patch
from transformers.testing_utils import CaptureStd, require_torch
class CLITest(unittest.TestCase):
@patch("sys.argv", ["fakeprogrampath", "env"])
def test_cli_env(self):
# test transformers env
import transformers.commands.transformers_cli
with CaptureStd() as cs:
transformers.commands.transformers_cli.main()
self.assertIn("Python version", cs.out)
self.assertIn("Platform", cs.out)
self.assertIn("Using distributed or parallel set-up in script?", cs.out)
@require_torch
@patch("sys.argv", ["fakeprogrampath", "download", "hf-internal-testing/tiny-random-gptj", "--cache-dir", "/tmp"])
def test_cli_download(self):
import transformers.commands.transformers_cli
        # remove any previously downloaded model to start clean
shutil.rmtree("/tmp/models--hf-internal-testing--tiny-random-gptj", ignore_errors=True)
# run the command
transformers.commands.transformers_cli.main()
# check if the model files are downloaded correctly on /tmp/models--hf-internal-testing--tiny-random-gptj
self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/blobs"))
self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/refs"))
self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/snapshots"))
@require_torch
@patch(
"sys.argv",
[
"fakeprogrampath",
"download",
"hf-internal-testing/test_dynamic_model_with_tokenizer",
"--trust-remote-code",
"--cache-dir",
"/tmp",
],
)
def test_cli_download_trust_remote(self):
import transformers.commands.transformers_cli
        # remove any previously downloaded model to start clean
shutil.rmtree("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer", ignore_errors=True)
# run the command
transformers.commands.transformers_cli.main()
# check if the model files are downloaded correctly on /tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer
self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/blobs"))
self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/refs"))
self.assertTrue(
os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/snapshots")
)
|
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
from unittest.mock import patch
from transformers.testing_utils import CaptureStd, require_torch
class CLITest(unittest.TestCase):
@patch("sys.argv", ["fakeprogrampath", "env"])
def test_cli_env(self):
# test transformers-cli env
import transformers.commands.transformers_cli
with CaptureStd() as cs:
transformers.commands.transformers_cli.main()
self.assertIn("Python version", cs.out)
self.assertIn("Platform", cs.out)
self.assertIn("Using distributed or parallel set-up in script?", cs.out)
@require_torch
@patch("sys.argv", ["fakeprogrampath", "download", "hf-internal-testing/tiny-random-gptj", "--cache-dir", "/tmp"])
def test_cli_download(self):
import transformers.commands.transformers_cli
        # remove any previously downloaded model to start clean
shutil.rmtree("/tmp/models--hf-internal-testing--tiny-random-gptj", ignore_errors=True)
# run the command
transformers.commands.transformers_cli.main()
# check if the model files are downloaded correctly on /tmp/models--hf-internal-testing--tiny-random-gptj
self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/blobs"))
self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/refs"))
self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/snapshots"))
@require_torch
@patch(
"sys.argv",
[
"fakeprogrampath",
"download",
"hf-internal-testing/test_dynamic_model_with_tokenizer",
"--trust-remote-code",
"--cache-dir",
"/tmp",
],
)
def test_cli_download_trust_remote(self):
import transformers.commands.transformers_cli
        # remove any previously downloaded model to start clean
shutil.rmtree("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer", ignore_errors=True)
# run the command
transformers.commands.transformers_cli.main()
        # check that the model files were downloaded to /tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer
self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/blobs"))
self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/refs"))
self.assertTrue(
os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/snapshots")
)
|
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.util import fullname
class MSELoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fct: nn.Module = nn.Identity(), **kwargs) -> None:
"""
Computes the MSE loss between the computed query-passage score and a target query-passage score. This loss
is used to distill a cross-encoder model from a teacher cross-encoder model or gold labels.
Args:
model (:class:`~sentence_transformers.cross_encoder.CrossEncoder`): A CrossEncoder model to be trained.
activation_fct (:class:`~torch.nn.Module`): Activation function applied to the logits before computing the loss.
**kwargs: Additional keyword arguments passed to the underlying :class:`torch.nn.MSELoss`.
.. note::
            Be mindful of the magnitude of both the labels and what the model produces. If the teacher model
            applies a Sigmoid to bound its scores to [0, 1], then you may wish to use a Sigmoid activation
            function in this loss as well.
References:
- Improving Efficient Neural Ranking Models with Cross-Architecture Knowledge Distillation: https://arxiv.org/abs/2010.02666
Requirements:
1. Your model must be initialized with `num_labels = 1` (a.k.a. the default) to predict one class.
            2. Usually uses a finetuned CrossEncoder teacher model in a knowledge distillation setup.
Inputs:
+-----------------------------------------+-----------------------------+-------------------------------+
| Texts | Labels | Number of Model Output Labels |
+=========================================+=============================+===============================+
| (sentence_A, sentence_B) pairs | similarity score | 1 |
+-----------------------------------------+-----------------------------+-------------------------------+
Relations:
- :class:`MarginMSELoss` is similar to this loss, but with a margin through a negative pair.
Example:
::
from sentence_transformers.cross_encoder import CrossEncoder, CrossEncoderTrainer, losses
from datasets import Dataset
student_model = CrossEncoder("microsoft/mpnet-base")
teacher_model = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-12-v2")
train_dataset = Dataset.from_dict({
"query": ["What are pandas?", "What is the capital of France?"],
"answer": ["Pandas are a kind of bear.", "The capital of France is Paris."],
})
def compute_labels(batch):
return {
"label": teacher_model.predict(list(zip(batch["query"], batch["answer"])))
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = CrossEncoderTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.activation_fct = activation_fct
self.loss_fct = nn.MSELoss(**kwargs)
if not isinstance(self.model, CrossEncoder):
raise ValueError(
f"{self.__class__.__name__} expects a model of type CrossEncoder, "
f"but got a model of type {type(self.model)}."
)
if self.model.num_labels != 1:
raise ValueError(
f"{self.__class__.__name__} expects a model with 1 output label, "
f"but got a model with {self.model.num_labels} output labels."
)
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"MSELoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0].view(-1)
logits = self.activation_fct(logits)
loss = self.loss_fct(logits, labels.float())
return loss
def get_config_dict(self):
return {
"activation_fct": fullname(self.activation_fct),
}
|
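A side note on the Sigmoid caveat in the docstring above: if the teacher's predict() already bounds its scores to [0, 1], the student's raw logits live on a different scale, and the two can be matched by passing a Sigmoid as the activation. A minimal sketch, reusing the illustrative checkpoint from the docstring example:

from torch import nn
from sentence_transformers.cross_encoder import CrossEncoder, losses

student_model = CrossEncoder("microsoft/mpnet-base")
# Squash the student's raw logits into [0, 1] before the MSE is computed,
# so they are directly comparable to sigmoid-bounded teacher scores.
loss = losses.MSELoss(student_model, activation_fct=nn.Sigmoid())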
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.util import fullname
class MSELoss(nn.Module):
    def __init__(self, model: CrossEncoder, activation_fct: nn.Module = nn.Identity(), **kwargs) -> None:
        super().__init__()
        self.model = model
        # Stored so it can be applied in forward() and reported by get_config_dict() below.
        self.activation_fct = activation_fct
        self.loss_fct = nn.MSELoss(**kwargs)
if self.model.num_labels != 1:
raise ValueError(
f"{self.__class__.__name__} expects a model with 1 output label, "
f"but got a model with {self.model.num_labels} output labels."
)
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"MSELoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
        logits = self.model(**tokens)[0].view(-1)
        logits = self.activation_fct(logits)
        loss = self.loss_fct(logits, labels.float())
return loss
def get_config_dict(self):
return {
"activation_fct": fullname(self.activation_fct),
}
|
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
from .. import util
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim):
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
        ``loss = log(1 + sum(exp(s(k,l) - s(i,j))))``, where the sum runs over all pairs of input pairs ``(i,j)`` and
        ``(k,l)`` in the batch such that the expected similarity of ``(i,j)`` is greater than that of ``(k,l)``.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
:param model: SentenceTransformerModel
:param similarity_fct: Function to compute the PAIRWISE similarity between embeddings. Default is ``util.pairwise_cos_sim``.
:param scale: Output of similarity function is multiplied by scale value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
            from torch.utils.data import DataLoader
            from sentence_transformers import SentenceTransformer, losses
            from sentence_transformers.readers import InputExample
            model = SentenceTransformer('bert-base-uncased')
            train_examples = [InputExample(texts=['My first sentence', 'My second sentence'], label=1.0),
                InputExample(texts=['My third sentence', 'Unrelated sentence'], label=0.3)]
            train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=16)
train_loss = losses.CoSENTLoss(model=model)
"""
        super().__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
def get_config_dict(self):
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
@property
def citation(self) -> str:
return """
@online{kexuefm-8847,
title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
author={Su Jianlin},
year={2022},
month={Jan},
url={https://kexue.fm/archives/8847},
}
"""
|
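The masking and logsumexp tricks in forward() above are worth unpacking: subtracting 1e12 from the irrelevant score differences makes them vanish after exp(), and the prepended zero contributes the "1 +" term of the loss formula. A small self-contained sanity check with arbitrary toy numbers:

import torch

s = torch.tensor([2.0, -1.0, 0.5])        # toy scaled similarities, one per sentence pair
y = torch.tensor([1.0, 0.0, 0.5])         # toy gold similarity labels

diff = s[:, None] - s[None, :]            # diff[a, b] = s_a - s_b
mask = (y[:, None] < y[None, :]).float()  # keep entries where pair b should outrank pair a
masked = diff - (1 - mask) * 1e12         # masked-out entries vanish after exp()
loss = torch.logsumexp(torch.cat((torch.zeros(1), masked.view(-1))), dim=0)

# The same value computed naively: log(1 + sum of exp over the kept differences)
naive = torch.log(1 + torch.exp(diff[mask.bool()]).sum())
assert torch.allclose(loss, naive)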
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
from .. import util
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim):
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
        ``loss = log(1 + sum(exp(s(k,l) - s(i,j))))``, where the sum runs over all pairs of input pairs ``(i,j)`` and
        ``(k,l)`` in the batch such that the expected similarity of ``(i,j)`` is greater than that of ``(k,l)``.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
:param model: SentenceTransformerModel
:param similarity_fct: Function to compute the PAIRWISE similarity between embeddings. Default is ``util.pairwise_cos_sim``.
:param scale: Output of similarity function is multiplied by scale value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
            from torch.utils.data import DataLoader
            from sentence_transformers import SentenceTransformer, losses
            from sentence_transformers.readers import InputExample
            model = SentenceTransformer('bert-base-uncased')
            train_examples = [InputExample(texts=['My first sentence', 'My second sentence'], label=1.0),
                InputExample(texts=['My third sentence', 'Unrelated sentence'], label=0.3)]
            train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=16)
train_loss = losses.CoSENTLoss(model=model)
"""
        super().__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
def get_config_dict(self):
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTranslationEvaluator(TranslationEvaluator):
"""
This evaluator extends :class:`~sentence_transformers.evaluation.TranslationEvaluator` but is specifically designed for sparse encoder models.
Given two sets of sentences in different languages, e.g. (en_1, en_2, en_3...) and (fr_1, fr_2, fr_3, ...),
and assuming that fr_i is the translation of en_i.
    Checks if vec(en_i) has the highest similarity to vec(fr_i). Computes the accuracy in both directions.
The labels need to indicate the similarity between the sentences.
Args:
source_sentences (List[str]): List of sentences in the source language.
target_sentences (List[str]): List of sentences in the target language.
show_progress_bar (bool): Whether to show a progress bar when computing embeddings. Defaults to False.
batch_size (int): The batch size to compute sentence embeddings. Defaults to 16.
name (str): The name of the evaluator. Defaults to an empty string.
print_wrong_matches (bool): Whether to print incorrect matches. Defaults to False.
write_csv (bool): Whether to write the evaluation results to a CSV file. Defaults to True.
truncate_dim (int, optional): The dimension to truncate sentence embeddings to. If None, the model's
current truncation dimension will be used. Defaults to None.
Example:
::
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
            # Load a model (not multilingual, but multilingual sparse models may appear on the Hub soon)
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
'''
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
'''
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
"""
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
show_progress_bar: bool = False,
batch_size: int = 16,
name: str = "",
print_wrong_matches: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
):
return super().__init__(
source_sentences,
target_sentences,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
print_wrong_matches=print_wrong_matches,
write_csv=write_csv,
truncate_dim=truncate_dim,
)
def __call__(
        self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor]:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_tensor=False,
convert_to_sparse_tensor=True,
save_to_cpu=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTranslationEvaluator(TranslationEvaluator):
"""
This evaluator extends :class:`TranslationEvaluator` but is specifically designed for sparse encoder models.
Given two sets of sentences in different languages, e.g. (en_1, en_2, en_3...) and (fr_1, fr_2, fr_3, ...),
and assuming that fr_i is the translation of en_i.
    Checks if vec(en_i) has the highest similarity to vec(fr_i). Computes the accuracy in both directions.
The labels need to indicate the similarity between the sentences.
Args:
source_sentences (List[str]): List of sentences in the source language.
target_sentences (List[str]): List of sentences in the target language.
show_progress_bar (bool): Whether to show a progress bar when computing embeddings. Defaults to False.
batch_size (int): The batch size to compute sentence embeddings. Defaults to 16.
name (str): The name of the evaluator. Defaults to an empty string.
print_wrong_matches (bool): Whether to print incorrect matches. Defaults to False.
write_csv (bool): Whether to write the evaluation results to a CSV file. Defaults to True.
truncate_dim (int, optional): The dimension to truncate sentence embeddings to. If None, the model's
current truncation dimension will be used. Defaults to None.
Example:
::
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
            # Load a model (not multilingual, but multilingual sparse models may appear on the Hub soon)
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
'''
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
'''
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
"""
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
show_progress_bar: bool = False,
batch_size: int = 16,
name: str = "",
print_wrong_matches: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
):
return super().__init__(
source_sentences,
target_sentences,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
print_wrong_matches=print_wrong_matches,
write_csv=write_csv,
truncate_dim=truncate_dim,
)
def __call__(
        self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor]:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_tensor=False,
convert_to_sparse_tensor=True,
save_on_cpu=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import io
import pathlib
import platform
import re
import sys
from auditwheel import main_show
def parse_args():
"""Arguments parser."""
parser = argparse.ArgumentParser(
description="Helper for manylinux compliance verification",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--wheel-path", required=True, help="Path of the wheel, mandatory"
)
parser.add_argument(
"--aarch64-compliance-tag",
required=True,
help="ManyLinux compliance tag for aarch64",
)
parser.add_argument(
"--x86_64-compliance-tag",
required=True,
help="ManyLinux compliance tag for x86_64",
)
parser.add_argument(
"--ppc64le-compliance-tag",
required=True,
help="ManyLinux compliance tag for ppc64le",
)
return parser.parse_args()
def get_auditwheel_output(wheel_path: str) -> str:
"""Run "auditwheel show" on the wheel and return the output.
Args:
wheel_path: path of the wheel file
Returns:
"auditwheel show" output
"""
stringio = io.StringIO()
previous_stdout = sys.stdout
sys.stdout = stringio
auditwheel_parser = argparse.ArgumentParser(
description="Cross-distro Python wheels."
)
sub_parsers = auditwheel_parser.add_subparsers(metavar="command", dest="cmd")
main_show.configure_parser(sub_parsers)
auditwheel_args = argparse.Namespace(
WHEEL_FILE=pathlib.Path(wheel_path),
DISABLE_ISA_EXT_CHECK=True,
verbose=1,
)
main_show.execute(auditwheel_args, auditwheel_parser)
sys.stdout = previous_stdout
return stringio.getvalue()
def verify_manylinux_compliance(
auditwheel_log: str,
compliance_tag: str,
) -> None:
"""Verify manylinux compliance.
Args:
auditwheel_log: "auditwheel show" execution results
compliance_tag: manyLinux compliance tag
Raises:
RuntimeError: if the wheel is not manyLinux compliant.
"""
  regex = r'following platform tag:\s+"{}"'.format(compliance_tag)
alt_regex = regex.replace("2014", "_2_17")
if not (
re.search(regex, auditwheel_log) or re.search(alt_regex, auditwheel_log)
):
raise RuntimeError(
("The wheel is not compliant with the tag {tag}.\n{result}").format(
tag=compliance_tag, result=auditwheel_log
)
)
def test_manylinux_compliance(args):
machine_type = platform.uname().machine
supported_machine_types = ["x86_64", "aarch64", "ppc64le"]
if machine_type not in supported_machine_types:
raise RuntimeError(
"Unsupported machine type {machine_type}. The supported are:"
" {supported_types}".format(
machine_type=machine_type, supported_types=supported_machine_types
)
)
if machine_type == "x86_64":
compliance_tag = args.x86_64_compliance_tag
elif machine_type == "aarch64":
compliance_tag = args.aarch64_compliance_tag
else:
compliance_tag = args.ppc64le_compliance_tag
auditwheel_output = get_auditwheel_output(args.wheel_path)
verify_manylinux_compliance(
auditwheel_output,
compliance_tag,
)
if __name__ == "__main__":
test_manylinux_compliance(parse_args())
|
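The alt_regex built via replace("2014", "_2_17") above reflects PEP 600's aliasing of the legacy manylinux2014 tag to manylinux_2_17: depending on the auditwheel version, either spelling may appear in the report, so both patterns are tried. A quick illustration (the log lines are invented stand-ins for real auditwheel output):

import re

compliance_tag = "manylinux2014_x86_64"
regex = r'following platform tag:\s+"{}"'.format(compliance_tag)
alt_regex = regex.replace("2014", "_2_17")

legacy_log = 'is consistent with the following platform tag: "manylinux2014_x86_64"'
pep600_log = 'is consistent with the following platform tag: "manylinux_2_17_x86_64"'
assert re.search(regex, legacy_log)
assert re.search(alt_regex, pep600_log)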
# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import io
import platform
import re
import sys
from auditwheel import main_show
def parse_args():
"""Arguments parser."""
parser = argparse.ArgumentParser(
description="Helper for manylinux compliance verification",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--wheel-path", required=True, help="Path of the wheel, mandatory"
)
parser.add_argument(
"--aarch64-compliance-tag",
required=True,
help="ManyLinux compliance tag for aarch64",
)
parser.add_argument(
"--x86_64-compliance-tag",
required=True,
help="ManyLinux compliance tag for x86_64",
)
parser.add_argument(
"--ppc64le-compliance-tag",
required=True,
help="ManyLinux compliance tag for ppc64le",
)
return parser.parse_args()
def get_auditwheel_output(wheel_path: str) -> str:
"""Run "auditwheel show" on the wheel and return the output.
Args:
wheel_path: path of the wheel file
Returns:
"auditwheel show" output
"""
stringio = io.StringIO()
previous_stdout = sys.stdout
sys.stdout = stringio
auditwheel_parser = argparse.ArgumentParser(
description="Cross-distro Python wheels."
)
sub_parsers = auditwheel_parser.add_subparsers(metavar="command", dest="cmd")
main_show.configure_parser(sub_parsers)
auditwheel_args = argparse.Namespace(
WHEEL_FILE=wheel_path,
verbose=1,
)
main_show.execute(args=auditwheel_args, p=auditwheel_parser)
sys.stdout = previous_stdout
return stringio.getvalue()
def verify_manylinux_compliance(
auditwheel_log: str,
compliance_tag: str,
) -> None:
"""Verify manylinux compliance.
Args:
auditwheel_log: "auditwheel show" execution results
compliance_tag: manyLinux compliance tag
Raises:
RuntimeError: if the wheel is not manyLinux compliant.
"""
  regex = r'following platform tag:\s+"{}"'.format(compliance_tag)
alt_regex = regex.replace("2014", "_2_17")
if not (
re.search(regex, auditwheel_log) or re.search(alt_regex, auditwheel_log)
):
raise RuntimeError(
("The wheel is not compliant with the tag {tag}.\n{result}").format(
tag=compliance_tag, result=auditwheel_log
)
)
def test_manylinux_compliance(args):
machine_type = platform.uname().machine
supported_machine_types = ["x86_64", "aarch64", "ppc64le"]
if machine_type not in supported_machine_types:
raise RuntimeError(
"Unsupported machine type {machine_type}. The supported are:"
" {supported_types}".format(
machine_type=machine_type, supported_types=supported_machine_types
)
)
if machine_type == "x86_64":
compliance_tag = args.x86_64_compliance_tag
elif machine_type == "aarch64":
compliance_tag = args.aarch64_compliance_tag
else:
compliance_tag = args.ppc64le_compliance_tag
auditwheel_output = get_auditwheel_output(args.wheel_path)
verify_manylinux_compliance(
auditwheel_output,
compliance_tag,
)
if __name__ == "__main__":
test_manylinux_compliance(parse_args())
|
# dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer the backend from the path prefix (LMDB and Memcached are not yet supported)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1024, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1024, 800), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
# TODO: find a better way to collect image_level_labels
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'instances', 'image_level_labels'))
]
train_dataloader = dict(
batch_size=2,
    num_workers=0,  # num_workers > 0 may cause out-of-memory errors
persistent_workers=False,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/oidv6-train-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/train/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/train-image-metas.pkl',
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=0,
persistent_workers=False,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/validation-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/validation/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/validation-image-metas.pkl',
image_level_ann_file='annotations/validation-'
'annotations-human-imagelabels-boxable.csv',
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='OpenImagesMetric',
iou_thrs=0.5,
ioa_thrs=0.5,
use_group_of=True,
get_supercategory=True)
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1024, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1024, 800), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
# TODO: find a better way to collect image_level_labels
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'instances', 'image_level_labels'))
]
train_dataloader = dict(
batch_size=2,
    num_workers=0,  # num_workers > 0 may cause out-of-memory errors
persistent_workers=False,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/oidv6-train-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/train/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/train-image-metas.pkl',
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=0,
persistent_workers=False,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/validation-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/validation/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/validation-image-metas.pkl',
image_level_ann_file='annotations/validation-'
'annotations-human-imagelabels-boxable.csv',
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='OpenImagesMetric',
iou_thrs=0.5,
ioa_thrs=0.5,
use_group_of=True,
get_supercategory=True)
test_evaluator = val_evaluator
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_levit import *
from .feature_extraction_levit import *
from .image_processing_levit import *
from .image_processing_levit_fast import *
from .modeling_levit import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
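The _LazyModule indirection above defers the heavy submodule imports until an attribute is first accessed. Independent of the HuggingFace helper, the same idea can be sketched with a plain PEP 562 module-level __getattr__; the attribute-to-module mapping below is illustrative, not the library's actual table:

# lazy_pkg/__init__.py -- minimal sketch of lazy attribute loading (PEP 562)
import importlib

_SUBMODULES = {"LevitConfig": ".configuration_levit"}  # attr name -> relative submodule

def __getattr__(name):
    # The submodule is imported only on first access, then cached in globals().
    if name in _SUBMODULES:
        module = importlib.import_module(_SUBMODULES[name], __name__)
        value = getattr(module, name)
        globals()[name] = value
        return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")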
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_levit import *
from .feature_extraction_levit import *
from .image_processing_levit import *
from .modeling_levit import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
from abc import abstractmethod
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
Union,
Optional,
Dict,
)
from qdrant_client.http.models.models import Distance
from docarray import Document, DocumentArray
from docarray.math import ndarray
from docarray.score import NamedScore
if TYPE_CHECKING:
import tensorflow
import torch
import numpy as np
from qdrant_client import QdrantClient
# String forward references: numpy, tensorflow and torch are only imported
# under TYPE_CHECKING, so their names must not be evaluated at runtime here.
QdrantArrayType = TypeVar(
    'QdrantArrayType',
    'np.ndarray',
    'tensorflow.Tensor',
    'torch.Tensor',
    Sequence[float],
)
class FindMixin:
@property
@abstractmethod
def client(self) -> 'QdrantClient':
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def serialize_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def distance(self) -> 'Distance':
raise NotImplementedError()
def _find_similar_vectors(
self, q: 'QdrantArrayType', limit: int = 10, filter: Optional[Dict] = None
):
query_vector = self._map_embedding(q)
search_result = self.client.search(
self.collection_name,
query_vector=query_vector,
query_filter=filter,
search_params=None,
top=limit,
append_payload=['_serialized'],
)
docs = []
for hit in search_result:
doc = Document.from_base64(
hit.payload['_serialized'], **self.serialize_config
)
doc.scores[f'{self.distance.lower()}_similarity'] = NamedScore(
value=hit.score
)
docs.append(doc)
return DocumentArray(docs)
def _find(
self,
query: 'QdrantArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
        :param query: the query vector(s), in any array type supported by Qdrant.
:param limit: number of retrieved items
:param filter: filter query used for pre-filtering
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [self._find_similar_vectors(query, limit=limit, filter=filter)]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(q, limit=limit, filter=filter)
closest_docs.append(da)
return closest_docs
def _find_with_filter(
self, filter: Optional[Dict], limit: Optional[Union[int, float]] = 10
):
list_of_points, _offset = self.client.scroll(
collection_name=self.collection_name,
scroll_filter=filter,
with_payload=True,
limit=limit,
)
da = DocumentArray()
for result in list_of_points[:limit]:
doc = Document.from_base64(
result.payload['_serialized'], **self.serialize_config
)
da.append(doc)
return da
def _filter(
self, filter: Optional[Dict], limit: Optional[Union[int, float]] = 10
) -> 'DocumentArray':
"""Returns a subset of documents by filtering by the given filter (`Qdrant` filter)..
:param limit: number of retrieved items
:param filter: filter query used for filtering.
For more information: https://docarray.jina.ai/advanced/document-store/qdrant/#qdrant
:return: a `DocumentArray` containing the `Document` objects that verify the filter.
"""
return self._find_with_filter(filter, limit=limit)
|
from abc import abstractmethod
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
Dict,
Optional,
)
from qdrant_client.http.models.models import Distance
from docarray import Document, DocumentArray
from docarray.math import ndarray
from docarray.score import NamedScore
if TYPE_CHECKING:
import tensorflow
import torch
import numpy as np
from qdrant_client import QdrantClient
# String forward references: numpy, tensorflow and torch are only imported
# under TYPE_CHECKING, so their names must not be evaluated at runtime here.
QdrantArrayType = TypeVar(
    'QdrantArrayType',
    'np.ndarray',
    'tensorflow.Tensor',
    'torch.Tensor',
    Sequence[float],
)
class FindMixin:
@property
@abstractmethod
def client(self) -> 'QdrantClient':
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def serialize_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def distance(self) -> 'Distance':
raise NotImplementedError()
def _find_similar_vectors(
self, q: 'QdrantArrayType', limit: int = 10, filter: Optional[Dict] = None
):
query_vector = self._map_embedding(q)
search_result = self.client.search(
self.collection_name,
query_vector=query_vector,
query_filter=filter,
search_params=None,
top=limit,
append_payload=['_serialized'],
)
docs = []
for hit in search_result:
doc = Document.from_base64(
hit.payload['_serialized'], **self.serialize_config
)
doc.scores[f'{self.distance.lower()}_similarity'] = NamedScore(
value=hit.score
)
docs.append(doc)
return DocumentArray(docs)
def _find(
self,
query: 'QdrantArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
        :param query: the query vector(s), in any array type supported by Qdrant.
:param limit: number of retrieved items
:param filter: filter query used for pre-filtering
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [self._find_similar_vectors(query, limit=limit, filter=filter)]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(q, limit=limit, filter=filter)
closest_docs.append(da)
return closest_docs
|
from typing import Optional
import torch
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def sdpa_attention_paged_forward(
module: torch.nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
dropout: float = 0.0,
scaling: Optional[float] = None,
is_causal: Optional[bool] = None,
**kwargs,
) -> tuple[torch.Tensor, None]:
cache = kwargs.pop("cache", None)
if cache is not None:
key, value = cache.update(key, value, module.layer_idx, **kwargs)
if hasattr(module, "num_key_value_groups"):
key = repeat_kv(key, module.num_key_value_groups)
value = repeat_kv(value, module.num_key_value_groups)
causal_mask = attention_mask
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
attn_output = torch.nn.functional.scaled_dot_product_attention(
query,
key,
value,
attn_mask=causal_mask,
dropout_p=dropout,
scale=scaling,
is_causal=False,
)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, None
|
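The docstring of repeat_kv claims equivalence with torch.repeat_interleave along the head dimension; the expand/reshape route simply keeps the intermediate as a view until the final reshape. A quick check on a random tensor:

import torch

x = torch.randn(2, 4, 3, 8)  # (batch, num_key_value_heads, seqlen, head_dim)
n_rep = 3
expanded = x[:, :, None, :, :].expand(2, 4, n_rep, 3, 8).reshape(2, 4 * n_rep, 3, 8)
assert torch.equal(expanded, torch.repeat_interleave(x, repeats=n_rep, dim=1))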
from typing import Optional, Tuple
import torch
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def sdpa_attention_paged_forward(
module: torch.nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
dropout: float = 0.0,
scaling: Optional[float] = None,
is_causal: Optional[bool] = None,
**kwargs,
) -> Tuple[torch.Tensor, None]:
cache = kwargs.pop("cache", None)
if cache is not None:
key, value = cache.update(key, value, module.layer_idx, **kwargs)
if hasattr(module, "num_key_value_groups"):
key = repeat_kv(key, module.num_key_value_groups)
value = repeat_kv(value, module.num_key_value_groups)
causal_mask = attention_mask
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
attn_output = torch.nn.functional.scaled_dot_product_attention(
query,
key,
value,
attn_mask=causal_mask,
dropout_p=dropout,
scale=scaling,
is_causal=False,
)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, None
|
from abc import ABC
import pytest
from docarray import DocumentArray
from docarray.array.storage.memory import GetSetDelMixin, SequenceLikeMixin
from docarray.array.storage.redis.backend import BackendMixin, RedisConfig
class StorageMixins(BackendMixin, GetSetDelMixin, SequenceLikeMixin, ABC):
...
class DocumentArrayDummy(StorageMixins, DocumentArray):
def __new__(cls, *args, **kwargs):
return super().__new__(cls)
def _load_offset2ids(self):
pass
def _save_offset2ids(self):
pass
type_convert = {
'int': b'NUMERIC',
'float': b'NUMERIC',
'double': b'NUMERIC',
'long': b'NUMERIC',
'str': b'TEXT',
'bytes': b'TEXT',
'bool': b'NUMERIC',
}
@pytest.fixture(scope='function')
def da_redis():
cfg = RedisConfig(n_dim=128, flush=True)
da_redis = DocumentArrayDummy(storage='redis', config=cfg)
return da_redis
@pytest.mark.parametrize('distance', ['L2', 'IP', 'COSINE'])
@pytest.mark.parametrize(
'method,initial_cap,ef_construction,block_size',
[
('HNSW', 10, 250, 1000000),
('FLAT', 10, 250, 1000000),
],
)
@pytest.mark.parametrize(
'columns',
[
[('attr1', 'str'), ('attr2', 'bytes')],
[('attr1', 'int'), ('attr2', 'float')],
[('attr1', 'double'), ('attr2', 'long'), ('attr3', 'bool')],
{'attr1': 'str', 'attr2': 'bytes'},
{'attr1': 'int', 'attr2': 'float'},
{'attr1': 'double', 'attr2': 'long', 'attr3': 'bool'},
],
)
@pytest.mark.parametrize(
'redis_config',
[
{'decode_responses': True},
{'decode_responses': False},
{'retry_on_timeout': True},
{'decode_responses': True, 'retry_on_timeout': True},
{},
],
)
def test_init_storage(
distance,
columns,
method,
initial_cap,
ef_construction,
block_size,
redis_config,
start_storage,
):
cfg = RedisConfig(
n_dim=128,
distance=distance,
flush=True,
columns=columns,
method=method,
initial_cap=initial_cap,
ef_construction=ef_construction,
block_size=block_size,
redis_config=redis_config,
)
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert redis_da._client.info()['tcp_port'] == redis_da._config.port
assert redis_da._client.ft().info()['attributes'][0][1] == b'embedding'
assert redis_da._client.ft().info()['attributes'][0][5] == b'VECTOR'
def test_init_storage_update_schema(start_storage):
cfg = RedisConfig(n_dim=128, columns={'attr1': 'str'}, flush=True)
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert redis_da._client.ft().info()['attributes'][1][1] == b'attr1'
cfg = RedisConfig(n_dim=128, columns={'attr2': 'str'}, update_schema=False)
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert redis_da._client.ft().info()['attributes'][1][1] == b'attr1'
cfg = RedisConfig(n_dim=128, columns={'attr2': 'str'}, update_schema=True)
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert redis_da._client.ft().info()['attributes'][1][1] == b'attr2'
|
from abc import ABC
import pytest
from docarray import DocumentArray
from docarray.array.storage.memory import GetSetDelMixin, SequenceLikeMixin
from docarray.array.storage.redis.backend import BackendMixin, RedisConfig
class StorageMixins(BackendMixin, GetSetDelMixin, SequenceLikeMixin, ABC):
...
class DocumentArrayDummy(StorageMixins, DocumentArray):
def __new__(cls, *args, **kwargs):
return super().__new__(cls)
def _load_offset2ids(self):
pass
def _save_offset2ids(self):
pass
type_convert = {
'int': b'NUMERIC',
'float': b'NUMERIC',
'double': b'NUMERIC',
'long': b'NUMERIC',
'str': b'TEXT',
'bytes': b'TEXT',
'bool': b'NUMERIC',
}
@pytest.fixture(scope='function')
def da_redis():
cfg = RedisConfig(n_dim=128, flush=True)
da_redis = DocumentArrayDummy(storage='redis', config=cfg)
return da_redis
@pytest.mark.parametrize('distance', ['L2', 'IP', 'COSINE'])
@pytest.mark.parametrize(
'method,initial_cap,ef_construction,block_size',
[
('HNSW', 10, 250, 1000000),
('FLAT', 10, 250, 1000000),
],
)
@pytest.mark.parametrize(
'columns',
[
[('attr1', 'str'), ('attr2', 'bytes')],
[('attr1', 'int'), ('attr2', 'float')],
[('attr1', 'double'), ('attr2', 'long'), ('attr3', 'bool')],
],
)
@pytest.mark.parametrize(
'redis_config',
[
{'decode_responses': True},
{'decode_responses': False},
{'retry_on_timeout': True},
{'decode_responses': True, 'retry_on_timeout': True},
{},
],
)
def test_init_storage(
distance,
columns,
method,
initial_cap,
ef_construction,
block_size,
redis_config,
start_storage,
):
cfg = RedisConfig(
n_dim=128,
distance=distance,
flush=True,
columns=columns,
method=method,
initial_cap=initial_cap,
ef_construction=ef_construction,
block_size=block_size,
redis_config=redis_config,
)
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert redis_da._client.info()['tcp_port'] == redis_da._config.port
assert redis_da._client.ft().info()['attributes'][0][1] == b'embedding'
assert redis_da._client.ft().info()['attributes'][0][5] == b'VECTOR'
for i in range(len(columns)):
assert redis_da._client.ft().info()['attributes'][i + 1][1] == bytes(
redis_da._config.columns[i][0], 'utf-8'
)
assert (
redis_da._client.ft().info()['attributes'][i + 1][5]
== type_convert[redis_da._config.columns[i][1]]
)
def test_init_storage_update_schema(start_storage):
cfg = RedisConfig(n_dim=128, columns=[('attr1', 'str')], flush=True)
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert redis_da._client.ft().info()['attributes'][1][1] == b'attr1'
cfg = RedisConfig(n_dim=128, columns=[('attr2', 'str')], update_schema=False)
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert redis_da._client.ft().info()['attributes'][1][1] == b'attr1'
cfg = RedisConfig(n_dim=128, columns=[('attr2', 'str')], update_schema=True)
redis_da = DocumentArrayDummy(storage='redis', config=cfg)
assert redis_da._client.ft().info()['attributes'][1][1] == b'attr2'
|
"""Test GooseAI"""
import pytest
from pydantic import SecretStr
from pytest import MonkeyPatch
from langchain_community.llms.gooseai import GooseAI
from langchain_community.utils.openai import is_openai_v1
def _openai_v1_installed() -> bool:
try:
return is_openai_v1()
    except Exception:
return False
@pytest.mark.requires("openai")
def test_api_key_is_secret_string() -> None:
llm = GooseAI(gooseai_api_key="secret-api-key") # type: ignore[arg-type]
assert isinstance(llm.gooseai_api_key, SecretStr)
assert llm.gooseai_api_key.get_secret_value() == "secret-api-key"
@pytest.mark.skipif(
_openai_v1_installed(), reason="GooseAI currently only works with openai<1"
)
@pytest.mark.requires("openai")
def test_api_key_masked_when_passed_via_constructor() -> None:
llm = GooseAI(gooseai_api_key="secret-api-key") # type: ignore[arg-type]
assert str(llm.gooseai_api_key) == "**********"
assert "secret-api-key" not in repr(llm.gooseai_api_key)
assert "secret-api-key" not in repr(llm)
@pytest.mark.skipif(
_openai_v1_installed(), reason="GooseAI currently only works with openai<1"
)
@pytest.mark.requires("openai")
def test_api_key_masked_when_passed_from_env() -> None:
with MonkeyPatch.context() as mp:
mp.setenv("GOOSEAI_API_KEY", "secret-api-key")
llm = GooseAI()
assert str(llm.gooseai_api_key) == "**********"
assert "secret-api-key" not in repr(llm.gooseai_api_key)
assert "secret-api-key" not in repr(llm)
|
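The masking assertions in these tests rest on pydantic's SecretStr behaviour: str() and repr() show a fixed placeholder, and the real value is only reachable through get_secret_value(). A standalone illustration:

from pydantic import SecretStr

secret = SecretStr("secret-api-key")
assert str(secret) == "**********"
assert "secret-api-key" not in repr(secret)
assert secret.get_secret_value() == "secret-api-key"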
"""Test GooseAI"""
import pytest
from pydantic import SecretStr
from pytest import MonkeyPatch
from langchain_community.llms.gooseai import GooseAI
from langchain_community.utils.openai import is_openai_v1
def _openai_v1_installed() -> bool:
try:
return is_openai_v1()
    except Exception:
return False
@pytest.mark.requires("openai")
def test_api_key_is_secret_string() -> None:
llm = GooseAI(gooseai_api_key="secret-api-key") # type: ignore[arg-type, call-arg]
assert isinstance(llm.gooseai_api_key, SecretStr)
assert llm.gooseai_api_key.get_secret_value() == "secret-api-key"
@pytest.mark.skipif(
_openai_v1_installed(), reason="GooseAI currently only works with openai<1"
)
@pytest.mark.requires("openai")
def test_api_key_masked_when_passed_via_constructor() -> None:
llm = GooseAI(gooseai_api_key="secret-api-key") # type: ignore[arg-type, call-arg]
assert str(llm.gooseai_api_key) == "**********"
assert "secret-api-key" not in repr(llm.gooseai_api_key)
assert "secret-api-key" not in repr(llm)
@pytest.mark.skipif(
_openai_v1_installed(), reason="GooseAI currently only works with openai<1"
)
@pytest.mark.requires("openai")
def test_api_key_masked_when_passed_from_env() -> None:
with MonkeyPatch.context() as mp:
mp.setenv("GOOSEAI_API_KEY", "secret-api-key")
llm = GooseAI() # type: ignore[call-arg]
assert str(llm.gooseai_api_key) == "**********"
assert "secret-api-key" not in repr(llm.gooseai_api_key)
assert "secret-api-key" not in repr(llm)
|
_base_ = './decoupled-solo_r50_fpn_3x_coco.py'
# model settings
model = dict(
mask_head=dict(
type='DecoupledSOLOLightHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 8, 16, 32, 32],
scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)),
pos_scale=0.2,
num_grids=[40, 36, 24, 16, 12],
cls_down_index=0,
loss_mask=dict(
type='DiceLoss', use_sigmoid=True, activate=False,
loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(852, 512), (852, 480), (852, 448), (852, 416), (852, 384),
(852, 352)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(852, 512), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = './decoupled-solo_r50_fpn_3x_coco.py'
# model settings
model = dict(
mask_head=dict(
type='DecoupledSOLOLightHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 8, 16, 32, 32],
scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)),
pos_scale=0.2,
num_grids=[40, 36, 24, 16, 12],
cls_down_index=0,
loss_mask=dict(
type='DiceLoss', use_sigmoid=True, activate=False,
loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(852, 512), (852, 480), (852, 448), (852, 416), (852, 384),
(852, 352)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(852, 512), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
"""Base class for Slack tools."""
from __future__ import annotations
from typing import TYPE_CHECKING
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.tools.slack.utils import login
if TYPE_CHECKING:
# This is for linting and IDE typehints
from slack_sdk import WebClient
else:
try:
# We do this so pydantic can resolve the types when instantiating
from slack_sdk import WebClient
except ImportError:
pass
class SlackBaseTool(BaseTool):
"""Base class for Slack tools."""
client: WebClient = Field(default_factory=login)
"""The WebClient object."""
|
"""Base class for Slack tools."""
from __future__ import annotations
from typing import TYPE_CHECKING
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.tools.slack.utils import login
if TYPE_CHECKING:
# This is for linting and IDE typehints
from slack_sdk import WebClient
else:
try:
# We do this so pydantic can resolve the types when instantiating
from slack_sdk import WebClient
except ImportError:
pass
class SlackBaseTool(BaseTool): # type: ignore[override]
"""Base class for Slack tools."""
client: WebClient = Field(default_factory=login)
"""The WebClient object."""
|
from __future__ import annotations
from .splade_callbacks import SchedulerType, SpladeRegularizerWeightSchedulerCallback
__all__ = ["SpladeRegularizerWeightSchedulerCallback", "SchedulerType"]
|
from __future__ import annotations
from .splade_callbacks import SchedulerType, SpladeLambdaSchedulerCallback
__all__ = ["SpladeLambdaSchedulerCallback", "SchedulerType"]
|
import csv
import gzip
import logging
import os
from datetime import datetime
import torch
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 16
pos_neg_ratio = 8  # batch_size must be divisible by pos_neg_ratio
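# With batch_size=16 and pos_neg_ratio=8, each batch is expected to contain
# 16 / 8 = 2 identical (positive) sentence pairs and 14 non-identical
# (negative) pairs.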
epochs = 1
max_seq_length = 75
# Save path to store our model
model_save_path = "output/train_stsb_ct-{}-{}".format(model_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
################# Train sentences #################
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = "data/wiki1m_for_simcse.txt"
if not os.path.exists(wikipedia_dataset_path):
util.http_get(
"https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt",
wikipedia_dataset_path,
)
# train_sentences are simply your list of sentences
train_sentences = []
with open(wikipedia_dataset_path, "r", encoding="utf8") as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
################# Download and load STSb #################
data_folder = "data/stsbenchmark"
sts_dataset_path = f"{data_folder}/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = losses.ContrastiveTensionDataLoader(
train_sentences, batch_size=batch_size, pos_neg_ratio=pos_neg_ratio
)
# As loss, we use losses.ContrastiveTensionLoss
train_loss = losses.ContrastiveTensionLoss(model)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=1,
evaluation_steps=1000,
weight_decay=0,
warmup_steps=0,
optimizer_class=torch.optim.RMSprop,
optimizer_params={"lr": 1e-5},
output_path=model_save_path,
    use_amp=False,  # Set to True if your GPU has optimized FP16 cores
)
########### Load the model and evaluate on test set
model = SentenceTransformer(model_save_path)
test_evaluator(model)
|
import torch
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers import SentenceTransformer, LoggingHandler, models, util, InputExample
from sentence_transformers import losses
import os
import gzip
import csv
from datetime import datetime
import logging
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
## Training parameters
model_name = 'distilbert-base-uncased'
batch_size = 16
pos_neg_ratio = 8  # batch_size must be divisible by pos_neg_ratio
epochs = 1
max_seq_length = 75
# Save path to store our model
model_save_path = 'output/train_stsb_ct-{}-{}'.format(model_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
################# Train sentences #################
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = 'data/wiki1m_for_simcse.txt'
if not os.path.exists(wikipedia_dataset_path):
util.http_get('https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt', wikipedia_dataset_path)
# train_sentences are simply your list of sentences
train_sentences = []
with open(wikipedia_dataset_path, 'r', encoding='utf8') as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
################# Download and load STSb #################
data_folder = 'data/stsbenchmark'
sts_dataset_path = f'{data_folder}/stsbenchmark.tsv.gz'
if not os.path.exists(sts_dataset_path):
util.http_get('https://sbert.net/datasets/stsbenchmark.tsv.gz', sts_dataset_path)
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, 'rt', encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row['score']) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row['sentence1'], row['sentence2']], label=score)
if row['split'] == 'dev':
dev_samples.append(inp_example)
elif row['split'] == 'test':
test_samples.append(inp_example)
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name='sts-dev')
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name='sts-test')
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = losses.ContrastiveTensionDataLoader(train_sentences, batch_size=batch_size, pos_neg_ratio=pos_neg_ratio)
# As loss, we use losses.ContrastiveTensionLoss
train_loss = losses.ContrastiveTensionLoss(model)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=1,
evaluation_steps=1000,
weight_decay=0,
warmup_steps=0,
optimizer_class=torch.optim.RMSprop,
optimizer_params={'lr': 1e-5},
output_path=model_save_path,
    use_amp=False  # Set to True if your GPU has optimized FP16 cores
)
########### Load the model and evaluate on test set
model = SentenceTransformer(model_save_path)
test_evaluator(model)
|
_base_ = 'yolact_r50_1x8_coco.py'
optimizer = dict(type='SGD', lr=8e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[20, 42, 49, 52])
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = 'yolact_r50_1x8_coco.py'
optimizer = dict(type='SGD', lr=8e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[20, 42, 49, 52])
|
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_start_time,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import (
get_connected_output_nodes,
get_graph,
get_graph_metadata,
get_node,
)
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, exposed_run_and_wait
from backend.util.settings import Config
config = Config()
_user_credit_model = get_user_credit_model()
async def _spend_credits(entry: NodeExecutionEntry) -> int:
return await _user_credit_model.spend_credits(entry, 0, 0)
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
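    # `exposed_run_and_wait` wraps each async data-layer function so it can be
    # invoked synchronously over this service's RPC interface (a description
    # inferred from its usage below, not from its implementation).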
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_start_time = exposed_run_and_wait(
update_graph_execution_start_time
)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
get_connected_output_nodes = exposed_run_and_wait(get_connected_output_nodes)
get_graph_metadata = exposed_run_and_wait(get_graph_metadata)
# Credits
spend_credits = exposed_run_and_wait(_spend_credits)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
|
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_start_time,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_graph_metadata, get_node
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, exposed_run_and_wait
from backend.util.settings import Config
config = Config()
_user_credit_model = get_user_credit_model()
async def _spend_credits(entry: NodeExecutionEntry) -> int:
return await _user_credit_model.spend_credits(entry, 0, 0)
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_start_time = exposed_run_and_wait(
update_graph_execution_start_time
)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
get_graph_metadata = exposed_run_and_wait(get_graph_metadata)
# Credits
spend_credits = exposed_run_and_wait(_spend_credits)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
|
# flake8: noqa
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.optimizers.adamw import AdamW
class AdamWTest(testing.TestCase):
def test_config(self):
optimizer = AdamW(
learning_rate=0.5,
weight_decay=0.008,
beta_1=0.5,
beta_2=0.67,
epsilon=1e-5,
amsgrad=True,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = AdamW(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [0.4980, 1.4960, 2.494, 3.492], rtol=1e-4, atol=1e-4
)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = AdamW(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = AdamW(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = AdamW(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_weight_decay_is_none(self):
with self.assertRaisesRegex(
ValueError,
"Argument `weight_decay` must be a float. "
"Received: weight_decay=None",
):
AdamW(learning_rate=1.0, weight_decay=None)
def test_correctness_with_golden(self):
optimizer = AdamW(learning_rate=1.0, weight_decay=0.5, epsilon=2)
x = backend.Variable(np.ones([10]))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998],
[0.2486, 0.2475, 0.2463, 0.2451, 0.244, 0.2428, 0.2417, 0.2405, 0.2394, 0.2382],
[0.1223, 0.1198, 0.1174, 0.1149, 0.1124, 0.11, 0.1075, 0.1051, 0.1027, 0.1003],
[0.0586, 0.0549, 0.0512, 0.0475, 0.0439, 0.0402, 0.0366, 0.033, 0.0294, 0.0258],
[0.0263, 0.0215, 0.0167, 0.012, 0.0073, 0.0026, -0.0021, -0.0067, -0.0113, -0.0159]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = AdamW(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
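        # clipnorm=1 rescales [100, 100] (global norm 100 * sqrt(2)) to unit
        # norm, leaving 1 / sqrt(2) ≈ 0.7071 in each entry.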
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = AdamW(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
|
# flake8: noqa
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.optimizers.adamw import AdamW
class AdamWTest(testing.TestCase):
def test_config(self):
optimizer = AdamW(
learning_rate=0.5,
weight_decay=0.008,
beta_1=0.5,
beta_2=0.67,
epsilon=1e-5,
amsgrad=True,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = AdamW(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [0.4980, 1.4960, 2.494, 3.492], rtol=1e-4, atol=1e-4
)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = AdamW(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = AdamW(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = AdamW(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = AdamW(learning_rate=1.0, weight_decay=0.5, epsilon=2)
x = backend.Variable(np.ones([10]))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998],
[0.2486, 0.2475, 0.2463, 0.2451, 0.244, 0.2428, 0.2417, 0.2405, 0.2394, 0.2382],
[0.1223, 0.1198, 0.1174, 0.1149, 0.1124, 0.11, 0.1075, 0.1051, 0.1027, 0.1003],
[0.0586, 0.0549, 0.0512, 0.0475, 0.0439, 0.0402, 0.0366, 0.033, 0.0294, 0.0258],
[0.0263, 0.0215, 0.0167, 0.012, 0.0073, 0.0026, -0.0021, -0.0067, -0.0113, -0.0159]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = AdamW(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = AdamW(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
|
_base_ = './faster-rcnn_r50-caffe_c4-1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
_base_.train_dataloader.dataset.pipeline = train_pipeline
|
_base_ = './faster-rcnn_r50-caffe_c4-1x_coco.py'
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import AINRuleOps
from langchain_community.tools.ainetwork.rule import RuleSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"RuleSchema": "langchain_community.tools.ainetwork.rule",
"AINRuleOps": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AINRuleOps",
"RuleSchema",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import AINRuleOps
from langchain_community.tools.ainetwork.rule import RuleSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"RuleSchema": "langchain_community.tools.ainetwork.rule",
"AINRuleOps": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"RuleSchema",
"AINRuleOps",
]
|
"""Official evaluation script for v1.1 of the SQuAD dataset."""
import argparse
import json
import re
import string
import sys
from collections import Counter
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
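# Worked example (illustrative, not part of the original script): with
# prediction "the cat sat" and ground truth "a cat sat down", normalization
# drops the articles, leaving tokens ["cat", "sat"] vs ["cat", "sat", "down"];
# precision = 2/2 = 1.0, recall = 2/3, so f1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.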
def exact_match_score(prediction, ground_truth):
return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article["paragraphs"]:
for qa in paragraph["qas"]:
total += 1
if qa["id"] not in predictions:
message = "Unanswered question " + qa["id"] + " will receive score 0."
print(message, file=sys.stderr)
continue
ground_truths = [x["text"] for x in qa["answers"]]
prediction = predictions[qa["id"]]
exact_match += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {"exact_match": exact_match, "f1": f1}
if __name__ == "__main__":
expected_version = "1.1"
parser = argparse.ArgumentParser(description="Evaluation for SQuAD " + expected_version)
parser.add_argument("dataset_file", help="Dataset file")
parser.add_argument("prediction_file", help="Prediction File")
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset_json = json.load(dataset_file)
if dataset_json["version"] != expected_version:
print(
"Evaluation expects v-" + expected_version + ", but got dataset with v-" + dataset_json["version"],
file=sys.stderr,
)
dataset = dataset_json["data"]
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
print(json.dumps(evaluate(dataset, predictions)))
|
""" Official evaluation script for v1.1 of the SQuAD dataset. """
import argparse
import json
import re
import string
import sys
from collections import Counter
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article["paragraphs"]:
for qa in paragraph["qas"]:
total += 1
if qa["id"] not in predictions:
message = "Unanswered question " + qa["id"] + " will receive score 0."
print(message, file=sys.stderr)
continue
ground_truths = [x["text"] for x in qa["answers"]]
prediction = predictions[qa["id"]]
exact_match += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {"exact_match": exact_match, "f1": f1}
if __name__ == "__main__":
expected_version = "1.1"
parser = argparse.ArgumentParser(description="Evaluation for SQuAD " + expected_version)
parser.add_argument("dataset_file", help="Dataset file")
parser.add_argument("prediction_file", help="Prediction File")
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset_json = json.load(dataset_file)
if dataset_json["version"] != expected_version:
print(
"Evaluation expects v-" + expected_version + ", but got dataset with v-" + dataset_json["version"],
file=sys.stderr,
)
dataset = dataset_json["data"]
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
print(json.dumps(evaluate(dataset, predictions)))
|
import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
class Groq(OpenAILike):
"""
Groq LLM.
Examples:
`pip install llama-index-llms-groq`
```python
from llama_index.llms.groq import Groq
# Set up the Groq class with the required model and API key
llm = Groq(model="llama3-70b-8192", api_key="your_api_key")
# Call the complete method with a query
response = llm.complete("Explain the importance of low latency LLMs")
print(response)
```
"""
def __init__(
self,
model: str,
api_key: Optional[str] = None,
api_base: str = "https://api.groq.com/openai/v1",
is_chat_model: bool = True,
is_function_calling_model: bool = True,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("GROQ_API_KEY", None)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=is_chat_model,
is_function_calling_model=is_function_calling_model,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "Groq"
|
import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
class Groq(OpenAILike):
"""
Groq LLM.
Examples:
`pip install llama-index-llms-groq`
```python
from llama_index.llms.groq import Groq
# Set up the Groq class with the required model and API key
llm = Groq(model="llama3-70b-8192", api_key="your_api_key")
# Call the complete method with a query
response = llm.complete("Explain the importance of low latency LLMs")
print(response)
```
"""
def __init__(
self,
model: str,
api_key: Optional[str] = None,
api_base: str = "https://api.groq.com/openai/v1",
is_chat_model: bool = True,
is_function_calling_model: bool = True,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("GROQ_API_KEY", None)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=is_chat_model,
is_function_calling_model=is_function_calling_model,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "Groq"
|
import copy
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from docarray.array.storage.base.backend import BaseBackendMixin, TypeMap
from docarray.helper import dataclass_from_dict, filter_dict, _safe_cast_int
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
list_like: bool = True
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_connection: Optional[int] = None
n_components: Optional[int] = None
columns: Optional[Union[List[Tuple[str, str]], Dict[str, str]]] = None
root_id: bool = True
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
TYPE_MAP = {
'str': TypeMap(type='str', converter=str),
'float': TypeMap(type='float', converter=float),
'int': TypeMap(type='int', converter=_safe_cast_int),
}
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from docarray.math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
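    # e.g. a list embedding [[1.0, 2.0, 3.0]] is converted to a numpy array
    # and squeezed to shape (3,), while `None` becomes a float32 zero vector
    # of length `n_dim`.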
def _normalize_columns(self, columns):
columns = super()._normalize_columns(columns)
for key in columns.keys():
columns[key] = self._map_type(columns[key])
return columns
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
import os
if 'data_path' not in config_subindex:
config_joined['data_path'] = os.path.join(
config_joined['data_path'], 'subindex_' + subindex_name
)
return config_joined
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
subindex_configs: Optional[Dict] = None,
**kwargs,
):
config = copy.deepcopy(config)
from docarray import Document
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
if config.data_path is None:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
self._config.columns = self._normalize_columns(self._config.columns)
config = asdict(config)
self.n_dim = config.pop('n_dim')
self._list_like = config.pop("list_like")
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **filter_dict(config))
super()._init_storage(**kwargs)
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **filter_dict(config))
def __len__(self):
return self._annlite.index_size
|
import copy
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from docarray.array.storage.base.backend import BaseBackendMixin, TypeMap
from docarray.helper import dataclass_from_dict, filter_dict, _safe_cast_int
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
list_like: bool = True
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_connection: Optional[int] = None
n_components: Optional[int] = None
columns: Optional[Union[List[Tuple[str, str]], Dict[str, str]]] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
TYPE_MAP = {
'str': TypeMap(type='str', converter=str),
'float': TypeMap(type='float', converter=float),
'int': TypeMap(type='int', converter=_safe_cast_int),
}
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from docarray.math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _normalize_columns(self, columns):
columns = super()._normalize_columns(columns)
for key in columns.keys():
columns[key] = self._map_type(columns[key])
return columns
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
import os
if 'data_path' not in config_subindex:
config_joined['data_path'] = os.path.join(
config_joined['data_path'], 'subindex_' + subindex_name
)
return config_joined
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
subindex_configs: Optional[Dict] = None,
**kwargs,
):
config = copy.deepcopy(config)
from docarray import Document
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
if config.data_path is None:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
self._config.columns = self._normalize_columns(self._config.columns)
config = asdict(config)
self.n_dim = config.pop('n_dim')
self._list_like = config.pop("list_like")
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **filter_dict(config))
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **filter_dict(config))
def __len__(self):
return self._annlite.index_size
|
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
from xgboost.testing.monotone_constraints import is_correctly_constrained, training_dset
rng = np.random.RandomState(1994)
def non_decreasing(L: np.ndarray) -> bool:
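    # True when no element exceeds its successor by more than 1e-3, i.e. the
    # sequence is non-decreasing up to a small tolerance.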
return all((x - y) < 0.001 for x, y in zip(L, L[1:]))
def non_increasing(L: np.ndarray) -> bool:
return all((y - x) < 0.001 for x, y in zip(L, L[1:]))
def assert_constraint(constraint: int, tree_method: str) -> None:
from sklearn.datasets import make_regression
n = 1000
X, y = make_regression(n, random_state=rng, n_features=1, n_informative=1)
dtrain = xgb.DMatrix(X, y)
param = {}
param["tree_method"] = tree_method
param["device"] = "cuda"
param["monotone_constraints"] = "(" + str(constraint) + ")"
bst = xgb.train(param, dtrain)
dpredict = xgb.DMatrix(X[X[:, 0].argsort()])
pred = bst.predict(dpredict)
if constraint > 0:
assert non_decreasing(pred)
elif constraint < 0:
assert non_increasing(pred)
@pytest.mark.skipif(**tm.no_sklearn())
def test_gpu_hist_basic():
assert_constraint(1, "hist")
assert_constraint(-1, "hist")
@pytest.mark.skipif(**tm.no_sklearn())
def test_gpu_approx_basic():
assert_constraint(1, "approx")
assert_constraint(-1, "approx")
def test_gpu_hist_depthwise():
params = {
"tree_method": "hist",
"grow_policy": "depthwise",
"device": "cuda",
"monotone_constraints": "(1, -1)",
}
model = xgb.train(params, training_dset)
is_correctly_constrained(model)
def test_gpu_hist_lossguide():
params = {
"tree_method": "hist",
"grow_policy": "lossguide",
"device": "cuda",
"monotone_constraints": "(1, -1)",
}
model = xgb.train(params, training_dset)
is_correctly_constrained(model)
|
import sys
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
sys.path.append("tests/python")
import test_monotone_constraints as tmc
rng = np.random.RandomState(1994)
def non_decreasing(L):
return all((x - y) < 0.001 for x, y in zip(L, L[1:]))
def non_increasing(L):
return all((y - x) < 0.001 for x, y in zip(L, L[1:]))
def assert_constraint(constraint, tree_method):
from sklearn.datasets import make_regression
n = 1000
X, y = make_regression(n, random_state=rng, n_features=1, n_informative=1)
dtrain = xgb.DMatrix(X, y)
param = {}
param["tree_method"] = tree_method
param["monotone_constraints"] = "(" + str(constraint) + ")"
bst = xgb.train(param, dtrain)
dpredict = xgb.DMatrix(X[X[:, 0].argsort()])
pred = bst.predict(dpredict)
if constraint > 0:
assert non_decreasing(pred)
elif constraint < 0:
assert non_increasing(pred)
@pytest.mark.skipif(**tm.no_sklearn())
def test_gpu_hist_basic():
assert_constraint(1, "gpu_hist")
assert_constraint(-1, "gpu_hist")
def test_gpu_hist_depthwise():
params = {
"tree_method": "gpu_hist",
"grow_policy": "depthwise",
"monotone_constraints": "(1, -1)",
}
model = xgb.train(params, tmc.training_dset)
tmc.is_correctly_constrained(model)
def test_gpu_hist_lossguide():
params = {
"tree_method": "gpu_hist",
"grow_policy": "lossguide",
"monotone_constraints": "(1, -1)",
}
model = xgb.train(params, tmc.training_dset)
tmc.is_correctly_constrained(model)
|
# Copyright (c) OpenMMLab. All rights reserved.
import functools
from typing import Callable, Optional
import torch
import torch.nn.functional as F
from torch import Tensor
def reduce_loss(loss: Tensor, reduction: str) -> Tensor:
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
    Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss: Tensor,
weight: Optional[Tensor] = None,
reduction: str = 'mean',
avg_factor: Optional[float] = None) -> Tensor:
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Optional[Tensor], optional): Element-wise weights.
Defaults to None.
reduction (str, optional): Same as built-in losses of PyTorch.
Defaults to 'mean'.
avg_factor (Optional[float], optional): Average factor when
computing the mean of losses. Defaults to None.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
loss = loss * weight
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
# Avoid causing ZeroDivisionError when avg_factor is 0.0,
# i.e., all labels of an image belong to ignore index.
eps = torch.finfo(torch.float32).eps
loss = loss.sum() / (avg_factor + eps)
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
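# Illustrative example (added for clarity, not in the original file): with
# loss = Tensor([1., 1., 2.]), weight = Tensor([1., 0., 1.]) and avg_factor=2,
# the weighted sum is 1 + 0 + 2 = 3 and the result is 3 / (2 + eps) ≈ 1.5,
# matching the `weighted_loss` doctest below.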
def weighted_loss(loss_func: Callable) -> Callable:
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
reduction: str = 'mean',
avg_factor: Optional[int] = None,
**kwargs) -> Tensor:
"""
Args:
pred (Tensor): The prediction.
target (Tensor): Target bboxes.
weight (Optional[Tensor], optional): The weight of loss for each
prediction. Defaults to None.
reduction (str, optional): Options are "none", "mean" and "sum".
Defaults to 'mean'.
avg_factor (Optional[int], optional): Average factor that is used
to average the loss. Defaults to None.
Returns:
Tensor: Loss tensor.
"""
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
|
# Copyright (c) OpenMMLab. All rights reserved.
import functools
import torch
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
    Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
loss = loss * weight
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
# Avoid causing ZeroDivisionError when avg_factor is 0.0,
# i.e., all labels of an image belong to ignore index.
eps = torch.finfo(torch.float32).eps
loss = loss.sum() / (avg_factor + eps)
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred,
target,
weight=None,
reduction='mean',
avg_factor=None,
**kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
|
from typing import List, Optional
import torchaudio
from torchaudio._internal.module_utils import deprecated
# TODO: Once legacy global backend is removed, move this to torchaudio.__init__
def _init_backend():
from . import utils
torchaudio.info = utils.get_info_func()
torchaudio.load = utils.get_load_func()
torchaudio.save = utils.get_save_func()
def list_audio_backends() -> List[str]:
"""List available backends
Returns:
list of str: The list of available backends.
The possible values are;
- Dispatcher mode: ``"ffmpeg"``, ``"sox"`` and ``"soundfile"``.
- Legacy backend mode: ``"sox_io"``, ``"soundfile"``.
"""
from . import utils
return list(utils.get_available_backends().keys())
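# e.g. `list_audio_backends()` might return ['ffmpeg', 'sox', 'soundfile'] in
# dispatcher mode, depending on which libraries are installed.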
# Temporary until global backend is removed
@deprecated("With dispatcher enabled, this function is no-op. You can remove the function call.")
def get_audio_backend() -> Optional[str]:
"""Get the name of the current global backend
Returns:
str or None:
If dispatcher mode is enabled, returns ``None`` otherwise,
the name of current backend or ``None`` (no backend is set).
"""
return None
# Temporary until global backend is removed
@deprecated("With dispatcher enabled, this function is no-op. You can remove the function call.")
def set_audio_backend(backend: Optional[str]): # noqa
"""Set the global backend.
This is a no-op when dispatcher mode is enabled.
Args:
backend (str or None): Name of the backend.
One of ``"sox_io"`` or ``"soundfile"`` based on availability
of the system. If ``None`` is provided the current backend is unassigned.
"""
pass
|
from typing import List, Optional
import torchaudio
from torchaudio._internal.module_utils import deprecated
from . import utils
# TODO: Once legacy global backend is removed, move this to torchaudio.__init__
def _init_backend():
torchaudio.info = utils.get_info_func()
torchaudio.load = utils.get_load_func()
torchaudio.save = utils.get_save_func()
def list_audio_backends() -> List[str]:
"""List available backends
Returns:
list of str: The list of available backends.
The possible values are;
- Dispatcher mode: ``"ffmpeg"``, ``"sox"`` and ``"soundfile"``.
- Legacy backend mode: ``"sox_io"``, ``"soundfile"``.
"""
return list(utils.get_available_backends().keys())
# Temporary until global backend is removed
@deprecated("With dispatcher enabled, this function is no-op. You can remove the function call.")
def get_audio_backend() -> Optional[str]:
"""Get the name of the current global backend
Returns:
str or None:
If dispatcher mode is enabled, returns ``None`` otherwise,
the name of current backend or ``None`` (no backend is set).
"""
return None
# Temporary until global backend is removed
@deprecated("With dispatcher enabled, this function is no-op. You can remove the function call.")
def set_audio_backend(backend: Optional[str]): # noqa
"""Set the global backend.
This is a no-op when dispatcher mode is enabled.
Args:
backend (str or None): Name of the backend.
One of ``"sox_io"`` or ``"soundfile"`` based on availability
of the system. If ``None`` is provided the current backend is unassigned.
"""
pass
|
import os
import urllib
import pytest
from pydantic import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import TextUrl
from tests import TOYDATA_DIR
REMOTE_TEXT_FILE = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TEXT_FILES = [
str(TOYDATA_DIR / 'penal_colony.txt'),
str(TOYDATA_DIR / 'test.md'),
str(TOYDATA_DIR / 'test.html'),
str(TOYDATA_DIR / 'test.css'),
str(TOYDATA_DIR / 'test.csv'),
str(TOYDATA_DIR / 'test.log'),
]
LOCAL_TEXT_FILES_AND_BEGINNING = [
(str(TOYDATA_DIR / 'penal_colony.txt'), '“It’s a peculiar apparatus,”'),
(str(TOYDATA_DIR / 'test.md'), "# Hello"),
(str(TOYDATA_DIR / 'test.html'), "<html>"),
(str(TOYDATA_DIR / 'test.css'), 'body {'),
(str(TOYDATA_DIR / 'test.csv'), "John,Doe"),
(str(TOYDATA_DIR / 'test.log'), "2022-11-25 12:34:56 INFO: Program started"),
]
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'url,expected_beginning',
[(REMOTE_TEXT_FILE, '<!DOCTYPE html>'), *LOCAL_TEXT_FILES_AND_BEGINNING],
)
def test_load(url, expected_beginning):
uri = parse_obj_as(TextUrl, url)
txt = uri.load()
assert txt.startswith(expected_beginning)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TEXT_FILE, *LOCAL_TEXT_FILES])
def test_load_to_bytes(url):
uri = parse_obj_as(TextUrl, url)
txt_bytes = uri.load_bytes()
assert isinstance(txt_bytes, bytes)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TEXT_FILE, *LOCAL_TEXT_FILES])
def test_proto_text_url(url):
uri = parse_obj_as(TextUrl, url)
proto = uri._to_node_protobuf()
assert 'text_url' in str(proto)
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(TextUrl, REMOTE_TEXT_FILE)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
with pytest.raises(urllib.error.URLError):
_ = url.load_bytes(timeout=0.001)
def test_json_schema():
schema_json_of(TextUrl)
@pytest.mark.internet
def test_dump_json():
url = parse_obj_as(TextUrl, REMOTE_TEXT_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[REMOTE_TEXT_FILE, *LOCAL_TEXT_FILES],
)
def test_validation(path_to_file):
url = parse_obj_as(TextUrl, path_to_file)
assert isinstance(url, TextUrl)
assert isinstance(url, str)
|
import os
import urllib
import pytest
from pydantic import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import TextUrl
from tests import TOYDATA_DIR
REMOTE_TEXT_FILE = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TEXT_FILES = [
str(TOYDATA_DIR / 'penal_colony.txt'),
str(TOYDATA_DIR / 'test.md'),
str(TOYDATA_DIR / 'test.html'),
str(TOYDATA_DIR / 'test.css'),
str(TOYDATA_DIR / 'test.csv'),
str(TOYDATA_DIR / 'test.log'),
]
LOCAL_TEXT_FILES_AND_BEGINNING = [
(str(TOYDATA_DIR / 'penal_colony.txt'), '“It’s a peculiar apparatus,”'),
(str(TOYDATA_DIR / 'test.md'), "# Hello"),
(str(TOYDATA_DIR / 'test.html'), "<html>"),
(str(TOYDATA_DIR / 'test.css'), 'body {'),
(str(TOYDATA_DIR / 'test.csv'), "John,Doe"),
(str(TOYDATA_DIR / 'test.log'), "2022-11-25 12:34:56 INFO: Program started"),
]
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'url,expected_beginning',
[(REMOTE_TEXT_FILE, '<!DOCTYPE html>'), *LOCAL_TEXT_FILES_AND_BEGINNING],
)
def test_load(url, expected_beginning):
uri = parse_obj_as(TextUrl, url)
txt = uri.load()
assert txt.startswith(expected_beginning)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TEXT_FILE, *LOCAL_TEXT_FILES])
def test_load_to_bytes(url):
uri = parse_obj_as(TextUrl, url)
txt_bytes = uri.load_bytes()
assert isinstance(txt_bytes, bytes)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TEXT_FILE, *LOCAL_TEXT_FILES])
def test_proto_text_url(url):
uri = parse_obj_as(TextUrl, url)
proto = uri._to_node_protobuf()
assert 'text_url' in str(proto)
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(TextUrl, REMOTE_TEXT_FILE)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
with pytest.raises(urllib.error.URLError):
_ = url.load_bytes(timeout=0.001)
def test_json_schema():
schema_json_of(TextUrl)
@pytest.mark.internet
def test_dump_json():
url = parse_obj_as(TextUrl, REMOTE_TEXT_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
'my/local/text/file.mp3',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='TextUrl'):
parse_obj_as(TextUrl, path_to_file)
|
from __future__ import annotations
from .splade_callbacks import SchedulerType, SpladeRegularizerWeightSchedulerCallback
__all__ = ["SpladeRegularizerWeightSchedulerCallback", "SchedulerType"]
|
from __future__ import annotations
from .splade_callbacks import SchedulerType, SpladeWeightRegulizerSchedulerCallback
__all__ = ["SpladeWeightRegulizerSchedulerCallback", "SchedulerType"]
|
"""Embeddings."""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.embeddings.embeddings import Embeddings
from langchain_core.embeddings.fake import (
DeterministicFakeEmbedding,
FakeEmbeddings,
)
__all__ = ["DeterministicFakeEmbedding", "Embeddings", "FakeEmbeddings"]
_dynamic_imports = {
"Embeddings": "embeddings",
"DeterministicFakeEmbedding": "fake",
"FakeEmbeddings": "fake",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent # type: ignore[name-defined]
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
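# Illustrative effect: `from langchain_core.embeddings import FakeEmbeddings`
# calls __getattr__("FakeEmbeddings"), which lazily imports the `.fake`
# submodule and caches the attribute in globals(), so later lookups bypass
# this hook entirely.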
def __dir__() -> list[str]:
return list(__all__)
|
"""Embeddings."""
from langchain_core.embeddings.embeddings import Embeddings
from langchain_core.embeddings.fake import DeterministicFakeEmbedding, FakeEmbeddings
__all__ = ["DeterministicFakeEmbedding", "Embeddings", "FakeEmbeddings"]
|
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from examples/modular-transformers/modular_add_function.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_add_function.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# Note that zamba does not have the `apply_rotary_pos_emb` function!
from typing import Optional
import torch
from torch import nn
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
class TestAttention(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
and "Generating Long Sequences with Sparse Transformers".
Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer
(see fig. 2 in https://huggingface.co/papers/2405.16712).
Additionally, replaced
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
"""
def __init__(self):
pass
def forward(self) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
_ = apply_rotary_pos_emb(1, 1, 1, 1)
|
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from examples/modular-transformers/modular_add_function.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_add_function.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# Note that zamba does not have the `apply_rotary_pos_emb` function!
from typing import Optional, Tuple
import torch
from torch import nn
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
class TestAttention(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
and "Generating Long Sequences with Sparse Transformers".
Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer
(see fig. 2 in https://huggingface.co/papers/2405.16712).
Additionally, replaced
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
"""
def __init__(self):
pass
def forward(self) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
_ = apply_rotary_pos_emb(1, 1, 1, 1)
|
import pathlib
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, JsonParser, Mapper, UnBatcher
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_accessor,
path_comparator,
)
from torchvision.prototype.tv_tensors import Label
from .._api import register_dataset, register_info
NAME = "clevr"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict()
@register_dataset(NAME)
class CLEVR(Dataset):
"""
- **homepage**: https://cs.stanford.edu/people/jcjohns/clevr/
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
archive = HttpResource(
"https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip",
sha256="5cd61cf1096ed20944df93c9adb31e74d189b8459a94f54ba00090e5c59936d1",
)
return [archive]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parents[1].name == "images":
return 0
elif path.parent.name == "scenes":
return 1
else:
return None
def _filter_scene_anns(self, data: Tuple[str, Any]) -> bool:
key, _ = data
return key == "scenes"
def _add_empty_anns(self, data: Tuple[str, BinaryIO]) -> Tuple[Tuple[str, BinaryIO], None]:
return data, None
def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Optional[Dict[str, Any]]]) -> Dict[str, Any]:
image_data, scenes_data = data
path, buffer = image_data
return dict(
path=path,
image=EncodedImage.from_file(buffer),
label=Label(len(scenes_data["objects"])) if scenes_data else None,
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
images_dp, scenes_dp = Demultiplexer(
archive_dp,
2,
self._classify_archive,
drop_none=True,
buffer_size=INFINITE_BUFFER_SIZE,
)
images_dp = Filter(images_dp, path_comparator("parent.name", self._split))
images_dp = hint_shuffling(images_dp)
images_dp = hint_sharding(images_dp)
if self._split != "test":
scenes_dp = Filter(scenes_dp, path_comparator("name", f"CLEVR_{self._split}_scenes.json"))
scenes_dp = JsonParser(scenes_dp)
scenes_dp = Mapper(scenes_dp, getitem(1, "scenes"))
scenes_dp = UnBatcher(scenes_dp)
dp = IterKeyZipper(
images_dp,
scenes_dp,
key_fn=path_accessor("name"),
ref_key_fn=getitem("image_filename"),
buffer_size=INFINITE_BUFFER_SIZE,
)
else:
for _, file in scenes_dp:
file.close()
dp = Mapper(images_dp, self._add_empty_anns)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 70_000 if self._split == "train" else 15_000
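# A minimal usage sketch (the root path is hypothetical): the datapipe yields
# dicts with the file path, the encoded image and, outside the "test" split,
# an object-count label taken from the scene annotations:
#
#     dataset = CLEVR("~/datasets", split="train")
#     sample = next(iter(dataset))
#     sample["image"]  # EncodedImage
#     sample["label"]  # Label(<number of objects in the scene>)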
|
import pathlib
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, JsonParser, Mapper, UnBatcher
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_accessor,
path_comparator,
)
from .._api import register_dataset, register_info
NAME = "clevr"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict()
@register_dataset(NAME)
class CLEVR(Dataset):
"""
- **homepage**: https://cs.stanford.edu/people/jcjohns/clevr/
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
archive = HttpResource(
"https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip",
sha256="5cd61cf1096ed20944df93c9adb31e74d189b8459a94f54ba00090e5c59936d1",
)
return [archive]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parents[1].name == "images":
return 0
elif path.parent.name == "scenes":
return 1
else:
return None
def _filter_scene_anns(self, data: Tuple[str, Any]) -> bool:
key, _ = data
return key == "scenes"
def _add_empty_anns(self, data: Tuple[str, BinaryIO]) -> Tuple[Tuple[str, BinaryIO], None]:
return data, None
def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Optional[Dict[str, Any]]]) -> Dict[str, Any]:
image_data, scenes_data = data
path, buffer = image_data
return dict(
path=path,
image=EncodedImage.from_file(buffer),
label=Label(len(scenes_data["objects"])) if scenes_data else None,
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
images_dp, scenes_dp = Demultiplexer(
archive_dp,
2,
self._classify_archive,
drop_none=True,
buffer_size=INFINITE_BUFFER_SIZE,
)
images_dp = Filter(images_dp, path_comparator("parent.name", self._split))
images_dp = hint_shuffling(images_dp)
images_dp = hint_sharding(images_dp)
if self._split != "test":
scenes_dp = Filter(scenes_dp, path_comparator("name", f"CLEVR_{self._split}_scenes.json"))
scenes_dp = JsonParser(scenes_dp)
scenes_dp = Mapper(scenes_dp, getitem(1, "scenes"))
scenes_dp = UnBatcher(scenes_dp)
dp = IterKeyZipper(
images_dp,
scenes_dp,
key_fn=path_accessor("name"),
ref_key_fn=getitem("image_filename"),
buffer_size=INFINITE_BUFFER_SIZE,
)
else:
for _, file in scenes_dp:
file.close()
dp = Mapper(images_dp, self._add_empty_anns)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 70_000 if self._split == "train" else 15_000
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp
import mmcv
from mmcv import Config
def parse_args():
parser = argparse.ArgumentParser(
description='Gather benchmarked models metric')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'root',
type=str,
help='root path of benchmarked models to be gathered')
parser.add_argument(
'--out', type=str, help='output path of gathered metrics to be stored')
parser.add_argument(
        '--not-show', action='store_true', help='do not show metrics')
parser.add_argument(
'--show-all', action='store_true', help='show all model metrics')
args = parser.parse_args()
return args
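# The config passed on the command line is expected to map model keys to one or
# more entries holding the model config path and the reference metrics, e.g.
# (illustrative values):
#
#     model1 = dict(
#         config='configs/retinanet/retinanet_r50_fpn_1x_coco.py',
#         metric=dict(bbox_mAP=36.5),
#     )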
if __name__ == '__main__':
args = parse_args()
root_path = args.root
metrics_out = args.out
result_dict = {}
cfg = Config.fromfile(args.config)
for model_key in cfg:
model_infos = cfg[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
record_metrics = model_info['metric']
config = model_info['config'].strip()
fname, _ = osp.splitext(osp.basename(config))
metric_json_dir = osp.join(root_path, fname)
if osp.exists(metric_json_dir):
json_list = glob.glob(osp.join(metric_json_dir, '*.json'))
if len(json_list) > 0:
log_json_path = list(sorted(json_list))[-1]
metric = mmcv.load(log_json_path)
if config in metric.get('config', {}):
new_metrics = dict()
for record_metric_key in record_metrics:
record_metric_key_bk = record_metric_key
old_metric = record_metrics[record_metric_key]
if record_metric_key == 'AR_1000':
record_metric_key = 'AR@1000'
if record_metric_key not in metric['metric']:
raise KeyError(
                                    'record_metric_key does not exist, please '
'check your config')
new_metric = round(
metric['metric'][record_metric_key] * 100, 1)
new_metrics[record_metric_key_bk] = new_metric
if args.show_all:
result_dict[config] = dict(
before=record_metrics, after=new_metrics)
else:
for record_metric_key in record_metrics:
old_metric = record_metrics[record_metric_key]
new_metric = new_metrics[record_metric_key]
if old_metric != new_metric:
result_dict[config] = dict(
before=record_metrics,
after=new_metrics)
break
else:
print(f'{config} not included in: {log_json_path}')
else:
                    print(f'{config}: no json file found in {metric_json_dir}')
else:
                print(f'{config}: directory does not exist: {metric_json_dir}')
if metrics_out:
mmcv.mkdir_or_exist(metrics_out)
mmcv.dump(result_dict,
osp.join(metrics_out, 'batch_test_metric_info.json'))
if not args.not_show:
print('===================================')
for config_name, metrics in result_dict.items():
print(config_name, metrics)
print('===================================')
|
import argparse
import glob
import os.path as osp
import mmcv
from mmcv import Config
def parse_args():
parser = argparse.ArgumentParser(
description='Gather benchmarked models metric')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'root',
type=str,
help='root path of benchmarked models to be gathered')
parser.add_argument(
'--out', type=str, help='output path of gathered metrics to be stored')
parser.add_argument(
        '--not-show', action='store_true', help='do not show metrics')
parser.add_argument(
'--show-all', action='store_true', help='show all model metrics')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
root_path = args.root
metrics_out = args.out
result_dict = {}
cfg = Config.fromfile(args.config)
for model_key in cfg:
model_infos = cfg[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
record_metrics = model_info['metric']
config = model_info['config'].strip()
fname, _ = osp.splitext(osp.basename(config))
metric_json_dir = osp.join(root_path, fname)
if osp.exists(metric_json_dir):
json_list = glob.glob(osp.join(metric_json_dir, '*.json'))
if len(json_list) > 0:
log_json_path = list(sorted(json_list))[-1]
metric = mmcv.load(log_json_path)
if config in metric.get('config', {}):
new_metrics = dict()
for record_metric_key in record_metrics:
record_metric_key_bk = record_metric_key
old_metric = record_metrics[record_metric_key]
if record_metric_key == 'AR_1000':
record_metric_key = 'AR@1000'
if record_metric_key not in metric['metric']:
raise KeyError(
                                    'record_metric_key does not exist, please '
'check your config')
new_metric = round(
metric['metric'][record_metric_key] * 100, 1)
new_metrics[record_metric_key_bk] = new_metric
if args.show_all:
result_dict[config] = dict(
before=record_metrics, after=new_metrics)
else:
for record_metric_key in record_metrics:
old_metric = record_metrics[record_metric_key]
new_metric = new_metrics[record_metric_key]
if old_metric != new_metric:
result_dict[config] = dict(
before=record_metrics,
after=new_metrics)
break
else:
print(f'{config} not included in: {log_json_path}')
else:
                    print(f'{config}: no json file found in {metric_json_dir}')
else:
                print(f'{config}: directory does not exist: {metric_json_dir}')
if metrics_out:
mmcv.mkdir_or_exist(metrics_out)
mmcv.dump(result_dict,
osp.join(metrics_out, 'batch_test_metric_info.json'))
if not args.not_show:
print('===================================')
for config_name, metrics in result_dict.items():
print(config_name, metrics)
print('===================================')
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.losses.losses import binary_crossentropy
from keras.src.losses.losses import binary_focal_crossentropy
from keras.src.losses.losses import categorical_crossentropy
from keras.src.losses.losses import categorical_focal_crossentropy
from keras.src.losses.losses import categorical_hinge
from keras.src.losses.losses import hinge
from keras.src.losses.losses import huber
from keras.src.losses.losses import kl_divergence
from keras.src.losses.losses import log_cosh
from keras.src.losses.losses import mean_absolute_error
from keras.src.losses.losses import mean_absolute_percentage_error
from keras.src.losses.losses import mean_squared_error
from keras.src.losses.losses import mean_squared_logarithmic_error
from keras.src.losses.losses import poisson
from keras.src.losses.losses import sparse_categorical_crossentropy
from keras.src.losses.losses import squared_hinge
from keras.src.metrics import deserialize
from keras.src.metrics import get
from keras.src.metrics import serialize
from keras.src.metrics.accuracy_metrics import Accuracy
from keras.src.metrics.accuracy_metrics import BinaryAccuracy
from keras.src.metrics.accuracy_metrics import CategoricalAccuracy
from keras.src.metrics.accuracy_metrics import SparseCategoricalAccuracy
from keras.src.metrics.accuracy_metrics import SparseTopKCategoricalAccuracy
from keras.src.metrics.accuracy_metrics import TopKCategoricalAccuracy
from keras.src.metrics.accuracy_metrics import binary_accuracy
from keras.src.metrics.accuracy_metrics import categorical_accuracy
from keras.src.metrics.accuracy_metrics import sparse_categorical_accuracy
from keras.src.metrics.accuracy_metrics import sparse_top_k_categorical_accuracy
from keras.src.metrics.accuracy_metrics import top_k_categorical_accuracy
from keras.src.metrics.confusion_metrics import AUC
from keras.src.metrics.confusion_metrics import FalseNegatives
from keras.src.metrics.confusion_metrics import FalsePositives
from keras.src.metrics.confusion_metrics import Precision
from keras.src.metrics.confusion_metrics import PrecisionAtRecall
from keras.src.metrics.confusion_metrics import Recall
from keras.src.metrics.confusion_metrics import RecallAtPrecision
from keras.src.metrics.confusion_metrics import SensitivityAtSpecificity
from keras.src.metrics.confusion_metrics import SpecificityAtSensitivity
from keras.src.metrics.confusion_metrics import TrueNegatives
from keras.src.metrics.confusion_metrics import TruePositives
from keras.src.metrics.correlation_metrics import ConcordanceCorrelation
from keras.src.metrics.correlation_metrics import PearsonCorrelation
from keras.src.metrics.correlation_metrics import concordance_correlation
from keras.src.metrics.correlation_metrics import pearson_correlation
from keras.src.metrics.f_score_metrics import F1Score
from keras.src.metrics.f_score_metrics import FBetaScore
from keras.src.metrics.hinge_metrics import CategoricalHinge
from keras.src.metrics.hinge_metrics import Hinge
from keras.src.metrics.hinge_metrics import SquaredHinge
from keras.src.metrics.iou_metrics import BinaryIoU
from keras.src.metrics.iou_metrics import IoU
from keras.src.metrics.iou_metrics import MeanIoU
from keras.src.metrics.iou_metrics import OneHotIoU
from keras.src.metrics.iou_metrics import OneHotMeanIoU
from keras.src.metrics.metric import Metric
from keras.src.metrics.probabilistic_metrics import BinaryCrossentropy
from keras.src.metrics.probabilistic_metrics import CategoricalCrossentropy
from keras.src.metrics.probabilistic_metrics import KLDivergence
from keras.src.metrics.probabilistic_metrics import Poisson
from keras.src.metrics.probabilistic_metrics import (
SparseCategoricalCrossentropy,
)
from keras.src.metrics.reduction_metrics import Mean
from keras.src.metrics.reduction_metrics import MeanMetricWrapper
from keras.src.metrics.reduction_metrics import Sum
from keras.src.metrics.regression_metrics import CosineSimilarity
from keras.src.metrics.regression_metrics import LogCoshError
from keras.src.metrics.regression_metrics import MeanAbsoluteError
from keras.src.metrics.regression_metrics import MeanAbsolutePercentageError
from keras.src.metrics.regression_metrics import MeanSquaredError
from keras.src.metrics.regression_metrics import MeanSquaredLogarithmicError
from keras.src.metrics.regression_metrics import R2Score
from keras.src.metrics.regression_metrics import RootMeanSquaredError
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.losses.losses import binary_crossentropy
from keras.src.losses.losses import binary_focal_crossentropy
from keras.src.losses.losses import categorical_crossentropy
from keras.src.losses.losses import categorical_focal_crossentropy
from keras.src.losses.losses import categorical_hinge
from keras.src.losses.losses import hinge
from keras.src.losses.losses import huber
from keras.src.losses.losses import kl_divergence
from keras.src.losses.losses import log_cosh
from keras.src.losses.losses import mean_absolute_error
from keras.src.losses.losses import mean_absolute_percentage_error
from keras.src.losses.losses import mean_squared_error
from keras.src.losses.losses import mean_squared_logarithmic_error
from keras.src.losses.losses import poisson
from keras.src.losses.losses import sparse_categorical_crossentropy
from keras.src.losses.losses import squared_hinge
from keras.src.metrics import deserialize
from keras.src.metrics import get
from keras.src.metrics import serialize
from keras.src.metrics.accuracy_metrics import Accuracy
from keras.src.metrics.accuracy_metrics import BinaryAccuracy
from keras.src.metrics.accuracy_metrics import CategoricalAccuracy
from keras.src.metrics.accuracy_metrics import SparseCategoricalAccuracy
from keras.src.metrics.accuracy_metrics import SparseTopKCategoricalAccuracy
from keras.src.metrics.accuracy_metrics import TopKCategoricalAccuracy
from keras.src.metrics.accuracy_metrics import binary_accuracy
from keras.src.metrics.accuracy_metrics import categorical_accuracy
from keras.src.metrics.accuracy_metrics import sparse_categorical_accuracy
from keras.src.metrics.accuracy_metrics import sparse_top_k_categorical_accuracy
from keras.src.metrics.accuracy_metrics import top_k_categorical_accuracy
from keras.src.metrics.confusion_metrics import AUC
from keras.src.metrics.confusion_metrics import FalseNegatives
from keras.src.metrics.confusion_metrics import FalsePositives
from keras.src.metrics.confusion_metrics import Precision
from keras.src.metrics.confusion_metrics import PrecisionAtRecall
from keras.src.metrics.confusion_metrics import Recall
from keras.src.metrics.confusion_metrics import RecallAtPrecision
from keras.src.metrics.confusion_metrics import SensitivityAtSpecificity
from keras.src.metrics.confusion_metrics import SpecificityAtSensitivity
from keras.src.metrics.confusion_metrics import TrueNegatives
from keras.src.metrics.confusion_metrics import TruePositives
from keras.src.metrics.f_score_metrics import F1Score
from keras.src.metrics.f_score_metrics import FBetaScore
from keras.src.metrics.hinge_metrics import CategoricalHinge
from keras.src.metrics.hinge_metrics import Hinge
from keras.src.metrics.hinge_metrics import SquaredHinge
from keras.src.metrics.iou_metrics import BinaryIoU
from keras.src.metrics.iou_metrics import IoU
from keras.src.metrics.iou_metrics import MeanIoU
from keras.src.metrics.iou_metrics import OneHotIoU
from keras.src.metrics.iou_metrics import OneHotMeanIoU
from keras.src.metrics.metric import Metric
from keras.src.metrics.probabilistic_metrics import BinaryCrossentropy
from keras.src.metrics.probabilistic_metrics import CategoricalCrossentropy
from keras.src.metrics.probabilistic_metrics import KLDivergence
from keras.src.metrics.probabilistic_metrics import Poisson
from keras.src.metrics.probabilistic_metrics import (
SparseCategoricalCrossentropy,
)
from keras.src.metrics.reduction_metrics import Mean
from keras.src.metrics.reduction_metrics import MeanMetricWrapper
from keras.src.metrics.reduction_metrics import Sum
from keras.src.metrics.regression_metrics import CosineSimilarity
from keras.src.metrics.regression_metrics import LogCoshError
from keras.src.metrics.regression_metrics import MeanAbsoluteError
from keras.src.metrics.regression_metrics import MeanAbsolutePercentageError
from keras.src.metrics.regression_metrics import MeanSquaredError
from keras.src.metrics.regression_metrics import MeanSquaredLogarithmicError
from keras.src.metrics.regression_metrics import R2Score
from keras.src.metrics.regression_metrics import RootMeanSquaredError
|
from typing import List
import numpy as np
from torch.utils.data import Dataset
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from sentence_transformers.readers.InputExample import InputExample
class DenoisingAutoEncoderDataset(Dataset):
"""
The DenoisingAutoEncoderDataset returns InputExamples in the format: texts=[noise_fn(sentence), sentence]
    It is used in combination with the DenoisingAutoEncoderLoss: here, a decoder tries to reconstruct the
    sentence without noise.
Args:
sentences: A list of sentences
noise_fn: A noise function: Given a string, it returns a string
with noise, e.g. deleted words
"""
def __init__(self, sentences: List[str], noise_fn=lambda s: DenoisingAutoEncoderDataset.delete(s)):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.sentences = sentences
self.noise_fn = noise_fn
def __getitem__(self, item):
sent = self.sentences[item]
return InputExample(texts=[self.noise_fn(sent), sent])
def __len__(self):
return len(self.sentences)
# Deletion noise.
@staticmethod
def delete(text, del_ratio=0.6):
from nltk import word_tokenize
from nltk.tokenize.treebank import TreebankWordDetokenizer
words = word_tokenize(text)
n = len(words)
if n == 0:
return text
keep_or_not = np.random.rand(n) > del_ratio
if sum(keep_or_not) == 0:
keep_or_not[np.random.choice(n)] = True # guarantee that at least one word remains
words_processed = TreebankWordDetokenizer().detokenize(np.array(words)[keep_or_not])
return words_processed
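# A minimal usage sketch; assumes nltk plus its 'punkt' tokenizer data are
# installed. texts[0] is the noised sentence, texts[1] the original.
if __name__ == "__main__":
    dataset = DenoisingAutoEncoderDataset(["The quick brown fox jumps over the lazy dog."])
    example = dataset[0]  # InputExample with ~60% of the words deleted in texts[0]
    print(example.texts)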
|
from typing import List
import numpy as np
from torch.utils.data import Dataset
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from sentence_transformers.readers.InputExample import InputExample
class DenoisingAutoEncoderDataset(Dataset):
"""
The DenoisingAutoEncoderDataset returns InputExamples in the format: texts=[noise_fn(sentence), sentence]
    It is used in combination with the DenoisingAutoEncoderLoss: here, a decoder tries to reconstruct the
    sentence without noise.
Args:
sentences: A list of sentences
noise_fn: A noise function: Given a string, it returns a string
with noise, e.g. deleted words
"""
def __init__(self, sentences: List[str], noise_fn=lambda s: DenoisingAutoEncoderDataset.delete(s)):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.sentences = sentences
self.noise_fn = noise_fn
def __getitem__(self, item):
sent = self.sentences[item]
return InputExample(texts=[self.noise_fn(sent), sent])
def __len__(self):
return len(self.sentences)
# Deletion noise.
@staticmethod
def delete(text, del_ratio=0.6):
from nltk import TreebankWordDetokenizer, word_tokenize
words = word_tokenize(text)
n = len(words)
if n == 0:
return text
keep_or_not = np.random.rand(n) > del_ratio
if sum(keep_or_not) == 0:
keep_or_not[np.random.choice(n)] = True # guarantee that at least one word remains
words_processed = TreebankWordDetokenizer().detokenize(np.array(words)[keep_or_not])
return words_processed
|
from typing import TYPE_CHECKING, Dict, Iterable
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SequentialEvaluator(SentenceEvaluator):
"""
    This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
    the data is passed sequentially to all sub-evaluators.
    All scores are passed to 'main_score_function', which derives one final score value.
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
"""
Initializes a SequentialEvaluator object.
Args:
evaluators (Iterable[SentenceEvaluator]): A collection of SentenceEvaluator objects.
main_score_function (function, optional): A function that takes a list of scores and returns the main score.
Defaults to selecting the last score in the list.
Example:
::
evaluator1 = BinaryClassificationEvaluator(...)
evaluator2 = InformationRetrievalEvaluator(...)
evaluator3 = MSEEvaluator(...)
seq_evaluator = SequentialEvaluator([evaluator1, evaluator2, evaluator3])
"""
super().__init__()
self.evaluators = evaluators
self.main_score_function = main_score_function
def __call__(
self, model: "SentenceTransformer", output_path: str = None, epoch: int = -1, steps: int = -1
) -> Dict[str, float]:
evaluations = []
scores = []
for evaluator_idx, evaluator in enumerate(self.evaluators):
evaluation = evaluator(model, output_path, epoch, steps)
if not isinstance(evaluation, dict):
scores.append(evaluation)
evaluation = {f"evaluator_{evaluator_idx}": evaluation}
else:
if hasattr(evaluation, "primary_metric"):
scores.append(evaluation[evaluation.primary_metric])
else:
scores.append(evaluation[list(evaluation.keys())[0]])
evaluations.append(evaluation)
self.primary_metric = "sequential_score"
main_score = self.main_score_function(scores)
results = {key: value for evaluation in evaluations for key, value in evaluation.items()}
results["sequential_score"] = main_score
return results
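# A minimal sketch (with hypothetical sub-evaluators) of overriding
# main_score_function, e.g. to average the sub-scores instead of taking the
# last one:
#
#     seq_evaluator = SequentialEvaluator(
#         [evaluator1, evaluator2],
#         main_score_function=lambda scores: sum(scores) / len(scores),
#     )
#     results = seq_evaluator(model)  # results["sequential_score"] is the mean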
|
from sentence_transformers import SentenceTransformer
from . import SentenceEvaluator
from typing import Dict, Iterable
class SequentialEvaluator(SentenceEvaluator):
"""
    This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
    the data is passed sequentially to all sub-evaluators.
    All scores are passed to 'main_score_function', which derives one final score value.
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
"""
Initializes a SequentialEvaluator object.
Args:
evaluators (Iterable[SentenceEvaluator]): A collection of SentenceEvaluator objects.
main_score_function (function, optional): A function that takes a list of scores and returns the main score.
Defaults to selecting the last score in the list.
Example:
::
evaluator1 = BinaryClassificationEvaluator(...)
evaluator2 = InformationRetrievalEvaluator(...)
evaluator3 = MSEEvaluator(...)
seq_evaluator = SequentialEvaluator([evaluator1, evaluator2, evaluator3])
"""
super().__init__()
self.evaluators = evaluators
self.main_score_function = main_score_function
def __call__(
self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
) -> Dict[str, float]:
evaluations = []
scores = []
for evaluator_idx, evaluator in enumerate(self.evaluators):
evaluation = evaluator(model, output_path, epoch, steps)
if not isinstance(evaluation, dict):
scores.append(evaluation)
evaluation = {f"evaluator_{evaluator_idx}": evaluation}
else:
if hasattr(evaluation, "primary_metric"):
scores.append(evaluation[evaluation.primary_metric])
else:
scores.append(evaluation[list(evaluation.keys())[0]])
evaluations.append(evaluation)
self.primary_metric = "sequential_score"
main_score = self.main_score_function(scores)
results = {key: value for evaluation in evaluations for key, value in evaluation.items()}
results["sequential_score"] = main_score
return results
|
import pytest
from jina import Flow
from jina.enums import GatewayProtocolType
from tests import random_docs
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
@pytest.mark.parametrize('changeto_protocol', ['grpc', 'http', 'websocket'])
def test_change_gateway(protocol, changeto_protocol):
f = Flow(protocol=protocol).add().add().add(needs='executor1').needs_all()
with f:
da = f.post('/', random_docs(10))
assert len(da) == 10
with pytest.raises(RuntimeError):
f.protocol = changeto_protocol
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
def test_get_set_client_gateway_in_flow(protocol):
f = Flow(protocol=protocol, port=12345)
assert f.client_args.protocol == GatewayProtocolType.from_string(protocol)
assert f.gateway_args.protocol == GatewayProtocolType.from_string(protocol)
assert int(f.client_args.port) == 12345
assert int(f.gateway_args.port) == 12345
f._update_network_interface(port=54321)
assert int(f.client_args.port) == 54321
assert int(f.gateway_args.port) == 54321
|
import pytest
from jina import Flow
from jina.enums import GatewayProtocolType
from tests import random_docs
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
@pytest.mark.parametrize('changeto_protocol', ['grpc', 'http', 'websocket'])
def test_change_gateway(protocol, changeto_protocol):
f = Flow(protocol=protocol).add().add().add(needs='executor1').needs_all()
with f:
da = f.post('/', random_docs(10))
assert len(da) == 10
with pytest.raises(RuntimeError):
f.protocol = changeto_protocol
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
def test_get_set_client_gateway_in_flow(protocol):
f = Flow(protocol=protocol, port=12345)
assert f.client_args.protocol == GatewayProtocolType.from_string(protocol)
assert f.gateway_args.protocol == GatewayProtocolType.from_string(protocol)
assert f.client_args.port == 12345
assert f.gateway_args.port == 12345
f._update_network_interface(port=54321)
assert f.client_args.port == 54321
assert f.gateway_args.port == 54321
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.10.1.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.10.0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from argparse import Namespace
def deployment(args: 'Namespace'):
"""
Start a Deployment
:param args: arguments coming from the CLI.
"""
from jina.orchestrate.deployments import Deployment
try:
with Deployment(args) as d:
d.join()
except KeyboardInterrupt:
pass
def pod(args: 'Namespace'):
"""
Start a Pod
:param args: arguments coming from the CLI.
"""
from jina.orchestrate.pods.factory import PodFactory
try:
with PodFactory.build_pod(args) as p:
p.join()
except KeyboardInterrupt:
pass
def executor_native(args: 'Namespace'):
"""
Starts an Executor in a WorkerRuntime
:param args: arguments coming from the CLI.
"""
if args.runtime_cls == 'WorkerRuntime':
from jina.serve.runtimes.worker import WorkerRuntime
runtime_cls = WorkerRuntime
elif args.runtime_cls == 'HeadRuntime':
from jina.serve.runtimes.head import HeadRuntime
runtime_cls = HeadRuntime
else:
raise RuntimeError(
            f'runtime_cls {args.runtime_cls} is not supported with the `--native` argument; use `WorkerRuntime` or `HeadRuntime`'
)
with runtime_cls(args) as rt:
name = (
rt._data_request_handler._executor.metas.name
if hasattr(rt, '_data_request_handler')
else rt.name
)
rt.logger.info(f'Executor {name} started')
rt.run_forever()
def executor(args: 'Namespace'):
"""
Starts an Executor in any Runtime
:param args: arguments coming from the CLI.
    :returns: the same as `pod` or `executor_native`
"""
if args.native:
return executor_native(args)
else:
return pod(args)
def worker_runtime(args: 'Namespace'):
"""
Starts a WorkerRuntime
:param args: arguments coming from the CLI.
"""
from jina.serve.runtimes.worker import WorkerRuntime
with WorkerRuntime(args) as runtime:
runtime.logger.info(
f'Executor {runtime._data_request_handler._executor.metas.name} started'
)
runtime.run_forever()
def gateway(args: 'Namespace'):
"""
Start a Gateway Deployment
:param args: arguments coming from the CLI.
"""
from jina.enums import GatewayProtocolType
from jina.serve.runtimes import get_runtime
gateway_runtime_dict = {
GatewayProtocolType.GRPC: 'GRPCGatewayRuntime',
GatewayProtocolType.WEBSOCKET: 'WebSocketGatewayRuntime',
GatewayProtocolType.HTTP: 'HTTPGatewayRuntime',
}
runtime_cls = get_runtime(gateway_runtime_dict[args.protocol])
with runtime_cls(args) as runtime:
runtime.logger.info(
f'Gateway with protocol {gateway_runtime_dict[args.protocol]} started'
)
runtime.run_forever()
def ping(args: 'Namespace'):
"""
Check the connectivity of a Pod
:param args: arguments coming from the CLI.
"""
from jina.checker import NetworkChecker
NetworkChecker(args)
def dryrun(args: 'Namespace'):
"""
Check the health of a Flow
:param args: arguments coming from the CLI.
"""
from jina.checker import dry_run_checker
dry_run_checker(args)
def client(args: 'Namespace'):
"""
    Start a client that connects to the gateway
:param args: arguments coming from the CLI.
"""
from jina.clients import Client
Client(args)
def export(args: 'Namespace'):
"""
Export the API
:param args: arguments coming from the CLI.
"""
from jina import exporter
getattr(exporter, f'export_{args.export.replace("-", "_")}')(args)
def flow(args: 'Namespace'):
"""
Start a Flow from a YAML file or a docker image
:param args: arguments coming from the CLI.
"""
from jina import Flow
if args.uses:
f = Flow.load_config(args.uses)
with f:
f.block()
else:
        raise ValueError('starting a Flow from the CLI requires a valid `--uses`')
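# A command-line usage sketch (the YAML file name is hypothetical):
#
#     jina flow --uses flow.yml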
def hub(args: 'Namespace'):
"""
Start a hub builder for push, pull
:param args: arguments coming from the CLI.
"""
from jina.hubble.hubio import HubIO
getattr(HubIO(args), args.hub)()
def new(args: 'Namespace'):
"""
Create a new jina project
:param args: arguments coming from the CLI.
"""
import os
import shutil
from jina import __resources_path__
shutil.copytree(
os.path.join(__resources_path__, 'project-template'), os.path.abspath(args.name)
)
def help(args: 'Namespace'):
"""
    Look up the usage of a certain argument in the Jina API.
:param args: arguments coming from the CLI.
"""
from jina_cli.lookup import lookup_and_print
lookup_and_print(args.query.lower())
|
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from argparse import Namespace
def deployment(args: 'Namespace'):
"""
Start a Deployment
:param args: arguments coming from the CLI.
"""
from jina.orchestrate.deployments import Deployment
try:
with Deployment(args) as d:
d.join()
except KeyboardInterrupt:
pass
def pod(args: 'Namespace'):
"""
Start a Pod
:param args: arguments coming from the CLI.
"""
from jina.orchestrate.pods.factory import PodFactory
try:
with PodFactory.build_pod(args) as p:
p.join()
except KeyboardInterrupt:
pass
def executor_native(args: 'Namespace'):
"""
Starts an Executor in a WorkerRuntime
:param args: arguments coming from the CLI.
"""
if args.runtime_cls == 'WorkerRuntime':
from jina.serve.runtimes.worker import WorkerRuntime
runtime_cls = WorkerRuntime
elif args.runtime_cls == 'HeadRuntime':
from jina.serve.runtimes.head import HeadRuntime
runtime_cls = HeadRuntime
else:
raise RuntimeError(
            f'runtime_cls {args.runtime_cls} is not supported with the `--native` argument; use `WorkerRuntime` or `HeadRuntime`'
)
with runtime_cls(args) as rt:
name = (
rt._data_request_handler._executor.metas.name
if hasattr(rt, '_data_request_handler')
else rt.name
)
rt.logger.info(f'Executor {name} started')
rt.run_forever()
def executor(args: 'Namespace'):
"""
Starts an Executor in any Runtime
:param args: arguments coming from the CLI.
    :returns: the same as `pod` or `executor_native`
"""
if args.native:
return executor_native(args)
else:
return pod(args)
def worker_runtime(args: 'Namespace'):
"""
Starts a WorkerRuntime
:param args: arguments coming from the CLI.
"""
from jina.serve.runtimes.worker import WorkerRuntime
with WorkerRuntime(args) as runtime:
runtime.logger.info(
f'Executor {runtime._data_request_handler._executor.metas.name} started'
)
runtime.run_forever()
def gateway(args: 'Namespace'):
"""
Start a Gateway Deployment
:param args: arguments coming from the CLI.
"""
from jina.enums import GatewayProtocolType
from jina.serve.runtimes import get_runtime
gateway_runtime_dict = {
GatewayProtocolType.GRPC: 'GRPCGatewayRuntime',
GatewayProtocolType.WEBSOCKET: 'WebSocketGatewayRuntime',
GatewayProtocolType.HTTP: 'HTTPGatewayRuntime',
}
runtime_cls = get_runtime(gateway_runtime_dict[args.protocol])
with runtime_cls(args) as runtime:
runtime.logger.info(
f'Gateway with protocol {gateway_runtime_dict[args.protocol]} started'
)
runtime.run_forever()
def ping(args: 'Namespace'):
"""
Check the connectivity of a Pod
:param args: arguments coming from the CLI.
"""
from jina.checker import NetworkChecker
NetworkChecker(args)
def client(args: 'Namespace'):
"""
    Start a client that connects to the gateway
:param args: arguments coming from the CLI.
"""
from jina.clients import Client
Client(args)
def export(args: 'Namespace'):
"""
Export the API
:param args: arguments coming from the CLI.
"""
from jina import exporter
getattr(exporter, f'export_{args.export.replace("-", "_")}')(args)
def flow(args: 'Namespace'):
"""
Start a Flow from a YAML file or a docker image
:param args: arguments coming from the CLI.
"""
from jina import Flow
if args.uses:
f = Flow.load_config(args.uses)
with f:
f.block()
else:
        raise ValueError('starting a Flow from the CLI requires a valid `--uses`')
def hub(args: 'Namespace'):
"""
Start a hub builder for push, pull
:param args: arguments coming from the CLI.
"""
from jina.hubble.hubio import HubIO
getattr(HubIO(args), args.hub)()
def new(args: 'Namespace'):
"""
Create a new jina project
:param args: arguments coming from the CLI.
"""
import os
import shutil
from jina import __resources_path__
shutil.copytree(
os.path.join(__resources_path__, 'project-template'), os.path.abspath(args.name)
)
def help(args: 'Namespace'):
"""
    Look up the usage of a certain argument in the Jina API.
:param args: arguments coming from the CLI.
"""
from jina_cli.lookup import lookup_and_print
lookup_and_print(args.query.lower())
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_sampler import BaseSampler
from .combined_sampler import CombinedSampler
from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
from .iou_balanced_neg_sampler import IoUBalancedNegSampler
from .mask_pseudo_sampler import MaskPseudoSampler
from .mask_sampling_result import MaskSamplingResult
from .ohem_sampler import OHEMSampler
from .pseudo_sampler import PseudoSampler
from .random_sampler import RandomSampler
from .sampling_result import SamplingResult
from .score_hlr_sampler import ScoreHLRSampler
__all__ = [
'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'MaskPseudoSampler',
'MaskSamplingResult'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_sampler import BaseSampler
from .combined_sampler import CombinedSampler
from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
from .iou_balanced_neg_sampler import IoUBalancedNegSampler
from .ohem_sampler import OHEMSampler
from .pseudo_sampler import PseudoSampler
from .random_sampler import RandomSampler
from .sampling_result import SamplingResult
from .score_hlr_sampler import ScoreHLRSampler
__all__ = [
'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .mean_teacher_hook import MeanTeacherHook
from .memory_profiler_hook import MemoryProfilerHook
from .num_class_check_hook import NumClassCheckHook
from .pipeline_switch_hook import PipelineSwitchHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .utils import trigger_visualization_hook
from .visualization_hook import DetVisualizationHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'YOLOXModeSwitchHook', 'SyncNormHook', 'CheckInvalidLossHook',
'SetEpochInfoHook', 'MemoryProfilerHook', 'DetVisualizationHook',
'NumClassCheckHook', 'MeanTeacherHook', 'trigger_visualization_hook',
'PipelineSwitchHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .mean_teacher_hook import MeanTeacherHook
from .memory_profiler_hook import MemoryProfilerHook
from .num_class_check_hook import NumClassCheckHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .utils import trigger_visualization_hook
from .visualization_hook import DetVisualizationHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'YOLOXModeSwitchHook', 'SyncNormHook', 'CheckInvalidLossHook',
'SetEpochInfoHook', 'MemoryProfilerHook', 'DetVisualizationHook',
'NumClassCheckHook', 'MeanTeacherHook', 'trigger_visualization_hook'
]
|
import urllib.request
from typing import List
from defusedxml.ElementTree import fromstring
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.web.async_web.base import AsyncWebPageReader
class SitemapReader(BaseReader):
"""
    Asynchronous sitemap reader for the web.
Reads pages from the web based on their sitemap.xml.
Args:
sitemap_url (string): Path to the sitemap.xml. e.g. https://gpt-index.readthedocs.io/sitemap.xml
html_to_text (bool): Whether to convert HTML to text.
Requires `html2text` package.
limit (int): Maximum number of concurrent requests.
"""
xml_schema_sitemap = "http://www.sitemaps.org/schemas/sitemap/0.9"
def __init__(self, html_to_text: bool = False, limit: int = 10) -> None:
"""Initialize with parameters."""
self._async_loader = AsyncWebPageReader(html_to_text=html_to_text, limit=limit)
self._html_to_text = html_to_text
self._limit = limit
def _load_sitemap(self, sitemap_url: str) -> str:
sitemap_url_request = urllib.request.urlopen(sitemap_url)
return sitemap_url_request.read()
def _parse_sitemap(self, raw_sitemap: str, filter_locs: str = None) -> list:
sitemap = fromstring(raw_sitemap)
sitemap_urls = []
for url in sitemap.findall(f"{{{self.xml_schema_sitemap}}}url"):
location = url.find(f"{{{self.xml_schema_sitemap}}}loc").text
if filter_locs is None or filter_locs in location:
sitemap_urls.append(location)
return sitemap_urls
def load_data(self, sitemap_url: str, filter: str = None) -> List[Document]:
sitemap = self._load_sitemap(sitemap_url=sitemap_url)
sitemap_urls = self._parse_sitemap(sitemap, filter)
return self._async_loader.load_data(urls=sitemap_urls)
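# A minimal usage sketch (the filter value is illustrative): loads every page
# whose <loc> entry contains the given substring:
#
#     reader = SitemapReader(html_to_text=True, limit=5)
#     documents = reader.load_data(
#         sitemap_url="https://gpt-index.readthedocs.io/sitemap.xml",
#         filter="/en/stable/",
#     )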
|
import urllib.request
import xml.etree.ElementTree as ET
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.web.async_web.base import AsyncWebPageReader
class SitemapReader(BaseReader):
"""Asynchronous sitemap reader for web.
Reads pages from the web based on their sitemap.xml.
Args:
sitemap_url (string): Path to the sitemap.xml. e.g. https://gpt-index.readthedocs.io/sitemap.xml
html_to_text (bool): Whether to convert HTML to text.
Requires `html2text` package.
limit (int): Maximum number of concurrent requests.
"""
xml_schema_sitemap = "http://www.sitemaps.org/schemas/sitemap/0.9"
def __init__(self, html_to_text: bool = False, limit: int = 10) -> None:
"""Initialize with parameters."""
self._async_loader = AsyncWebPageReader(html_to_text=html_to_text, limit=limit)
self._html_to_text = html_to_text
self._limit = limit
def _load_sitemap(self, sitemap_url: str) -> str:
sitemap_url_request = urllib.request.urlopen(sitemap_url)
return sitemap_url_request.read()
def _parse_sitemap(self, raw_sitemap: str, filter_locs: str = None) -> list:
sitemap = ET.fromstring(raw_sitemap)
sitemap_urls = []
for url in sitemap.findall(f"{{{self.xml_schema_sitemap}}}url"):
location = url.find(f"{{{self.xml_schema_sitemap}}}loc").text
if filter_locs is None or filter_locs in location:
sitemap_urls.append(location)
return sitemap_urls
def load_data(self, sitemap_url: str, filter: str = None) -> List[Document]:
sitemap = self._load_sitemap(sitemap_url=sitemap_url)
sitemap_urls = self._parse_sitemap(sitemap, filter)
return self._async_loader.load_data(urls=sitemap_urls)
|
import jax
from jax import numpy as jnp
from keras.src.optimizers import base_optimizer
class JaxOptimizer(base_optimizer.BaseOptimizer):
"""A class for JAX specific optimizer logic.
Its purpose is to route around statelessness
requirements in cond ops used for EMA handling
and gradient accumulation handling. We do this
by skipping conditionals entirely.
"""
def _backend_apply_gradients(self, grads, trainable_variables):
if self.gradient_accumulation_steps:
is_update_step = (
self._iterations + 1
) % self.gradient_accumulation_steps == 0
steps = self.gradient_accumulation_steps
current_trainable_vars_value = [
v.value for v in trainable_variables
]
current_optimizer_vars_value = [v.value for v in self.variables]
# `trainable_variables` might have been filtered in previous
# processing steps, so we need to ensure the correct mapping between
# `self._accumulated_gradients` and `trainable_variables`
acc_grads = [
self._accumulated_gradients[self._get_variable_index(v)]
for v in trainable_variables
]
new_g_accs = jax.lax.cond(
is_update_step,
lambda: [jnp.zeros(g.shape, dtype=g.dtype) for g in acc_grads],
lambda: [g + acc_g for g, acc_g in zip(grads, acc_grads)],
)
grads = jax.lax.cond(
is_update_step,
lambda: [
(g + acc_g) / steps for g, acc_g in zip(grads, acc_grads)
],
lambda: list(grads),
)
# Apply clipping and weight decay.
grads = self._clip_gradients(grads)
self._apply_weight_decay(trainable_variables)
self._backend_update_step(
grads, trainable_variables, self.learning_rate
)
new_trainable_vars = jax.lax.cond(
is_update_step,
lambda: [v.value for v in trainable_variables],
lambda: current_trainable_vars_value,
)
new_opt_vars = jax.lax.cond(
is_update_step,
lambda: [v.value for v in self.variables],
lambda: current_optimizer_vars_value,
)
for value, v in zip(new_trainable_vars, trainable_variables):
v.assign(value)
for value, v in zip(new_opt_vars, self.variables):
v.assign(value)
for n_g_acc, g_acc in zip(new_g_accs, acc_grads):
g_acc.assign(n_g_acc)
else:
# Apply clipping and weight decay.
grads = self._clip_gradients(grads)
self._apply_weight_decay(trainable_variables)
self._backend_update_step(
grads, trainable_variables, self.learning_rate
)
if self.use_ema:
self._update_model_variables_moving_average(
self._trainable_variables
)
if self.ema_overwrite_frequency is not None:
should_overwrite_model_vars = (
self.iterations + 1
) % self.ema_overwrite_frequency == 0
should_overwrite_model_vars_int = (
should_overwrite_model_vars.astype("int32")
)
should_not_overwrite_model_vars_int = jnp.logical_not(
should_overwrite_model_vars
).astype("int32")
current_trainable_vars_value = [
v.value for v in self._trainable_variables
]
for var, average_var in zip(
self._trainable_variables,
self._model_variables_moving_average,
):
var.assign(
average_var * should_overwrite_model_vars_int
+ var.value * should_not_overwrite_model_vars_int
)
self._iterations.assign_add(1)
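# A minimal sketch (hypothetical training setup) of the accumulation behavior:
# with gradient_accumulation_steps=4, gradients are summed for three calls and
# applied, averaged over the four steps, on every fourth call:
#
#     optimizer = keras.optimizers.SGD(learning_rate=0.1,
#                                      gradient_accumulation_steps=4)
#     optimizer.apply_gradients(zip(grads, model.trainable_variables))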
|
import jax
from jax import numpy as jnp
from keras.src.optimizers import base_optimizer
class JaxOptimizer(base_optimizer.BaseOptimizer):
"""A class for JAX specific optimizer logic.
Its purpose is to route around statelessness
requirements in cond ops used for EMA handling
and gradient accumulation handling. We do this
by skipping conditionals entirely.
"""
def _backend_apply_gradients(self, grads, trainable_variables):
if self.gradient_accumulation_steps:
is_update_step = (
self.iterations + 1
) % self.gradient_accumulation_steps == 0
steps = self.gradient_accumulation_steps
current_trainable_vars_value = [
v.value for v in trainable_variables
]
current_optimizer_vars_value = [v.value for v in self.variables]
# `trainable_variables` might have been filtered in previous
# processing steps, so we need to ensure the correct mapping between
# `self._accumulated_gradients` and `trainable_variables`
acc_grads = [
self._accumulated_gradients[self._get_variable_index(v)]
for v in trainable_variables
]
new_g_accs = jax.lax.cond(
is_update_step,
lambda: [jnp.zeros(g.shape, dtype=g.dtype) for g in acc_grads],
lambda: [g + acc_g for g, acc_g in zip(grads, acc_grads)],
)
grads = jax.lax.cond(
is_update_step,
lambda: [
(g + acc_g) / steps for g, acc_g in zip(grads, acc_grads)
],
lambda: list(grads),
)
self._backend_update_step(
grads, trainable_variables, self.learning_rate
)
new_trainable_vars = jax.lax.cond(
is_update_step,
lambda: [v.value for v in trainable_variables],
lambda: current_trainable_vars_value,
)
new_opt_vars = jax.lax.cond(
is_update_step,
lambda: [v.value for v in self.variables],
lambda: current_optimizer_vars_value,
)
for value, v in zip(new_trainable_vars, trainable_variables):
v.assign(value)
for value, v in zip(new_opt_vars, self.variables):
v.assign(value)
for n_g_acc, g_acc in zip(new_g_accs, acc_grads):
g_acc.assign(n_g_acc)
else:
self._backend_update_step(
grads, trainable_variables, self.learning_rate
)
if self.use_ema:
self._update_model_variables_moving_average(
self._trainable_variables
)
if self.ema_overwrite_frequency is not None:
should_overwrite_model_vars = (
self.iterations + 1
) % self.ema_overwrite_frequency == 0
should_overwrite_model_vars_int = (
should_overwrite_model_vars.astype("int32")
)
should_not_overwrite_model_vars_int = jnp.logical_not(
should_overwrite_model_vars
).astype("int32")
current_trainable_vars_value = [
v.value for v in self._trainable_variables
]
for var, average_var in zip(
self._trainable_variables,
self._model_variables_moving_average,
):
var.assign(
average_var * should_overwrite_model_vars_int
+ var.value * should_not_overwrite_model_vars_int
)
self.iterations.assign_add(1)
|
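A minimal sketch (assuming a recent JAX; the function name here is illustrative, not part of Keras) of the `jax.lax.cond` constraint the `JaxOptimizer` above works around: both branches must return pytrees with identical structure, shapes, and dtypes, which is why each lambda builds a complete list.
import jax
import jax.numpy as jnp

def accumulate_or_reset(is_update_step, grads, acc_grads):
    # Both branches return a list of arrays with matching shapes/dtypes,
    # as jax.lax.cond requires; only the selected branch is executed.
    return jax.lax.cond(
        is_update_step,
        lambda: [jnp.zeros_like(g) for g in acc_grads],
        lambda: [g + a for g, a in zip(grads, acc_grads)],
    )

grads = [jnp.ones((2,)), jnp.ones((3,))]
accs = [jnp.zeros((2,)), jnp.zeros((3,))]
print(accumulate_or_reset(True, grads, accs))   # accumulators reset to zeros
print(accumulate_or_reset(False, grads, accs))  # gradients added to accumulators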
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union
import torch
from ....models import UNet2DModel
from ....schedulers import PNDMScheduler
from ....utils.torch_utils import randn_tensor
from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class PNDMPipeline(DiffusionPipeline):
r"""
Pipeline for unconditional image generation.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
Parameters:
unet ([`UNet2DModel`]):
A `UNet2DModel` to denoise the encoded image latents.
scheduler ([`PNDMScheduler`]):
A `PNDMScheduler` to be used in combination with `unet` to denoise the encoded image.
"""
unet: UNet2DModel
scheduler: PNDMScheduler
def __init__(self, unet: UNet2DModel, scheduler: PNDMScheduler):
super().__init__()
scheduler = PNDMScheduler.from_config(scheduler.config)
self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__(
self,
batch_size: int = 1,
num_inference_steps: int = 50,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
**kwargs,
) -> Union[ImagePipelineOutput, Tuple]:
r"""
The call function to the pipeline for generation.
Args:
            batch_size (`int`, *optional*, defaults to 1):
                The number of images to generate.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            generator (`torch.Generator`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` and `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple.
Example:
```py
>>> from diffusers import PNDMPipeline
>>> # load model and scheduler
>>> pndm = PNDMPipeline.from_pretrained("google/ddpm-cifar10-32")
>>> # run pipeline in inference (sample random noise and denoise)
>>> image = pndm().images[0]
>>> # save image
>>> image.save("pndm_generated_image.png")
```
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
returned where the first element is a list with the generated images.
"""
# For more information on the sampling method you can take a look at Algorithm 2 of
# the official paper: https://huggingface.co/papers/2202.09778
        # Sample Gaussian noise to begin the loop
image = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
generator=generator,
device=self.device,
)
self.scheduler.set_timesteps(num_inference_steps)
for t in self.progress_bar(self.scheduler.timesteps):
model_output = self.unet(image, t).sample
image = self.scheduler.step(model_output, t, image).prev_sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union
import torch
from ....models import UNet2DModel
from ....schedulers import PNDMScheduler
from ....utils.torch_utils import randn_tensor
from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class PNDMPipeline(DiffusionPipeline):
r"""
Pipeline for unconditional image generation.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
Parameters:
unet ([`UNet2DModel`]):
A `UNet2DModel` to denoise the encoded image latents.
scheduler ([`PNDMScheduler`]):
A `PNDMScheduler` to be used in combination with `unet` to denoise the encoded image.
"""
unet: UNet2DModel
scheduler: PNDMScheduler
def __init__(self, unet: UNet2DModel, scheduler: PNDMScheduler):
super().__init__()
scheduler = PNDMScheduler.from_config(scheduler.config)
self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__(
self,
batch_size: int = 1,
num_inference_steps: int = 50,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
**kwargs,
) -> Union[ImagePipelineOutput, Tuple]:
r"""
The call function to the pipeline for generation.
Args:
            batch_size (`int`, *optional*, defaults to 1):
                The number of images to generate.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            generator (`torch.Generator`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` and `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple.
Example:
```py
>>> from diffusers import PNDMPipeline
>>> # load model and scheduler
>>> pndm = PNDMPipeline.from_pretrained("google/ddpm-cifar10-32")
>>> # run pipeline in inference (sample random noise and denoise)
>>> image = pndm().images[0]
>>> # save image
>>> image.save("pndm_generated_image.png")
```
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
returned where the first element is a list with the generated images.
"""
# For more information on the sampling method you can take a look at Algorithm 2 of
# the official paper: https://huggingface.co/papers/2202.09778
        # Sample Gaussian noise to begin the loop
image = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
generator=generator,
device=self.device,
)
self.scheduler.set_timesteps(num_inference_steps)
for t in self.progress_bar(self.scheduler.timesteps):
model_output = self.unet(image, t).sample
image = self.scheduler.step(model_output, t, image).prev_sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image)
|
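A hedged usage sketch for the pipeline above: the model id is taken from the docstring example, and passing one `torch.Generator` per batch element (the list length must match `batch_size`) makes each generated sample individually reproducible.
import torch
from diffusers import PNDMPipeline

pndm = PNDMPipeline.from_pretrained("google/ddpm-cifar10-32")
# One generator per image in the batch; seeds 0 and 1 are arbitrary choices.
generators = [torch.Generator().manual_seed(seed) for seed in (0, 1)]
images = pndm(batch_size=2, num_inference_steps=50, generator=generators).images
images[0].save("sample_seed0.png")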
# model settings
model = dict(
type='RPN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0)))
|
# model settings
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
type='RPN',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0)))
|
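As a hedged sketch, configs like the two above are plain Python files consumed through `mmengine` in mmdetection 3.x; the file name below is a hypothetical local path.
from mmengine.config import Config

cfg = Config.fromfile('rpn_r50_fpn_1x_coco.py')  # hypothetical local path
print(cfg.model.rpn_head.anchor_generator.strides)  # [4, 8, 16, 32, 64]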
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
type='PISARoIHead',
bbox_head=dict(
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
train_cfg=dict(
rpn_proposal=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
sampler=dict(
type='ScoreHLRSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True,
k=0.5,
bias=0.),
isr=dict(k=2, bias=0),
carl=dict(k=1, bias=0.2))),
test_cfg=dict(
rpn=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0)))
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
type='PISARoIHead',
bbox_head=dict(
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
train_cfg=dict(
rpn_proposal=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
sampler=dict(
type='ScoreHLRSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True,
k=0.5,
bias=0.),
isr=dict(k=2, bias=0),
carl=dict(k=1, bias=0.2))),
test_cfg=dict(
rpn=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0)))
|
import random
from collections import defaultdict
import torch
from torch.utils.data.sampler import Sampler
def create_groups(groups, k):
"""Bins sample indices with respect to groups, remove bins with less than k samples
Args:
groups (list[int]): where ith index stores ith sample's group id
Returns:
defaultdict[list]: Bins of sample indices, binned by group_idx
"""
group_samples = defaultdict(list)
for sample_idx, group_idx in enumerate(groups):
group_samples[group_idx].append(sample_idx)
keys_to_remove = []
for key in group_samples:
if len(group_samples[key]) < k:
keys_to_remove.append(key)
continue
for key in keys_to_remove:
group_samples.pop(key)
return group_samples
class PKSampler(Sampler):
"""
Randomly samples from a dataset while ensuring that each batch (of size p * k)
includes samples from exactly p labels, with k samples for each label.
Args:
groups (list[int]): List where the ith entry is the group_id/label of the ith sample in the dataset.
p (int): Number of labels/groups to be sampled from in a batch
k (int): Number of samples for each label/group in a batch
"""
def __init__(self, groups, p, k):
self.p = p
self.k = k
self.groups = create_groups(groups, self.k)
# Ensures there are enough classes to sample from
if len(self.groups) < p:
raise ValueError("There are not enough classes to sample from")
def __iter__(self):
# Shuffle samples within groups
for key in self.groups:
random.shuffle(self.groups[key])
# Keep track of the number of samples left for each group
group_samples_remaining = {}
for key in self.groups:
group_samples_remaining[key] = len(self.groups[key])
while len(group_samples_remaining) > self.p:
# Select p groups at random from valid/remaining groups
group_ids = list(group_samples_remaining.keys())
selected_group_idxs = torch.multinomial(torch.ones(len(group_ids)), self.p).tolist()
for i in selected_group_idxs:
group_id = group_ids[i]
group = self.groups[group_id]
for _ in range(self.k):
# No need to pick samples at random since group samples are shuffled
sample_idx = len(group) - group_samples_remaining[group_id]
yield group[sample_idx]
group_samples_remaining[group_id] -= 1
# Don't sample from group if it has less than k samples remaining
if group_samples_remaining[group_id] < self.k:
group_samples_remaining.pop(group_id)
|
import random
from collections import defaultdict
import torch
from torch.utils.data.sampler import Sampler
def create_groups(groups, k):
"""Bins sample indices with respect to groups, remove bins with less than k samples
Args:
groups (list[int]): where ith index stores ith sample's group id
Returns:
defaultdict[list]: Bins of sample indices, binned by group_idx
"""
group_samples = defaultdict(list)
for sample_idx, group_idx in enumerate(groups):
group_samples[group_idx].append(sample_idx)
keys_to_remove = []
for key in group_samples:
if len(group_samples[key]) < k:
keys_to_remove.append(key)
continue
for key in keys_to_remove:
group_samples.pop(key)
return group_samples
class PKSampler(Sampler):
"""
Randomly samples from a dataset while ensuring that each batch (of size p * k)
includes samples from exactly p labels, with k samples for each label.
Args:
groups (list[int]): List where the ith entry is the group_id/label of the ith sample in the dataset.
p (int): Number of labels/groups to be sampled from in a batch
k (int): Number of samples for each label/group in a batch
"""
def __init__(self, groups, p, k):
self.p = p
self.k = k
self.groups = create_groups(groups, self.k)
# Ensures there are enough classes to sample from
if len(self.groups) < p:
raise ValueError("There are not enought classes to sample from")
def __iter__(self):
# Shuffle samples within groups
for key in self.groups:
random.shuffle(self.groups[key])
# Keep track of the number of samples left for each group
group_samples_remaining = {}
for key in self.groups:
group_samples_remaining[key] = len(self.groups[key])
while len(group_samples_remaining) > self.p:
# Select p groups at random from valid/remaining groups
group_ids = list(group_samples_remaining.keys())
selected_group_idxs = torch.multinomial(torch.ones(len(group_ids)), self.p).tolist()
for i in selected_group_idxs:
group_id = group_ids[i]
group = self.groups[group_id]
for _ in range(self.k):
# No need to pick samples at random since group samples are shuffled
sample_idx = len(group) - group_samples_remaining[group_id]
yield group[sample_idx]
group_samples_remaining[group_id] -= 1
# Don't sample from group if it has less than k samples remaining
if group_samples_remaining[group_id] < self.k:
group_samples_remaining.pop(group_id)
|
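A hedged usage sketch for `PKSampler` above: `labels[i]` is the group id of the ith sample, and the toy `TensorDataset` stands in for a real dataset. Each batch of size `p * k` then contains exactly `p` groups with `k` samples each.
import torch
from torch.utils.data import DataLoader, TensorDataset

labels = [0, 0, 0, 1, 1, 1, 2, 2, 2]
dataset = TensorDataset(torch.arange(len(labels)))
sampler = PKSampler(labels, p=2, k=3)
loader = DataLoader(dataset, batch_size=2 * 3, sampler=sampler)
for (batch,) in loader:
    print(batch)  # six indices covering two groups, three samples each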
"""Feedly Rss Reader."""
import json
from pathlib import Path
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class FeedlyRssReader(BaseReader):
"""
Feedly Rss Reader.
Get entries from Feedly Rss Reader
Uses Feedly Official python-api-client: https://github.com/feedly/python-api-client
"""
def __init__(self, bearer_token: str) -> None:
"""Initialize with parameters."""
super().__init__()
self.bearer_token = bearer_token
def setup_auth(
self, directory: Path = Path.home() / ".config/feedly", overwrite: bool = False
):
"""
Modified from python-api-client/feedly/api_client/utils.py
        Instead of prompting for user input, we take the token as an argument.
"""
directory.mkdir(exist_ok=True, parents=True)
auth_file = directory / "access.token"
if not auth_file.exists() or overwrite:
auth = self.bearer_token
auth_file.write_text(auth.strip())
def load_data(self, category_name, max_count=100):
"""Get the entries from a feedly category."""
from feedly.api_client.session import FeedlySession
from feedly.api_client.stream import StreamOptions
self.setup_auth(overwrite=True)
sess = FeedlySession()
category = sess.user.user_categories.get(category_name)
documents = []
for article in category.stream_contents(
options=StreamOptions(max_count=max_count)
):
# doc for available fields: https://developer.feedly.com/v3/streams/
entry = {
"title": article["title"],
"published": article["published"],
"summary": article["summary"],
"author": article["author"],
"content": article["content"],
"keywords": article["keywords"],
"commonTopics": article["commonTopics"],
}
text = json.dumps(entry, ensure_ascii=False)
documents.append(Document(text=text))
return documents
|
"""Feedly Rss Reader."""
import json
from pathlib import Path
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class FeedlyRssReader(BaseReader):
"""Feedly Rss Reader.
Get entries from Feedly Rss Reader
Uses Feedly Official python-api-client: https://github.com/feedly/python-api-client
"""
def __init__(self, bearer_token: str) -> None:
"""Initialize with parameters."""
super().__init__()
self.bearer_token = bearer_token
def setup_auth(
self, directory: Path = Path.home() / ".config/feedly", overwrite: bool = False
):
"""Modified from python-api-client/feedly/api_client/utils.py
        Instead of prompting for user input, we take the token as an argument.
"""
directory.mkdir(exist_ok=True, parents=True)
auth_file = directory / "access.token"
if not auth_file.exists() or overwrite:
auth = self.bearer_token
auth_file.write_text(auth.strip())
def load_data(self, category_name, max_count=100):
"""Get the entries from a feedly category."""
from feedly.api_client.session import FeedlySession
from feedly.api_client.stream import StreamOptions
self.setup_auth(overwrite=True)
sess = FeedlySession()
category = sess.user.user_categories.get(category_name)
documents = []
for article in category.stream_contents(
options=StreamOptions(max_count=max_count)
):
# doc for available fields: https://developer.feedly.com/v3/streams/
entry = {
"title": article["title"],
"published": article["published"],
"summary": article["summary"],
"author": article["author"],
"content": article["content"],
"keywords": article["keywords"],
"commonTopics": article["commonTopics"],
}
text = json.dumps(entry, ensure_ascii=False)
documents.append(Document(text=text))
return documents
|
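A hedged usage sketch for the reader above; the token and category name are placeholders, and live network access to Feedly is required.
reader = FeedlyRssReader(bearer_token="<YOUR_FEEDLY_TOKEN>")
documents = reader.load_data(category_name="news", max_count=10)
print(len(documents))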
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import Embedding
class Text(BaseDocument):
"""
Document for handling text.
It can contain a TextUrl (`Text.url`), a str (`Text.text`),
and an Embedding (`Text.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray import Text
# use it directly
txt_doc = Text(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
You can extend this Document:
.. code-block:: python
from docarray import Text
from docarray.typing import Embedding
from typing import Optional
# extend it
class MyText(Text):
second_embedding: Optional[Embedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
txt_doc.second_embedding = model(txt_doc.text)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument, Image, Text
# compose it
class MultiModalDoc(BaseDocument):
image_doc: Image
text_doc: Text
mmdoc = MultiModalDoc(
image_doc=Image(url="http://www.jina.ai/image.jpg"),
text_doc=Text(text="hello world, how are you doing?"),
)
mmdoc.text_doc.text = mmdoc.text_doc.url.load()
"""
text: Optional[str] = None
url: Optional[TextUrl] = None
embedding: Optional[Embedding] = None
|
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import Embedding
class Text(BaseDocument):
"""
Document for handling text.
It can contain a TextUrl (`Text.url`), a str (`Text.text`),
and an Embedding (`Text.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray import Text
# use it directly
txt_doc = Text(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
You can extend this Document:
.. code-block:: python
from docarray import Text
from docarray.typing import Embedding
from typing import Optional
# extend it
class MyText(Text):
second_embedding: Optional[Embedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
txt_doc.second_embedding = model(txt_doc.text)
You can use this Document for composition:
.. code-block:: python
from docarray import Document, Image, Text
# compose it
class MultiModalDoc(Document):
image_doc: Image
text_doc: Text
mmdoc = MultiModalDoc(
image_doc=Image(url="http://www.jina.ai/image.jpg"),
text_doc=Text(text="hello world, how are you doing?"),
)
mmdoc.text_doc.text = mmdoc.text_doc.url.load()
"""
text: Optional[str] = None
url: Optional[TextUrl] = None
embedding: Optional[Embedding] = None
|
import os
from functools import lru_cache
from typing import Union
import ffmpeg
import numpy as np
import torch
import torch.nn.functional as F
from .utils import exact_div
# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000: number of samples in a chunk
N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH) # 3000: number of frames in a mel spectrogram input
def load_audio(file: str, sr: int = SAMPLE_RATE):
"""
Open an audio file and read as mono waveform, resampling as necessary
Parameters
----------
file: str
The audio file to open
sr: int
The sample rate to resample the audio if necessary
Returns
-------
A NumPy array containing the audio waveform, in float32 dtype.
"""
try:
# This launches a subprocess to decode audio while down-mixing and resampling as necessary.
# Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
out, _ = (
ffmpeg.input(file, threads=0)
.output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr)
.run(cmd="ffmpeg", capture_stdout=True, capture_stderr=True)
)
except ffmpeg.Error as e:
raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
"""
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
"""
if torch.is_tensor(array):
if array.shape[axis] > length:
array = array.index_select(dim=axis, index=torch.arange(length))
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
else:
if array.shape[axis] > length:
array = array.take(indices=range(length), axis=axis)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = np.pad(array, pad_widths)
return array
@lru_cache(maxsize=None)
def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
"""
load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
Allows decoupling librosa dependency; saved using:
np.savez_compressed(
"mel_filters.npz",
mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
)
"""
assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
with np.load(os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")) as f:
return torch.from_numpy(f[f"mel_{n_mels}"]).to(device)
def log_mel_spectrogram(audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = N_MELS):
"""
    Compute the log-Mel spectrogram of the given audio.
Parameters
----------
audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
        The path to an audio file, or a NumPy array or Tensor containing the audio waveform sampled at 16 kHz
n_mels: int
The number of Mel-frequency filters, only 80 is supported
Returns
-------
torch.Tensor, shape = (80, n_frames)
A Tensor that contains the Mel spectrogram
"""
if not torch.is_tensor(audio):
if isinstance(audio, str):
audio = load_audio(audio)
audio = torch.from_numpy(audio)
window = torch.hann_window(N_FFT).to(audio.device)
stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
magnitudes = stft[:, :-1].abs() ** 2
filters = mel_filters(audio.device, n_mels)
mel_spec = filters @ magnitudes
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec
|
import os
from functools import lru_cache
from typing import Union
import ffmpeg
import numpy as np
import torch
import torch.nn.functional as F
from .utils import exact_div
# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000: number of samples in a chunk
N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH) # 3000: number of frames in a mel spectrogram input
def load_audio(file: str, sr: int = SAMPLE_RATE):
"""
Open an audio file and read as mono waveform, resampling as necessary
Parameters
----------
file: str
The audio file to open
sr: int
The sample rate to resample the audio if necessary
Returns
-------
A NumPy array containing the audio waveform, in float32 dtype.
"""
try:
# This launches a subprocess to decode audio while down-mixing and resampling as necessary.
# Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
out, _ = (
ffmpeg.input(file, threads=0)
.output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr)
.run(cmd="ffmpeg", capture_stdout=True, capture_stderr=True)
)
except ffmpeg.Error as e:
raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}")
return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
"""
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
"""
if torch.is_tensor(array):
if array.shape[axis] > length:
array = array.index_select(dim=axis, index=torch.arange(length))
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
else:
if array.shape[axis] > length:
array = array.take(indices=range(length), axis=axis)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = np.pad(array, pad_widths)
return array
@lru_cache(maxsize=None)
def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
"""
load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
Allows decoupling librosa dependency; saved using:
np.savez_compressed(
"mel_filters.npz",
mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
)
"""
assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
with np.load(os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")) as f:
return torch.from_numpy(f[f"mel_{n_mels}"]).to(device)
def log_mel_spectrogram(audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = N_MELS):
"""
    Compute the log-Mel spectrogram of the given audio.
Parameters
----------
audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
        The path to an audio file, or a NumPy array or Tensor containing the audio waveform sampled at 16 kHz
n_mels: int
The number of Mel-frequency filters, only 80 is supported
Returns
-------
torch.Tensor, shape = (80, n_frames)
A Tensor that contains the Mel spectrogram
"""
if not torch.is_tensor(audio):
if isinstance(audio, str):
audio = load_audio(audio)
audio = torch.from_numpy(audio)
window = torch.hann_window(N_FFT).to(audio.device)
stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
magnitudes = stft[:, :-1].abs() ** 2
filters = mel_filters(audio.device, n_mels)
mel_spec = filters @ magnitudes
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec
|
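A hedged end-to-end sketch of the helpers above; "speech.wav" is a placeholder path, and the ffmpeg CLI must be installed for load_audio to work.
audio = load_audio("speech.wav")   # float32 mono waveform at 16 kHz
audio = pad_or_trim(audio)         # pad or trim to exactly N_SAMPLES
mel = log_mel_spectrogram(audio)   # torch.Tensor of shape (80, 3000)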
_base_ = './rtmdet-ins_s_8xb32-300e_coco.py'
checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa
model = dict(
backbone=dict(
deepen_factor=0.167,
widen_factor=0.375,
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
neck=dict(in_channels=[96, 192, 384], out_channels=96, num_csp_blocks=1),
bbox_head=dict(in_channels=96, feat_channels=96))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='CachedMosaic',
img_scale=(640, 640),
pad_val=114.0,
max_cached_images=20,
random_pop=False),
dict(
type='RandomResize',
scale=(1280, 1280),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(640, 640),
ratio_range=(1.0, 1.0),
max_cached_images=10,
random_pop=False,
pad_val=(114, 114, 114),
prob=0.5),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './rtmdet-ins_s_8xb32-300e_coco.py'
checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa
model = dict(
backbone=dict(
deepen_factor=0.167,
widen_factor=0.375,
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
neck=dict(in_channels=[96, 192, 384], out_channels=96, num_csp_blocks=1),
bbox_head=dict(in_channels=96, feat_channels=96))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='CachedMosaic',
img_scale=(640, 640),
pad_val=114.0,
max_cached_images=20,
random_pop=False),
dict(
type='RandomResize',
scale=(1280, 1280),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(640, 640),
ratio_range=(1.0, 1.0),
max_cached_images=10,
random_pop=False,
pad_val=(114, 114, 114),
prob=0.5),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './solov2_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 800), (1333, 768), (1333, 736), (1333, 704),
(1333, 672), (1333, 640)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 3x
max_epochs = 36
train_cfg = dict(by_epoch=True, max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
|
_base_ = 'solov2_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 800), (1333, 768), (1333, 736), (1333, 704),
(1333, 672), (1333, 640)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 3x
max_epochs = 36
train_cfg = dict(by_epoch=True, max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
|
import numpy as np
import pytest
from tensorflow import data as tf_data
import keras
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomHueTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.RandomHue,
init_kwargs={
"factor": 0.75,
"value_range": (20, 200),
"seed": 1,
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
)
def test_random_hue_inference(self):
seed = 3481
layer = layers.RandomHue(0.2, [0, 1.0])
np.random.seed(seed)
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output = layer(inputs, training=False)
self.assertAllClose(inputs, output)
def test_random_hue_value_range(self):
image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=1)
layer = layers.RandomHue(0.2, (0, 255))
adjusted_image = layer(image)
self.assertTrue(keras.ops.numpy.all(adjusted_image >= 0))
self.assertTrue(keras.ops.numpy.all(adjusted_image <= 1))
def test_random_hue_no_change_with_zero_factor(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
inputs = keras.random.randint((224, 224, 3), 0, 255)
else:
inputs = keras.random.randint((3, 224, 224), 0, 255)
layer = layers.RandomHue(0, (0, 255), data_format=data_format)
output = layer(inputs, training=False)
self.assertAllClose(inputs, output, atol=1e-3, rtol=1e-5)
def test_random_hue_randomness(self):
image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=1)[:5]
layer = layers.RandomHue(0.2, (0, 255))
adjusted_images = layer(image)
self.assertNotAllClose(adjusted_images, image)
def test_tf_data_compatibility(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
input_data = np.random.random((2, 8, 8, 3))
else:
input_data = np.random.random((2, 3, 8, 8))
layer = layers.RandomHue(
factor=0.5, value_range=[0, 1], data_format=data_format, seed=1337
)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output.numpy()
|
import numpy as np
import pytest
from tensorflow import data as tf_data
import keras
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomHueTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.RandomHue,
init_kwargs={
"factor": 0.75,
"value_range": (20, 200),
"seed": 1,
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
)
def test_random_hue_value_range(self):
image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=1)
layer = layers.RandomHue(0.2, (0, 255))
adjusted_image = layer(image)
self.assertTrue(keras.ops.numpy.all(adjusted_image >= 0))
self.assertTrue(keras.ops.numpy.all(adjusted_image <= 1))
def test_random_hue_no_change_with_zero_factor(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
inputs = keras.random.randint((224, 224, 3), 0, 255)
else:
inputs = keras.random.randint((3, 224, 224), 0, 255)
layer = layers.RandomHue(0, (0, 255), data_format=data_format)
output = layer(inputs, training=False)
self.assertAllClose(inputs, output, atol=1e-3, rtol=1e-5)
def test_random_hue_randomness(self):
image = keras.random.uniform(shape=(3, 3, 3), minval=0, maxval=1)[:5]
layer = layers.RandomHue(0.2, (0, 255))
adjusted_images = layer(image)
self.assertNotAllClose(adjusted_images, image)
def test_tf_data_compatibility(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
input_data = np.random.random((2, 8, 8, 3))
else:
input_data = np.random.random((2, 3, 8, 8))
layer = layers.RandomHue(
factor=0.5, value_range=[0, 1], data_format=data_format, seed=1337
)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output.numpy()
|
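A hedged usage sketch of the layer under test, assuming a Keras 3 build where the layer is exposed as `keras.layers.RandomHue`.
import numpy as np
import keras

layer = keras.layers.RandomHue(factor=0.5, value_range=(0, 255), seed=1337)
images = np.random.randint(0, 255, size=(2, 8, 8, 3)).astype("float32")
augmented = layer(images, training=True)
print(augmented.shape)  # (2, 8, 8, 3)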
# Owner(s): ["oncall: distributed checkpointing"]
import unittest.mock as mock
from torch.distributed.checkpoint._experimental.barriers import TCPStoreBarrier
from torch.distributed.checkpoint._experimental.types import RankInfo
from torch.testing._internal.common_utils import run_tests, TestCase
class TestBarriers(TestCase):
@mock.patch("torch.distributed.TCPStore")
@mock.patch("torch.distributed.elastic.utils.store.barrier")
def test_tcpstore_barrier_initialization(self, _, mock_tcpstore):
"""Test that TCPStoreBarrier initializes correctly."""
# Setup
timeout_barrier_init_secs = 60
barrier_prefix = "test_barrier"
world_size = 4
use_checkpoint_barrier_tcpstore_libuv = True
tcpstore_port = 12345
master_address = "localhost"
rank = 0
timeout_secs = 30
# Create rank_info
rank_info = RankInfo(global_rank=rank, global_world_size=world_size)
# Create the barrier (used for verification)
_ = TCPStoreBarrier(
global_rank=rank_info.global_rank,
global_world_size=rank_info.global_world_size,
barrier_prefix=barrier_prefix,
timeout_barrier_init_secs=timeout_barrier_init_secs,
use_checkpoint_barrier_tcpstore_libuv=use_checkpoint_barrier_tcpstore_libuv,
tcpstore_port=tcpstore_port,
master_address=master_address,
timeout_secs=timeout_secs,
)
# Verify that TCPStore was initialized with the correct parameters
mock_tcpstore.assert_called_once_with(
master_address,
tcpstore_port,
world_size=rank_info.global_world_size,
timeout=mock.ANY, # timedelta is hard to compare directly
is_master=(rank_info.global_rank == 0),
)
@mock.patch("torch.distributed.TCPStore")
@mock.patch("torch.distributed.elastic.utils.store.barrier")
def test_execute_barrier(self, mock_barrier, mock_tcpstore):
"""Test that execute_barrier calls the barrier function correctly."""
# Setup
barrier_prefix = "test_barrier"
timeout_barrier_init_secs = 60
world_size = 4
use_checkpoint_barrier_tcpstore_libuv = True
tcpstore_port = 12345
master_address = "localhost"
rank = 0
timeout_secs = 30
# Create rank_info
rank_info = RankInfo(global_rank=rank, global_world_size=world_size)
# Mock the TCPStore instance
mock_tcpstore_instance = mock.MagicMock()
mock_tcpstore.return_value = mock_tcpstore_instance
# Create the barrier
barrier = TCPStoreBarrier(
global_rank=rank_info.global_rank,
global_world_size=rank_info.global_world_size,
barrier_prefix=barrier_prefix,
timeout_barrier_init_secs=timeout_barrier_init_secs,
use_checkpoint_barrier_tcpstore_libuv=use_checkpoint_barrier_tcpstore_libuv,
tcpstore_port=tcpstore_port,
master_address=master_address,
timeout_secs=timeout_secs,
)
# Execute the barrier
barrier.execute_barrier()
# Verify that the TCPStore's set method was called with the correct parameters
mock_tcpstore_instance.set.assert_called_once_with("rank0", "0")
# Verify that the barrier function was called with the correct parameters
mock_barrier.assert_called_once_with(
store=mock_tcpstore_instance,
world_size=rank_info.global_world_size,
key_prefix=barrier_prefix + "0",
)
# Execute the barrier again to test sequence number increment
barrier.execute_barrier()
# Verify that the TCPStore's set method was called with the incremented sequence number
mock_tcpstore_instance.set.assert_called_with("rank0", "1")
# Verify that the barrier function was called with the incremented sequence number
mock_barrier.assert_called_with(
store=mock_tcpstore_instance,
world_size=rank_info.global_world_size,
key_prefix=barrier_prefix + "1",
)
if __name__ == "__main__":
run_tests()
|
# Owner(s): ["oncall: distributed checkpointing"]
import unittest.mock as mock
from torch.distributed.checkpoint._experimental.barriers import TCPStoreBarrier
from torch.testing._internal.common_utils import run_tests, TestCase
class TestBarriers(TestCase):
@mock.patch("torch.distributed.TCPStore")
@mock.patch("torch.distributed.elastic.utils.store.barrier")
def test_tcpstore_barrier_initialization(self, _, mock_tcpstore):
"""Test that TCPStoreBarrier initializes correctly."""
# Setup
timeout_barrier_init_secs = 60
barrier_prefix_list = ["test_barrier", "another_barrier"]
world_size = 4
use_checkpoint_barrier_tcpstore_libuv = True
tcpstore_port = 12345
master_address = "localhost"
rank = 0
local_world_size = 1
# Create the barrier
barrier = TCPStoreBarrier(
timeout_barrier_init_secs=timeout_barrier_init_secs,
barrier_prefix_list=barrier_prefix_list,
world_size=world_size,
use_checkpoint_barrier_tcpstore_libuv=use_checkpoint_barrier_tcpstore_libuv,
tcpstore_port=tcpstore_port,
master_address=master_address,
rank=rank,
local_world_size=local_world_size,
)
# Verify that TCPStore was initialized correctly for each barrier prefix
self.assertEqual(len(barrier._tcp_store_dict), len(barrier_prefix_list))
for prefix in barrier_prefix_list:
self.assertIn(prefix, barrier._tcp_store_dict)
# Verify that TCPStore was initialized with the correct parameters
mock_tcpstore.assert_any_call(
master_address,
tcpstore_port,
world_size=world_size,
timeout=mock.ANY, # timedelta is hard to compare directly
use_libuv=use_checkpoint_barrier_tcpstore_libuv,
)
@mock.patch("torch.distributed.TCPStore")
@mock.patch("torch.distributed.elastic.utils.store.barrier")
def test_execute_barrier(self, mock_barrier, mock_tcpstore):
"""Test that execute_barrier calls the barrier function correctly."""
# Setup
barrier_prefix = "test_barrier"
timeout_barrier_init_secs = 60
barrier_prefix_list = ["test_barrier"]
world_size = 4
use_checkpoint_barrier_tcpstore_libuv = True
tcpstore_port = 12345
master_address = "localhost"
rank = 0
local_world_size = 1
timeout_secs = 30
# Mock the TCPStore instance
mock_tcpstore_instance = mock.MagicMock()
mock_tcpstore.return_value = mock_tcpstore_instance
# Create the barrier
barrier = TCPStoreBarrier(
timeout_barrier_init_secs=timeout_barrier_init_secs,
barrier_prefix_list=barrier_prefix_list,
world_size=world_size,
use_checkpoint_barrier_tcpstore_libuv=use_checkpoint_barrier_tcpstore_libuv,
tcpstore_port=tcpstore_port,
master_address=master_address,
rank=rank,
local_world_size=local_world_size,
)
# Execute the barrier
barrier.execute_barrier(barrier_prefix, timeout_secs)
# Verify that the TCPStore's set method was called with the correct parameters
mock_tcpstore_instance.set.assert_called_once_with("rank0", "0")
# Verify that the barrier function was called with the correct parameters
mock_barrier.assert_called_once_with(
store=mock_tcpstore_instance,
world_size=world_size,
key_prefix=barrier_prefix + "0",
)
# Execute the barrier again to test sequence number increment
barrier.execute_barrier(barrier_prefix, timeout_secs)
# Verify that the TCPStore's set method was called with the incremented sequence number
mock_tcpstore_instance.set.assert_called_with("rank0", "1")
# Verify that the barrier function was called with the incremented sequence number
mock_barrier.assert_called_with(
store=mock_tcpstore_instance,
world_size=world_size,
key_prefix=barrier_prefix + "1",
)
if __name__ == "__main__":
run_tests()
|
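A hedged sketch of the primitive these tests mock: a `TCPStore` is a small key/value rendezvous that ranks use to signal progress, which is what the barrier's `set("rank0", "<seq>")` calls exercise. The port is an arbitrary free-port assumption.
import torch.distributed as dist

store = dist.TCPStore("localhost", 29500, world_size=1, is_master=True)
store.set("rank0", "0")    # the barrier above writes its sequence number this way
print(store.get("rank0"))  # b'0'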
from __future__ import annotations
from typing import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import SentenceTransformer
class MSELoss(nn.Module):
def __init__(self, model: SentenceTransformer) -> None:
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../examples/training/distillation/README.html>`_ on extending language models to new languages.
Args:
model: SentenceTransformerModel
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../examples/training/distillation/README.html>`_
- `Training > Multilingual Models <../../examples/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Inputs:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Relations:
- :class:`MarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
student_model = SentenceTransformer("microsoft/mpnet-base")
teacher_model = SentenceTransformer("all-mpnet-base-v2")
train_dataset = Dataset.from_dict({
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
})
def compute_labels(batch):
return {
"label": teacher_model.encode(batch["english"])
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = SentenceTransformerTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.loss_fct = nn.MSELoss()
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
# Concatenate multiple inputs on the batch dimension
if len(sentence_features) > 1:
embeddings = torch.cat([self.model(inputs)["sentence_embedding"] for inputs in sentence_features], dim=0)
# Repeat the labels for each input
return self.loss_fct(embeddings, labels.repeat(len(sentence_features), 1))
embeddings = self.model(sentence_features[0])["sentence_embedding"]
return self.loss_fct(embeddings, labels)
@property
def citation(self) -> str:
return """
@inproceedings{reimers-2020-multilingual-sentence-bert,
title = "Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2020",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/2004.09813",
}
"""
|
from __future__ import annotations
from typing import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import SentenceTransformer
class MSELoss(nn.Module):
def __init__(self, model: SentenceTransformer) -> None:
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../examples/training/distillation/README.html>`_ on extending language models to new languages.
Args:
model: SentenceTransformerModel
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../examples/training/distillation/README.html>`_
- `Training > Multilingual Models <../../examples/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Relations:
- :class:`MarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Input:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
student_model = SentenceTransformer("microsoft/mpnet-base")
teacher_model = SentenceTransformer("all-mpnet-base-v2")
train_dataset = Dataset.from_dict({
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
})
def compute_labels(batch):
return {
"label": teacher_model.encode(batch["english"])
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = SentenceTransformerTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.loss_fct = nn.MSELoss()
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
# Concatenate multiple inputs on the batch dimension
if len(sentence_features) > 1:
embeddings = torch.cat([self.model(inputs)["sentence_embedding"] for inputs in sentence_features], dim=0)
# Repeat the labels for each input
return self.loss_fct(embeddings, labels.repeat(len(sentence_features), 1))
embeddings = self.model(sentence_features[0])["sentence_embedding"]
return self.loss_fct(embeddings, labels)
@property
def citation(self) -> str:
return """
@inproceedings{reimers-2020-multilingual-sentence-bert,
title = "Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2020",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/2004.09813",
}
"""
|
import csv
import os
from pathlib import Path
from typing import Union
import torchaudio
from torch.utils.data import Dataset
class FluentSpeechCommands(Dataset):
"""Create *Fluent Speech Commands* [:footcite:`fluent`] Dataset
Args:
        root (str or Path): Path to the directory where the dataset is found.
subset (str, optional): subset of the dataset to use. Options: [`"train"`, `"valid"`, `"test"`].
(Default: ``"train"``)
"""
def __init__(self, root: Union[str, Path], subset: str = "train"):
assert subset in ["train", "valid", "test"], "`subset` must be one of ['train', 'valid', 'test']"
root = os.fspath(root)
self._path = os.path.join(root, "fluent_speech_commands_dataset")
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found.")
subset_path = os.path.join(self._path, "data", f"{subset}_data.csv")
with open(subset_path) as subset_csv:
subset_reader = csv.reader(subset_csv)
data = list(subset_reader)
self.header = data[0]
self.data = data[1:]
def __len__(self):
return len(self.data)
def __getitem__(self, n: int):
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, int, str, str, str, str):
``(waveform, sample_rate, file_name, speaker_id, transcription, action, object, location)``
"""
sample = self.data[n]
file_name = sample[self.header.index("path")].split("/")[-1]
file_name = file_name.split(".")[0]
speaker_id, transcription, action, obj, location = sample[2:]
wav_path = os.path.join(self._path, "wavs", "speakers", speaker_id, f"{file_name}.wav")
wav, sample_rate = torchaudio.load(wav_path)
return wav, sample_rate, file_name, speaker_id, transcription, action, obj, location
|
import csv
import os
from pathlib import Path
from typing import Union
import torchaudio
from torch.utils.data import Dataset
class FluentSpeechCommands(Dataset):
"""Create *Fluent Speech Commands* [:footcite:`fluent`] Dataset
Args:
        root (str or Path): Path to the directory where the dataset is found.
subset (str, optional): subset of the dataset to use. Options: [`"train"`, `"valid"`, `"test"`].
(Default: ``"train"``)
"""
def __init__(self, root: Union[str, Path], subset: str = "train"):
assert subset in ["train", "valid", "test"], "`subset` must be one of ['train', 'valid', 'test']"
root = os.fspath(root)
self._path = os.path.join(root, "fluent_speech_commands_dataset")
subset_path = os.path.join(self._path, "data", f"{subset}_data.csv")
with open(subset_path) as subset_csv:
subset_reader = csv.reader(subset_csv)
data = list(subset_reader)
self.header = data[0]
self.data = data[1:]
def __len__(self):
return len(self.data)
def __getitem__(self, n: int):
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, Path, int, str, str, str, str):
``(waveform, sample_rate, path, speaker_id, transcription, action, object, location)``
"""
sample = self.data[n]
wav_path = os.path.join(self._path, sample[self.header.index("path")])
wav, sample_rate = torchaudio.load(wav_path)
path = Path(wav_path).stem
speaker_id, transcription, action, obj, location = sample[2:]
return wav, sample_rate, path, speaker_id, transcription, action, obj, location
|
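A hedged usage sketch for the dataset class above: "./data" is a placeholder root that must contain the extracted fluent_speech_commands_dataset directory.
dataset = FluentSpeechCommands("./data", subset="valid")
waveform, sample_rate, *metadata = dataset[0]
print(waveform.shape, sample_rate)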
_base_ = '../fast_rcnn/fast-rcnn_r50_fpn_1x_coco.py'
# model settings
model = dict(
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
dict(
type='BFP',
in_channels=256,
num_levels=5,
refine_level=2,
refine_type='non_local')
],
roi_head=dict(
bbox_head=dict(
loss_bbox=dict(
_delete_=True,
type='BalancedL1Loss',
alpha=0.5,
gamma=1.5,
beta=1.0,
loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
sampler=dict(
_delete_=True,
type='CombinedSampler',
num=512,
pos_fraction=0.25,
add_gt_as_proposals=True,
pos_sampler=dict(type='InstanceBalancedPosSampler'),
neg_sampler=dict(
type='IoUBalancedNegSampler',
floor_thr=-1,
floor_fraction=0,
num_bins=3)))))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
data = dict(
train=dict(proposal_file=data_root +
'libra_proposals/rpn_r50_fpn_1x_train2017.pkl'),
val=dict(proposal_file=data_root +
'libra_proposals/rpn_r50_fpn_1x_val2017.pkl'),
test=dict(proposal_file=data_root +
'libra_proposals/rpn_r50_fpn_1x_val2017.pkl'))
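# Note (sketch of intent): the BFP neck is the Balanced Feature Pyramid from
# Libra R-CNN: the five FPN levels are gathered at refine_level=2, refined with a
# non-local block, and redistributed; BalancedL1Loss and the IoU-balanced /
# instance-balanced samplers address the objective- and sample-level imbalance.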
|
_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py'
# model settings
model = dict(
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
dict(
type='BFP',
in_channels=256,
num_levels=5,
refine_level=2,
refine_type='non_local')
],
roi_head=dict(
bbox_head=dict(
loss_bbox=dict(
_delete_=True,
type='BalancedL1Loss',
alpha=0.5,
gamma=1.5,
beta=1.0,
loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
sampler=dict(
_delete_=True,
type='CombinedSampler',
num=512,
pos_fraction=0.25,
add_gt_as_proposals=True,
pos_sampler=dict(type='InstanceBalancedPosSampler'),
neg_sampler=dict(
type='IoUBalancedNegSampler',
floor_thr=-1,
floor_fraction=0,
num_bins=3)))))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
data = dict(
train=dict(proposal_file=data_root +
'libra_proposals/rpn_r50_fpn_1x_train2017.pkl'),
val=dict(proposal_file=data_root +
'libra_proposals/rpn_r50_fpn_1x_val2017.pkl'),
test=dict(proposal_file=data_root +
'libra_proposals/rpn_r50_fpn_1x_val2017.pkl'))
|
import functools
import torch
import torch._custom_ops
import torch.library
# Ensure that torch.ops.torchvision is visible
import torchvision.extension # noqa: F401
@functools.lru_cache(None)
def get_meta_lib():
return torch.library.Library("torchvision", "IMPL", "Meta")
def register_meta(op_name, overload_name="default"):
def wrapper(fn):
if torchvision.extension._has_ops():
get_meta_lib().impl(getattr(getattr(torch.ops.torchvision, op_name), overload_name), fn)
return fn
return wrapper
@register_meta("roi_align")
def meta_roi_align(input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
torch._check(rois.size(1) == 5, lambda: "rois must have shape as Tensor[K, 5]")
torch._check(
input.dtype == rois.dtype,
lambda: (
"Expected tensor for input to have the same type as tensor for rois; "
f"but type {input.dtype} does not equal {rois.dtype}"
),
)
num_rois = rois.size(0)
_, channels, height, width = input.size()
return input.new_empty((num_rois, channels, pooled_height, pooled_width))
@register_meta("_roi_align_backward")
def meta_roi_align_backward(
grad, rois, spatial_scale, pooled_height, pooled_width, batch_size, channels, height, width, sampling_ratio, aligned
):
torch._check(
grad.dtype == rois.dtype,
lambda: (
"Expected tensor for grad to have the same type as tensor for rois; "
f"but type {grad.dtype} does not equal {rois.dtype}"
),
)
return grad.new_empty((batch_size, channels, height, width))
@torch._custom_ops.impl_abstract("torchvision::nms")
def meta_nms(dets, scores, iou_threshold):
torch._check(dets.dim() == 2, lambda: f"boxes should be a 2d tensor, got {dets.dim()}D")
torch._check(dets.size(1) == 4, lambda: f"boxes should have 4 elements in dimension 1, got {dets.size(1)}")
torch._check(scores.dim() == 1, lambda: f"scores should be a 1d tensor, got {scores.dim()}")
torch._check(
dets.size(0) == scores.size(0),
lambda: f"boxes and scores should have same number of elements in dimension 0, got {dets.size(0)} and {scores.size(0)}",
)
ctx = torch._custom_ops.get_ctx()
num_to_keep = ctx.create_unbacked_symint()
return dets.new_empty(num_to_keep, dtype=torch.long)
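# Usage sketch: with the meta kernels registered, shape propagation runs on
# "meta" tensors without executing the real kernels (sizes below are illustrative):
# feats = torch.empty(2, 256, 32, 32, device="meta")
# rois = torch.empty(10, 5, device="meta")
# out = torch.ops.torchvision.roi_align(feats, rois, 1.0, 7, 7, 2, False)
# assert out.shape == (10, 256, 7, 7)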
|
import functools
import torch
import torch.library
# Ensure that torch.ops.torchvision is visible
import torchvision.extension # noqa: F401
@functools.lru_cache(None)
def get_meta_lib():
return torch.library.Library("torchvision", "IMPL", "Meta")
def register_meta(op_name, overload_name="default"):
def wrapper(fn):
if torchvision.extension._has_ops():
get_meta_lib().impl(getattr(getattr(torch.ops.torchvision, op_name), overload_name), fn)
return fn
return wrapper
@register_meta("roi_align")
def meta_roi_align(input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
torch._check(rois.size(1) == 5, lambda: "rois must have shape as Tensor[K, 5]")
torch._check(
input.dtype == rois.dtype,
lambda: (
"Expected tensor for input to have the same type as tensor for rois; "
f"but type {input.dtype} does not equal {rois.dtype}"
),
)
num_rois = rois.size(0)
_, channels, height, width = input.size()
return input.new_empty((num_rois, channels, pooled_height, pooled_width))
@register_meta("_roi_align_backward")
def meta_roi_align_backward(
grad, rois, spatial_scale, pooled_height, pooled_width, batch_size, channels, height, width, sampling_ratio, aligned
):
torch._check(
grad.dtype == rois.dtype,
lambda: (
"Expected tensor for grad to have the same type as tensor for rois; "
f"but type {grad.dtype} does not equal {rois.dtype}"
),
)
return grad.new_empty((batch_size, channels, height, width))
|
__version__ = '0.13.34'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.13.34'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
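# Usage sketch: setting DA_RICH_HANDLER to any value before importing docarray
# enables rich-formatted tracebacks, e.g. `DA_RICH_HANDLER=1 python app.py`.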
|
from __future__ import annotations
from enum import Enum
from typing import Callable
from numpy import ndarray
from torch import Tensor
from .util import (
cos_sim,
dot_score,
euclidean_sim,
manhattan_sim,
pairwise_cos_sim,
pairwise_dot_score,
pairwise_euclidean_sim,
pairwise_manhattan_sim,
)
class SimilarityFunction(Enum):
"""
Enum class for supported similarity functions. The following functions are supported:
- ``SimilarityFunction.COSINE`` (``"cosine"``): Cosine similarity
- ``SimilarityFunction.DOT_PRODUCT`` (``"dot"``, ``dot_product``): Dot product similarity
- ``SimilarityFunction.EUCLIDEAN`` (``"euclidean"``): Euclidean distance
- ``SimilarityFunction.MANHATTAN`` (``"manhattan"``): Manhattan distance
"""
COSINE = "cosine"
DOT_PRODUCT = "dot"
DOT = "dot" # Alias for DOT_PRODUCT
EUCLIDEAN = "euclidean"
MANHATTAN = "manhattan"
@staticmethod
def to_similarity_fn(
similarity_function: str | "SimilarityFunction",
) -> Callable[[Tensor | ndarray, Tensor | ndarray], Tensor]:
"""
Converts a similarity function name or enum value to the corresponding similarity function.
Args:
similarity_function (Union[str, SimilarityFunction]): The name or enum value of the similarity function.
Returns:
Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]: The corresponding similarity function.
Raises:
ValueError: If the provided function is not supported.
Example:
>>> similarity_fn = SimilarityFunction.to_similarity_fn("cosine")
>>> similarity_scores = similarity_fn(embeddings1, embeddings2)
>>> similarity_scores
tensor([[0.3952, 0.0554],
[0.0992, 0.1570]])
"""
similarity_function = SimilarityFunction(similarity_function)
if similarity_function == SimilarityFunction.COSINE:
return cos_sim
if similarity_function == SimilarityFunction.DOT_PRODUCT:
return dot_score
if similarity_function == SimilarityFunction.MANHATTAN:
return manhattan_sim
if similarity_function == SimilarityFunction.EUCLIDEAN:
return euclidean_sim
raise ValueError(
"The provided function {} is not supported. Use one of the supported values: {}.".format(
similarity_function, SimilarityFunction.possible_values()
)
)
@staticmethod
def to_similarity_pairwise_fn(
similarity_function: str | "SimilarityFunction",
) -> Callable[[Tensor | ndarray, Tensor | ndarray], Tensor]:
"""
Converts a similarity function into a pairwise similarity function.
The pairwise similarity function returns the diagonal vector from the similarity matrix, i.e. it only
computes the similarity(a[i], b[i]) for each i in the range of the input tensors, rather than
computing the similarity between all pairs of a and b.
Args:
similarity_function (Union[str, SimilarityFunction]): The name or enum value of the similarity function.
Returns:
Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]: The pairwise similarity function.
Raises:
ValueError: If the provided similarity function is not supported.
Example:
>>> pairwise_fn = SimilarityFunction.to_similarity_pairwise_fn("cosine")
>>> similarity_scores = pairwise_fn(embeddings1, embeddings2)
>>> similarity_scores
tensor([0.3952, 0.1570])
"""
similarity_function = SimilarityFunction(similarity_function)
if similarity_function == SimilarityFunction.COSINE:
return pairwise_cos_sim
if similarity_function == SimilarityFunction.DOT_PRODUCT:
return pairwise_dot_score
if similarity_function == SimilarityFunction.MANHATTAN:
return pairwise_manhattan_sim
if similarity_function == SimilarityFunction.EUCLIDEAN:
return pairwise_euclidean_sim
raise ValueError(
"The provided function {} is not supported. Use one of the supported values: {}.".format(
similarity_function, SimilarityFunction.possible_values()
)
)
@staticmethod
def possible_values() -> list[str]:
"""
Returns a list of possible values for the SimilarityFunction enum.
Returns:
list: A list of possible values for the SimilarityFunction enum.
Example:
>>> possible_values = SimilarityFunction.possible_values()
>>> possible_values
['cosine', 'dot', 'euclidean', 'manhattan']
"""
return [m.value for m in SimilarityFunction]
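# Usage sketch (embeddings1 and embeddings2 are assumed to be 2D tensors with the
# same embedding width):
# fn = SimilarityFunction.to_similarity_fn("cosine")                    # full matrix
# pairwise_fn = SimilarityFunction.to_similarity_pairwise_fn("cosine")  # diagonal only
# fn(embeddings1, embeddings2).shape          # (len(embeddings1), len(embeddings2))
# pairwise_fn(embeddings1, embeddings2).shape # (len(embeddings1),)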
|
from enum import Enum
from typing import Callable, List, Union
from numpy import ndarray
from torch import Tensor
from .util import (
cos_sim,
dot_score,
euclidean_sim,
manhattan_sim,
pairwise_cos_sim,
pairwise_dot_score,
pairwise_euclidean_sim,
pairwise_manhattan_sim,
)
class SimilarityFunction(Enum):
"""
Enum class for supported similarity functions. The following functions are supported:
- ``SimilarityFunction.COSINE`` (``"cosine"``): Cosine similarity
- ``SimilarityFunction.DOT_PRODUCT`` (``"dot"``, ``dot_product``): Dot product similarity
- ``SimilarityFunction.EUCLIDEAN`` (``"euclidean"``): Euclidean distance
- ``SimilarityFunction.MANHATTAN`` (``"manhattan"``): Manhattan distance
"""
COSINE = "cosine"
DOT_PRODUCT = "dot"
DOT = "dot" # Alias for DOT_PRODUCT
EUCLIDEAN = "euclidean"
MANHATTAN = "manhattan"
@staticmethod
def to_similarity_fn(
similarity_function: Union[str, "SimilarityFunction"],
) -> Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]:
"""
Converts a similarity function name or enum value to the corresponding similarity function.
Args:
similarity_function (Union[str, SimilarityFunction]): The name or enum value of the similarity function.
Returns:
Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]: The corresponding similarity function.
Raises:
ValueError: If the provided function is not supported.
Example:
>>> similarity_fn = SimilarityFunction.to_similarity_fn("cosine")
>>> similarity_scores = similarity_fn(embeddings1, embeddings2)
>>> similarity_scores
tensor([[0.3952, 0.0554],
[0.0992, 0.1570]])
"""
similarity_function = SimilarityFunction(similarity_function)
if similarity_function == SimilarityFunction.COSINE:
return cos_sim
if similarity_function == SimilarityFunction.DOT_PRODUCT:
return dot_score
if similarity_function == SimilarityFunction.MANHATTAN:
return manhattan_sim
if similarity_function == SimilarityFunction.EUCLIDEAN:
return euclidean_sim
raise ValueError(
"The provided function {} is not supported. Use one of the supported values: {}.".format(
similarity_function, SimilarityFunction.possible_values()
)
)
@staticmethod
def to_similarity_pairwise_fn(
similarity_function: Union[str, "SimilarityFunction"],
) -> Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]:
"""
Converts a similarity function into a pairwise similarity function.
The pairwise similarity function returns the diagonal vector from the similarity matrix, i.e. it only
computes the similarity(a[i], b[i]) for each i in the range of the input tensors, rather than
computing the similarity between all pairs of a and b.
Args:
similarity_function (Union[str, SimilarityFunction]): The name or enum value of the similarity function.
Returns:
Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]: The pairwise similarity function.
Raises:
ValueError: If the provided similarity function is not supported.
Example:
>>> pairwise_fn = SimilarityFunction.to_similarity_pairwise_fn("cosine")
>>> similarity_scores = pairwise_fn(embeddings1, embeddings2)
>>> similarity_scores
tensor([0.3952, 0.1570])
"""
similarity_function = SimilarityFunction(similarity_function)
if similarity_function == SimilarityFunction.COSINE:
return pairwise_cos_sim
if similarity_function == SimilarityFunction.DOT_PRODUCT:
return pairwise_dot_score
if similarity_function == SimilarityFunction.MANHATTAN:
return pairwise_manhattan_sim
if similarity_function == SimilarityFunction.EUCLIDEAN:
return pairwise_euclidean_sim
raise ValueError(
"The provided function {} is not supported. Use one of the supported values: {}.".format(
similarity_function, SimilarityFunction.possible_values()
)
)
@staticmethod
def possible_values() -> List[str]:
"""
Returns a list of possible values for the SimilarityFunction enum.
Returns:
list: A list of possible values for the SimilarityFunction enum.
Example:
>>> possible_values = SimilarityFunction.possible_values()
>>> possible_values
['cosine', 'dot', 'euclidean', 'manhattan']
"""
return [m.value for m in SimilarityFunction]
|
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
depths = [2, 2, 6, 2]
model = dict(
type='Mask2Former',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=depths,
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
frozen_stages=-1,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(
type='Mask2FormerHead', in_channels=[96, 192, 384, 768]),
init_cfg=None)
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optimizer = dict(
type='AdamW',
lr=0.0001,
weight_decay=0.05,
eps=1e-8,
betas=(0.9, 0.999),
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
|
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
depths = [2, 2, 6, 2]
model = dict(
type='Mask2Former',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=depths,
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
frozen_stages=-1,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(
type='Mask2FormerHead', in_channels=[96, 192, 384, 768]),
init_cfg=None)
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optimizer = dict(
type='AdamW',
lr=0.0001,
weight_decay=0.05,
eps=1e-8,
betas=(0.9, 0.999),
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
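# Sketch of the intended effect: mmdet's optimizer constructor matches each
# parameter name against the longest entry in custom_keys, so e.g.
# 'backbone.patch_embed.norm.weight' picks up lr_mult=0.1 and decay_mult=0.0
# (lr = 0.0001 * 0.1, weight decay 0), rather than the generic 'backbone' entry.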
|
# flake8: noqa
import torchaudio
from torchaudio._backend.utils import get_info_func, get_load_func, get_save_func
from . import utils
from .utils import _is_backend_dispatcher_enabled, get_audio_backend, list_audio_backends, set_audio_backend
if _is_backend_dispatcher_enabled():
torchaudio.info = get_info_func()
torchaudio.load = get_load_func()
torchaudio.save = get_save_func()
else:
utils._init_audio_backend()
|
# flake8: noqa
from . import utils
from .utils import get_audio_backend, list_audio_backends, set_audio_backend
utils._init_audio_backend()
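# Usage sketch: list_audio_backends() reports what is available in the current
# install; depending on the build it may include e.g. "sox_io" and "soundfile".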
|
_base_ = './faster-rcnn_hrnetv2p-w18-1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
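# Sketch of the resulting schedule: linear warmup over the first 500 iterations,
# then the base lr decays by 10x after epochs 16 and 22 of the 24-epoch run
# (the standard "2x" schedule).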
|
_base_ = './faster_rcnn_hrnetv2p_w18_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
import numpy as np
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
from pydantic import Field, BaseModel
from typing import Optional, Literal
from jina import Executor, requests
class TextDoc(BaseDoc):
text: str = Field(description="The text of the document", default="")
class EmbeddingResponseModel(TextDoc):
embeddings: NdArray = Field(description="The embedding of the texts", default=[])
class Config(BaseDoc.Config):
allow_population_by_field_name = True
arbitrary_types_allowed = True
json_encoders = {NdArray: lambda v: v.tolist()}
class Parameters(BaseModel):
task: Optional[
Literal[
"retrieval.query",
"retrieval.passage",
"text-matching",
"classification",
"separation",
]
] = None
late_chunking: bool = False
dimensions: Optional[int] = None
class SampleExecutor(Executor):
@requests(on="/encode")
def foo(self, docs: DocList[TextDoc], **kwargs) -> DocList[EmbeddingResponseModel]:
ret = []
for doc in docs:
ret.append(
EmbeddingResponseModel(
id=doc.id,
text=doc.text,
embeddings=np.random.random((1, 64)),
)
)
return DocList[EmbeddingResponseModel](ret)
@requests(on="/encode_parameter")
def bar(self, docs: DocList[TextDoc], parameters: Parameters, **kwargs) -> DocList[EmbeddingResponseModel]:
ret = []
for doc in docs:
ret.append(
EmbeddingResponseModel(
id=doc.id,
text=doc.text,
embeddings=np.random.random((1, parameters.dimensions)),
)
)
return DocList[EmbeddingResponseModel](ret)
|
import numpy as np
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
from pydantic import Field, BaseModel
from jina import Executor, requests
class TextDoc(BaseDoc):
text: str = Field(description="The text of the document", default="")
class EmbeddingResponseModel(TextDoc):
embeddings: NdArray = Field(description="The embedding of the texts", default=[])
class Config(BaseDoc.Config):
allow_population_by_field_name = True
arbitrary_types_allowed = True
json_encoders = {NdArray: lambda v: v.tolist()}
class Parameters(BaseModel):
emb_dim: int
class SampleExecutor(Executor):
@requests(on="/encode")
def foo(self, docs: DocList[TextDoc], **kwargs) -> DocList[EmbeddingResponseModel]:
ret = []
for doc in docs:
ret.append(
EmbeddingResponseModel(
id=doc.id,
text=doc.text,
embeddings=np.random.random((1, 64)),
)
)
return DocList[EmbeddingResponseModel](ret)
@requests(on="/encode_parameter")
def bar(self, docs: DocList[TextDoc], parameters: Parameters, **kwargs) -> DocList[EmbeddingResponseModel]:
ret = []
for doc in docs:
ret.append(
EmbeddingResponseModel(
id=doc.id,
text=doc.text,
embeddings=np.random.random((1, parameters.emb_dim)),
)
)
return DocList[EmbeddingResponseModel](ret)
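# Deployment sketch (assumes a standard local Jina deployment; the port is arbitrary):
# from jina import Deployment
# with Deployment(uses=SampleExecutor, port=12345) as dep:
#     docs = dep.post(on="/encode",
#                     inputs=DocList[TextDoc]([TextDoc(text="hello")]),
#                     return_type=DocList[EmbeddingResponseModel])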
|
import torch
from ._bounding_boxes import BoundingBoxes, BoundingBoxFormat, is_rotated_bounding_format
from ._image import Image
from ._keypoints import KeyPoints
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._tv_tensor import TVTensor
from ._video import Video
# TODO: Fix this. We skip this method as it leads to
# RecursionError: maximum recursion depth exceeded while calling a Python object
# Until `disable` is removed, there will be graph breaks after all calls to functional transforms
@torch.compiler.disable
def wrap(wrappee, *, like, **kwargs):
"""Convert a :class:`torch.Tensor` (``wrappee``) into the same :class:`~torchvision.tv_tensors.TVTensor` subclass as ``like``.
If ``like`` is a :class:`~torchvision.tv_tensors.BoundingBoxes`, the ``format`` and ``canvas_size`` of
``like`` are assigned to ``wrappee``, unless they are passed as ``kwargs``.
Args:
wrappee (Tensor): The tensor to convert.
like (:class:`~torchvision.tv_tensors.TVTensor`): The reference.
``wrappee`` will be converted into the same subclass as ``like``.
kwargs: Can contain "format", "canvas_size" and "clamping_mode" if ``like`` is a :class:`~torchvision.tv_tensors.BoundingBoxes`.
Ignored otherwise.
"""
if isinstance(like, BoundingBoxes):
return BoundingBoxes._wrap(
wrappee,
format=kwargs.get("format", like.format),
canvas_size=kwargs.get("canvas_size", like.canvas_size),
clamping_mode=kwargs.get("clamping_mode", like.clamping_mode),
)
elif isinstance(like, KeyPoints):
return KeyPoints._wrap(wrappee, canvas_size=kwargs.get("canvas_size", like.canvas_size))
else:
return wrappee.as_subclass(type(like))
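# Usage sketch: wrapping a plain tensor with the metadata of an existing TVTensor.
# boxes = BoundingBoxes(torch.rand(3, 4), format="XYXY", canvas_size=(32, 32))
# plain = torch.rand(3, 4)
# wrapped = wrap(plain, like=boxes)  # BoundingBoxes carrying boxes' format/canvas_size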
|
import torch
from ._bounding_boxes import BoundingBoxes, BoundingBoxFormat, is_rotated_bounding_format
from ._image import Image
from ._keypoints import KeyPoints
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._tv_tensor import TVTensor
from ._video import Video
# TODO: Fix this. We skip this method as it leads to
# RecursionError: maximum recursion depth exceeded while calling a Python object
# Until `disable` is removed, there will be graph breaks after all calls to functional transforms
@torch.compiler.disable
def wrap(wrappee, *, like, **kwargs):
"""Convert a :class:`torch.Tensor` (``wrappee``) into the same :class:`~torchvision.tv_tensors.TVTensor` subclass as ``like``.
If ``like`` is a :class:`~torchvision.tv_tensors.BoundingBoxes`, the ``format`` and ``canvas_size`` of
``like`` are assigned to ``wrappee``, unless they are passed as ``kwargs``.
Args:
wrappee (Tensor): The tensor to convert.
like (:class:`~torchvision.tv_tensors.TVTensor`): The reference.
``wrappee`` will be converted into the same subclass as ``like``.
kwargs: Can contain "format" and "canvas_size" if ``like`` is a :class:`~torchvision.tv_tensors.BoundingBoxes`.
Ignored otherwise.
"""
if isinstance(like, BoundingBoxes):
return BoundingBoxes._wrap(
wrappee,
format=kwargs.get("format", like.format),
canvas_size=kwargs.get("canvas_size", like.canvas_size),
)
elif isinstance(like, KeyPoints):
return KeyPoints._wrap(wrappee, canvas_size=kwargs.get("canvas_size", like.canvas_size))
else:
return wrappee.as_subclass(type(like))
|