input (string lengths 33–5k) | output (string lengths 32–5k) |
---|---|
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `SentenceTransformerTrainer` class to train models.
See https://www.sbert.net/docs/sentence_transformer/training_overview.html for more information.
Instead, you should create a `datasets` `Dataset` for training: https://huggingface.co/docs/datasets/create_dataset
"""
from __future__ import annotations
class InputExample:
"""Structure for one input example with texts, the label and a unique id"""
def __init__(self, guid: str = "", texts: list[str] | None = None, label: int | float = 0):
"""
Creates one InputExample with the given texts, guid and label
Args:
guid: id for the example
texts: the texts for the example.
label: the label for the example
"""
self.guid = guid
self.texts = texts
self.label = label
def __str__(self):
return "<InputExample> label: {}, texts: {}".format(str(self.label), "; ".join(self.texts))
|
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `SentenceTransformerTrainer` class to train models.
See https://www.sbert.net/docs/sentence_transformer/training_overview.html for more information.
Instead, you should create a `datasets` `Dataset` for training: https://huggingface.co/docs/datasets/create_dataset
"""
from __future__ import annotations
class InputExample:
"""Structure for one input example with texts, the label and a unique id"""
def __init__(self, guid: str = "", texts: list[str] = None, label: int | float = 0):
"""
Creates one InputExample with the given texts, guid and label
Args:
guid: id for the example
texts: the texts for the example.
label: the label for the example
"""
self.guid = guid
self.texts = texts
self.label = label
def __str__(self):
return "<InputExample> label: {}, texts: {}".format(str(self.label), "; ".join(self.texts))
|
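The docstrings above deprecate `InputExample` and point to a `datasets` `Dataset` as the replacement for Sentence Transformers v3+ training. Below is a minimal sketch of that conversion; the column names ("anchor", "positive") and the example sentences are illustrative choices, not mandated by the library.

```python
# Hedged sketch: building a `datasets.Dataset` instead of a list of
# InputExamples, as recommended above. Column names are illustrative.
from datasets import Dataset

pairs = [
    ("It's nice weather outside today.", "It's so sunny."),
    ("He drove to work.", "He took the car to the office."),
]
train_dataset = Dataset.from_dict(
    {
        "anchor": [anchor for anchor, _ in pairs],
        "positive": [positive for _, positive in pairs],
    }
)
print(train_dataset)  # Dataset({features: ['anchor', 'positive'], num_rows: 2})
```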
from typing import Dict, List
import requests
HEALTH_CHECK_OP = '%2Fgrpc.health.v1.Health%2FCheck'
def parse_string_jaeger_tags(jaeger_tags: List) -> Dict[str, str]:
"""Parse jaeger tags into a dictionary"""
return {i['key']: i['value'] for i in jaeger_tags if i['type'] == 'string'}
def get_last_health_check_data(jaeger_port: int, service_name: str) -> dict:
"""Get most recent health check data from Jaeger API for a given service
Args:
jaeger_port: Port to forward to Jaeger API
service_name: Service to get health check data for
Returns:
Health check trace JSON (dict)
"""
return requests.get(
f'http://localhost:{jaeger_port}/api/traces?service={service_name}&limit=1&operation={HEALTH_CHECK_OP}'
).json()['data'][0]
|
import requests
from typing import List, Dict
HEALTH_CHECK_OP = '%2Fgrpc.health.v1.Health%2FCheck'
def parse_string_jaeger_tags(jaeger_tags: List) -> Dict[str, str]:
"""Parse jaeger tags into a dictionary"""
return {i['key']: i['value'] for i in jaeger_tags if i['type'] == 'string'}
def get_last_health_check_data(jaeger_port: int, service_name: str) -> dict:
"""Get most recent health check data from Jaeger API for a given service
Args:
jaeger_port: Port to forward to Jaeger API
service_name: Service to get health check data for
Returns:
Health check trace JSON (dict)
"""
return requests.get(f'http://localhost:{jaeger_port}/api/traces?service={service_name}&limit=1&operation={HEALTH_CHECK_OP}').json()['data'][0]
|
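A hedged usage sketch tying the two helpers above together: fetch the most recent health-check trace for a service and parse the string tags of its first span. The port (16686, Jaeger's default query port) and the trace JSON layout (`spans` containing `tags`) are assumptions based on the standard Jaeger HTTP API, not guaranteed by this snippet.

```python
# Hedged sketch: combining get_last_health_check_data and
# parse_string_jaeger_tags. Assumes Jaeger's query API is reachable on
# port 16686 and traces follow the usual {'spans': [{'tags': [...]}]} layout.
trace = get_last_health_check_data(jaeger_port=16686, service_name='my-service')
first_span = trace['spans'][0]
string_tags = parse_string_jaeger_tags(first_span['tags'])
print(string_tags)  # e.g. {'rpc.method': 'Check', ...} depending on instrumentation
```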
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import STFT
from keras.src.initializers.constant_initializers import STFT as STFTInitializer
from keras.src.initializers.constant_initializers import STFT as stft
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Constant as constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import (
Identity as IdentityInitializer,
)
from keras.src.initializers.constant_initializers import Identity as identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import Ones as ones
from keras.src.initializers.constant_initializers import Zeros
from keras.src.initializers.constant_initializers import Zeros as zeros
from keras.src.initializers.initializer import Initializer
from keras.src.initializers.random_initializers import GlorotNormal
from keras.src.initializers.random_initializers import (
GlorotNormal as glorot_normal,
)
from keras.src.initializers.random_initializers import GlorotUniform
from keras.src.initializers.random_initializers import (
GlorotUniform as glorot_uniform,
)
from keras.src.initializers.random_initializers import HeNormal
from keras.src.initializers.random_initializers import HeNormal as he_normal
from keras.src.initializers.random_initializers import HeUniform
from keras.src.initializers.random_initializers import HeUniform as he_uniform
from keras.src.initializers.random_initializers import LecunNormal
from keras.src.initializers.random_initializers import (
LecunNormal as lecun_normal,
)
from keras.src.initializers.random_initializers import LecunUniform
from keras.src.initializers.random_initializers import (
LecunUniform as lecun_uniform,
)
from keras.src.initializers.random_initializers import Orthogonal
from keras.src.initializers.random_initializers import (
Orthogonal as OrthogonalInitializer,
)
from keras.src.initializers.random_initializers import Orthogonal as orthogonal
from keras.src.initializers.random_initializers import RandomNormal
from keras.src.initializers.random_initializers import (
RandomNormal as random_normal,
)
from keras.src.initializers.random_initializers import RandomUniform
from keras.src.initializers.random_initializers import (
RandomUniform as random_uniform,
)
from keras.src.initializers.random_initializers import TruncatedNormal
from keras.src.initializers.random_initializers import (
TruncatedNormal as truncated_normal,
)
from keras.src.initializers.random_initializers import VarianceScaling
from keras.src.initializers.random_initializers import (
VarianceScaling as variance_scaling,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Constant as constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import (
Identity as IdentityInitializer,
)
from keras.src.initializers.constant_initializers import Identity as identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import Ones as ones
from keras.src.initializers.constant_initializers import STFTInitializer
from keras.src.initializers.constant_initializers import Zeros
from keras.src.initializers.constant_initializers import Zeros as zeros
from keras.src.initializers.initializer import Initializer
from keras.src.initializers.random_initializers import GlorotNormal
from keras.src.initializers.random_initializers import (
GlorotNormal as glorot_normal,
)
from keras.src.initializers.random_initializers import GlorotUniform
from keras.src.initializers.random_initializers import (
GlorotUniform as glorot_uniform,
)
from keras.src.initializers.random_initializers import HeNormal
from keras.src.initializers.random_initializers import HeNormal as he_normal
from keras.src.initializers.random_initializers import HeUniform
from keras.src.initializers.random_initializers import HeUniform as he_uniform
from keras.src.initializers.random_initializers import LecunNormal
from keras.src.initializers.random_initializers import (
LecunNormal as lecun_normal,
)
from keras.src.initializers.random_initializers import LecunUniform
from keras.src.initializers.random_initializers import (
LecunUniform as lecun_uniform,
)
from keras.src.initializers.random_initializers import OrthogonalInitializer
from keras.src.initializers.random_initializers import (
OrthogonalInitializer as Orthogonal,
)
from keras.src.initializers.random_initializers import (
OrthogonalInitializer as orthogonal,
)
from keras.src.initializers.random_initializers import RandomNormal
from keras.src.initializers.random_initializers import (
RandomNormal as random_normal,
)
from keras.src.initializers.random_initializers import RandomUniform
from keras.src.initializers.random_initializers import (
RandomUniform as random_uniform,
)
from keras.src.initializers.random_initializers import TruncatedNormal
from keras.src.initializers.random_initializers import (
TruncatedNormal as truncated_normal,
)
from keras.src.initializers.random_initializers import VarianceScaling
from keras.src.initializers.random_initializers import (
VarianceScaling as variance_scaling,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET
from mmdet.models.builder import HEADS
from .base_panoptic_fusion_head import BasePanopticFusionHead
@HEADS.register_module()
class HeuristicFusionHead(BasePanopticFusionHead):
"""Fusion Head with Heuristic method."""
def __init__(self,
num_things_classes=80,
num_stuff_classes=53,
test_cfg=None,
init_cfg=None,
**kwargs):
super(HeuristicFusionHead,
self).__init__(num_things_classes, num_stuff_classes, test_cfg,
None, init_cfg, **kwargs)
def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
"""HeuristicFusionHead has no training loss."""
return dict()
def _lay_masks(self, bboxes, labels, masks, overlap_thr=0.5):
"""Lay instance masks to a result map.
Args:
bboxes: The bboxes results, (K, 4).
labels: The labels of bboxes, (K, ).
masks: The instance masks, (K, H, W).
overlap_thr: Threshold to determine whether two masks overlap.
default: 0.5.
Returns:
Tensor: The result map, (H, W).
"""
num_insts = bboxes.shape[0]
id_map = torch.zeros(
masks.shape[-2:], device=bboxes.device, dtype=torch.long)
if num_insts == 0:
return id_map, labels
scores, bboxes = bboxes[:, -1], bboxes[:, :4]
# Sort by score to use heuristic fusion
order = torch.argsort(-scores)
bboxes = bboxes[order]
labels = labels[order]
segm_masks = masks[order]
instance_id = 1
left_labels = []
for idx in range(bboxes.shape[0]):
_cls = labels[idx]
_mask = segm_masks[idx]
instance_id_map = torch.ones_like(
_mask, dtype=torch.long) * instance_id
area = _mask.sum()
if area == 0:
continue
pasted = id_map > 0
intersect = (_mask * pasted).sum()
if (intersect / (area + 1e-5)) > overlap_thr:
continue
_part = _mask * (~pasted)
id_map = torch.where(_part, instance_id_map, id_map)
left_labels.append(_cls)
instance_id += 1
if len(left_labels) > 0:
instance_labels = torch.stack(left_labels)
else:
instance_labels = bboxes.new_zeros((0, ), dtype=torch.long)
assert instance_id == (len(instance_labels) + 1)
return id_map, instance_labels
def simple_test(self, det_bboxes, det_labels, mask_preds, seg_preds,
**kwargs):
"""Fuse the results of instance and semantic segmentations.
Args:
det_bboxes: The bboxes results, (K, 4).
det_labels: The labels of bboxes, (K,).
mask_preds: The masks results, (K, H, W).
seg_preds: The semantic segmentation results,
(K, num_stuff + 1, H, W).
Returns:
Tensor : The panoptic segmentation result, (H, W).
"""
mask_preds = mask_preds >= self.test_cfg.mask_thr_binary
id_map, labels = self._lay_masks(det_bboxes, det_labels, mask_preds,
self.test_cfg.mask_overlap)
seg_results = seg_preds.argmax(dim=0)
seg_results = seg_results + self.num_things_classes
pan_results = seg_results
instance_id = 1
for idx in range(det_labels.shape[0]):
_mask = id_map == (idx + 1)
if _mask.sum() == 0:
continue
_cls = labels[idx]
# simply trust detection
segment_id = _cls + instance_id * INSTANCE_OFFSET
pan_results[_mask] = segment_id
instance_id += 1
ids, counts = torch.unique(
pan_results % INSTANCE_OFFSET, return_counts=True)
stuff_ids = ids[ids >= self.num_things_classes]
stuff_counts = counts[ids >= self.num_things_classes]
ignore_stuff_ids = stuff_ids[
stuff_counts < self.test_cfg.stuff_area_limit]
assert pan_results.ndim == 2
pan_results[(pan_results.unsqueeze(2) == ignore_stuff_ids.reshape(
1, 1, -1)).any(dim=2)] = self.num_classes
return pan_results
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.datasets.coco_panoptic import INSTANCE_OFFSET
from mmdet.models.builder import HEADS
from .base_panoptic_fusion_head import BasePanopticFusionHead
@HEADS.register_module()
class HeuristicFusionHead(BasePanopticFusionHead):
"""Fusion Head with Heuristic method."""
def __init__(self,
num_things_classes=80,
num_stuff_classes=53,
test_cfg=None,
init_cfg=None,
**kwargs):
super(HeuristicFusionHead,
self).__init__(num_things_classes, num_stuff_classes, test_cfg,
None, init_cfg, **kwargs)
def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
"""HeuristicFusionHead has no training loss."""
return dict()
def _lay_masks(self, bboxes, labels, masks, overlap_thr=0.5):
"""Lay instance masks to a result map.
Args:
bboxes: The bboxes results, (K, 4).
labels: The labels of bboxes, (K, ).
masks: The instance masks, (K, H, W).
overlap_thr: Threshold to determine whether two masks overlap.
default: 0.5.
Returns:
Tensor: The result map, (H, W).
"""
num_insts = bboxes.shape[0]
id_map = torch.zeros(
masks.shape[-2:], device=bboxes.device, dtype=torch.long)
if num_insts == 0:
return id_map, labels
scores, bboxes = bboxes[:, -1], bboxes[:, :4]
# Sort by score to use heuristic fusion
order = torch.argsort(-scores)
bboxes = bboxes[order]
labels = labels[order]
segm_masks = masks[order]
instance_id = 1
left_labels = []
for idx in range(bboxes.shape[0]):
_cls = labels[idx]
_mask = segm_masks[idx]
instance_id_map = torch.ones_like(
_mask, dtype=torch.long) * instance_id
area = _mask.sum()
if area == 0:
continue
pasted = id_map > 0
intersect = (_mask * pasted).sum()
if (intersect / (area + 1e-5)) > overlap_thr:
continue
_part = _mask * (~pasted)
id_map = torch.where(_part, instance_id_map, id_map)
left_labels.append(_cls)
instance_id += 1
if len(left_labels) > 0:
instance_labels = torch.stack(left_labels)
else:
instance_labels = bboxes.new_zeros((0, ), dtype=torch.long)
assert instance_id == (len(instance_labels) + 1)
return id_map, instance_labels
def simple_test(self, det_bboxes, det_labels, mask_preds, seg_preds,
**kwargs):
"""Fuse the results of instance and semantic segmentations.
Args:
det_bboxes: The bboxes results, (K, 4).
det_labels: The labels of bboxes, (K,).
mask_preds: The masks results, (K, H, W).
seg_preds: The semantic segmentation results,
(K, num_stuff + 1, H, W).
Returns:
Tensor : The panoptic segmentation result, (H, W).
"""
mask_preds = mask_preds >= self.test_cfg.mask_thr_binary
id_map, labels = self._lay_masks(det_bboxes, det_labels, mask_preds,
self.test_cfg.mask_overlap)
seg_results = seg_preds.argmax(dim=0)
seg_results = seg_results + self.num_things_classes
pan_results = seg_results
instance_id = 1
for idx in range(det_labels.shape[0]):
_mask = id_map == (idx + 1)
if _mask.sum() == 0:
continue
_cls = labels[idx]
# simply trust detection
segment_id = _cls + instance_id * INSTANCE_OFFSET
pan_results[_mask] = segment_id
instance_id += 1
ids, counts = torch.unique(
pan_results % INSTANCE_OFFSET, return_counts=True)
stuff_ids = ids[ids >= self.num_things_classes]
stuff_counts = counts[ids >= self.num_things_classes]
ignore_stuff_ids = stuff_ids[
stuff_counts < self.test_cfg.stuff_area_limit]
assert pan_results.ndim == 2
pan_results[(pan_results.unsqueeze(2) == ignore_stuff_ids.reshape(
1, 1, -1)).any(dim=2)] = self.num_classes
return pan_results
|
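The `_lay_masks` docstring above describes the heuristic: instances are pasted in descending score order, and an instance is dropped when the fraction of its area already covered by higher-scoring instances exceeds `overlap_thr`. The following is a minimal standalone sketch of that rule on toy boolean masks; it only illustrates the idea and does not call into the mmdet head.

```python
# Minimal sketch of the overlap heuristic from `_lay_masks`, on toy masks.
# Masks are assumed boolean (K, H, W) tensors already sorted by score.
import torch

def lay_masks_toy(masks, overlap_thr=0.5):
    id_map = torch.zeros(masks.shape[-2:], dtype=torch.long)
    instance_id = 1
    for mask in masks:
        area = mask.sum()
        if area == 0:
            continue
        pasted = id_map > 0
        intersect = (mask & pasted).sum()
        if intersect / (area + 1e-5) > overlap_thr:
            continue  # mostly covered by higher-scoring instances: drop it
        id_map[mask & ~pasted] = instance_id
        instance_id += 1
    return id_map

masks = torch.zeros(3, 4, 4, dtype=torch.bool)
masks[0, :2, :2] = True   # highest-scoring instance
masks[1, :2, :3] = True   # ~67% overlapped with the first: skipped
masks[2, 2:, 2:] = True   # disjoint: kept as instance 2
print(lay_masks_toy(masks))
```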
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .ld_head import LDHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .ssd_head import SSDHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'CascadeRPNHead',
'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead',
'CenterNetHead', 'YOLOXHead'
]
|
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .ld_head import LDHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .ssd_head import SSDHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'CascadeRPNHead',
'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead',
'CenterNetHead', 'YOLOXHead'
]
|
from __future__ import annotations
from enum import Enum
from typing import Any, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
between anchor and negative. It computes the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
Margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SentenceTransformerModel
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
})
loss = losses.TripletLoss(model=model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
def get_config_dict(self) -> dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = f"TripletDistanceMetric.{name}"
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
@property
def citation(self) -> str:
return """
@misc{hermans2017defense,
title={In Defense of the Triplet Loss for Person Re-Identification},
author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
year={2017},
eprint={1703.07737},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
|
from __future__ import annotations
from enum import Enum
from typing import Any, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
between anchor and negative. It computes the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
Margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SentenceTransformerModel
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
})
loss = losses.TripletLoss(model=model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(TripletLoss, self).__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
def get_config_dict(self) -> dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "TripletDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
@property
def citation(self) -> str:
return """
@misc{hermans2017defense,
title={In Defense of the Triplet Loss for Person Re-Identification},
author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
year={2017},
eprint={1703.07737},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
|
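A small numeric check of the formula quoted in the docstring, ``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``, using the same Euclidean metric and the default margin of 5. The toy embeddings are arbitrary; this only illustrates the arithmetic, not the training loop.

```python
# Hedged numeric check of the triplet formula with toy 2-d embeddings.
import torch
import torch.nn.functional as F

anchor = torch.tensor([[0.0, 0.0]])
positive = torch.tensor([[1.0, 0.0]])   # distance to anchor: 1.0
negative = torch.tensor([[4.0, 0.0]])   # distance to anchor: 4.0

d_pos = F.pairwise_distance(anchor, positive, p=2)
d_neg = F.pairwise_distance(anchor, negative, p=2)
loss = F.relu(d_pos - d_neg + 5.0)      # max(1 - 4 + 5, 0) ≈ 2
print(loss.item())                      # ≈ 2.0
```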
import pytest
from jina import Client
from jina.enums import ProtocolType
@pytest.mark.parametrize(
'protocol, gateway_type',
[
('http', ProtocolType.HTTP),
('grpc', ProtocolType.GRPC),
('ws', ProtocolType.WEBSOCKET),
(None, None),
],
)
@pytest.mark.parametrize('tls', [True, False])
@pytest.mark.parametrize('hostname', ['localhost', 'executor.jina.ai'])
def test_host_unpacking(protocol, gateway_type, tls, hostname):
port = 1234
protocol = f'{protocol}s' if tls and protocol else protocol
scheme = f'{protocol}://' if protocol else ''
host = f'{scheme}{hostname}:{port}'
c = Client(host=host) if scheme else Client(host=host, tls=tls)
if gateway_type:
assert c.args.protocol == gateway_type
assert c.args.host == hostname
assert c.args.port == port
assert c.args.tls == tls
@pytest.mark.parametrize('protocol', ['https', 'grpcs', 'wss'])
@pytest.mark.parametrize('port', [1234, None])
def test_host_unpacking_port_tls(protocol, port):
port_scheme = f':{port}' if port else ''
host = f'{protocol}://localhost{port_scheme}'
c = Client(host=host)
assert c.args.port == port if port else 443
@pytest.mark.parametrize('protocol', ['http', 'grpc', 'ws'])
@pytest.mark.parametrize('port', [1234, None])
def test_host_unpacking_port(protocol, port):
port_scheme = f':{port}' if port else ''
host = f'{protocol}://localhost{port_scheme}'
c = Client(host=host)
assert c.args.port == port if port else 80
def test_delete_slash_host():
host = f'http://localhost/'
c = Client(host=host)
assert c.args.host == 'localhost'
def test_host_unpacking_basic():
protocol = 'http'
hostname = 'localhost'
host = f'{protocol}://{hostname}'
c = Client(host=host)
assert c.args.protocol == ProtocolType.HTTP
assert c.args.host == hostname
def test_host_unpacking_duplicate():
with pytest.raises(ValueError):
Client(host=f'http://localhost:1234', port=1234)
def test_log_config_arg():
cli_args = ['--log-config', 'logging.custom.yml']
from jina.parsers import set_client_cli_parser
args = set_client_cli_parser().parse_args(cli_args)
assert args.log_config == 'logging.custom.yml'
|
import pytest
from jina import Client
from jina.enums import GatewayProtocolType
@pytest.mark.parametrize(
'protocol, gateway_type',
[
('http', GatewayProtocolType.HTTP),
('grpc', GatewayProtocolType.GRPC),
('ws', GatewayProtocolType.WEBSOCKET),
(None, None),
],
)
@pytest.mark.parametrize('tls', [True, False])
@pytest.mark.parametrize('hostname', ['localhost', 'executor.jina.ai'])
def test_host_unpacking(protocol, gateway_type, tls, hostname):
port = 1234
protocol = f'{protocol}s' if tls and protocol else protocol
scheme = f'{protocol}://' if protocol else ''
host = f'{scheme}{hostname}:{port}'
c = Client(host=host) if scheme else Client(host=host, tls=tls)
if gateway_type:
assert c.args.protocol == gateway_type
assert c.args.host == hostname
assert c.args.port == port
assert c.args.tls == tls
@pytest.mark.parametrize('protocol', ['https', 'grpcs', 'wss'])
@pytest.mark.parametrize('port', [1234, None])
def test_host_unpacking_port_tls(protocol, port):
port_scheme = f':{port}' if port else ''
host = f'{protocol}://localhost{port_scheme}'
c = Client(host=host)
assert c.args.port == port if port else 443
@pytest.mark.parametrize('protocol', ['http', 'grpc', 'ws'])
@pytest.mark.parametrize('port', [1234, None])
def test_host_unpacking_port(protocol, port):
port_scheme = f':{port}' if port else ''
host = f'{protocol}://localhost{port_scheme}'
c = Client(host=host)
assert c.args.port == port if port else 80
def test_delete_slash_host():
host = f'http://localhost/'
c = Client(host=host)
assert c.args.host == 'localhost'
def test_host_unpacking_basic():
protocol = 'http'
hostname = 'localhost'
host = f'{protocol}://{hostname}'
c = Client(host=host)
assert c.args.protocol == GatewayProtocolType.HTTP
assert c.args.host == hostname
def test_host_unpacking_duplicate():
with pytest.raises(ValueError):
Client(host=f'http://localhost:1234', port=1234)
def test_log_config_arg():
cli_args = ['--log-config', 'logging.custom.yml']
from jina.parsers import set_client_cli_parser
args = set_client_cli_parser().parse_args(cli_args)
assert args.log_config == 'logging.custom.yml'
|
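The tests above exercise how the Client splits a `scheme://host:port` string into protocol, hostname, and port, with 443/80 defaults for TLS and plain schemes. A hedged sketch of that decomposition using only the standard library follows; Jina's internal parsing may differ, this only mirrors what the assertions check.

```python
# Hedged sketch of the scheme/host/port split the tests above assert on.
# Not Jina's actual implementation.
from urllib.parse import urlparse

TLS_SCHEMES = {'https', 'grpcs', 'wss'}

def split_host(host: str):
    parsed = urlparse(host)
    port = parsed.port or (443 if parsed.scheme in TLS_SCHEMES else 80)
    return parsed.scheme or None, parsed.hostname, port

print(split_host('grpcs://executor.jina.ai:1234'))  # ('grpcs', 'executor.jina.ai', 1234)
print(split_host('https://localhost'))              # ('https', 'localhost', 443)
```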
"""Standard LangChain interface tests"""
from typing import Type
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ( # type: ignore[import-not-found]
ChatModelIntegrationTests, # type: ignore[import-not-found]
)
from langchain_mistralai import ChatMistralAI
class TestMistralStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatMistralAI
@property
def chat_model_params(self) -> dict:
return {"model": "mistral-large-latest", "temperature": 0}
@property
def supports_json_mode(self) -> bool:
return True
|
"""Standard LangChain interface tests"""
from typing import Optional, Type
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ( # type: ignore[import-not-found]
ChatModelIntegrationTests, # type: ignore[import-not-found]
)
from langchain_mistralai import ChatMistralAI
class TestMistralStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatMistralAI
@property
def chat_model_params(self) -> dict:
return {"model": "mistral-large-latest", "temperature": 0}
@property
def supports_json_mode(self) -> bool:
return True
@property
def tool_choice_value(self) -> Optional[str]:
"""Value to use for tool choice when used in tests."""
return "any"
|
import numpy as np
import pytest
import torch
from docarray import BaseDocument
from docarray.typing import AnyTensor, NdArray, TorchTensor
try:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp # type: ignore
from docarray.typing import TensorFlowTensor
except (ImportError, TypeError):
TensorFlowTensor = None
def test_set_tensor():
class MyDocument(BaseDocument):
tensor: AnyTensor
d = MyDocument(tensor=np.zeros((3, 224, 224)))
assert isinstance(d.tensor, NdArray)
assert isinstance(d.tensor, np.ndarray)
assert (d.tensor == np.zeros((3, 224, 224))).all()
d = MyDocument(tensor=torch.zeros((3, 224, 224)))
assert isinstance(d.tensor, TorchTensor)
assert isinstance(d.tensor, torch.Tensor)
assert (d.tensor == torch.zeros((3, 224, 224))).all()
@pytest.mark.tensorflow
def test_set_tensor():
class MyDocument(BaseDocument):
tensor: AnyTensor
d = MyDocument(tensor=tf.zeros((3, 224, 224)))
assert isinstance(d.tensor, TensorFlowTensor)
assert isinstance(d.tensor.tensor, tf.Tensor)
assert tnp.allclose(d.tensor.tensor, tf.zeros((3, 224, 224)))
|
import numpy as np
import torch
from docarray import BaseDocument
from docarray.typing import AnyTensor, NdArray, TorchTensor
def test_set_tensor():
class MyDocument(BaseDocument):
tensor: AnyTensor
d = MyDocument(tensor=np.zeros((3, 224, 224)))
assert isinstance(d.tensor, NdArray)
assert isinstance(d.tensor, np.ndarray)
assert (d.tensor == np.zeros((3, 224, 224))).all()
d = MyDocument(tensor=torch.zeros((3, 224, 224)))
assert isinstance(d.tensor, TorchTensor)
assert isinstance(d.tensor, torch.Tensor)
assert (d.tensor == torch.zeros((3, 224, 224))).all()
|
from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sklearn.metrics import ndcg_score
logger = logging.getLogger(__name__)
class CERerankingEvaluator:
"""
This class evaluates a CrossEncoder model for the task of re-ranking.
Given a query and a list of documents, it computes the score [query, doc_i] for all possible
documents and sorts them in decreasing order. Then, MRR@10 and NDCG@10 are computed to measure the quality of the ranking.
Args:
samples (List[Dict[str, Union[str, List[str]]]]): Must be a list where each element is of the form:
{'query': '', 'positive': [], 'negative': []}. Query is the search query, positive is a list
of positive (relevant) documents, negative is a list of negative (irrelevant) documents.
"""
def __init__(self, samples, at_k: int = 10, name: str = "", write_csv: bool = True, mrr_at_k: int | None = None):
self.samples = samples
self.name = name
if mrr_at_k is not None:
logger.warning(f"The `mrr_at_k` parameter has been deprecated; please use `at_k={mrr_at_k}` instead.")
self.at_k = mrr_at_k
else:
self.at_k = at_k
if isinstance(self.samples, dict):
self.samples = list(self.samples.values())
self.csv_file = "CERerankingEvaluator" + ("_" + name if name else "") + f"_results_@{self.at_k}.csv"
self.csv_headers = [
"epoch",
"steps",
"MRR@{}".format(self.at_k),
"NDCG@{}".format(self.at_k),
]
self.write_csv = write_csv
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CERerankingEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
all_mrr_scores = []
all_ndcg_scores = []
num_queries = 0
num_positives = []
num_negatives = []
for instance in self.samples:
query = instance["query"]
positive = list(instance["positive"])
negative = list(instance["negative"])
docs = positive + negative
is_relevant = [1] * len(positive) + [0] * len(negative)
if len(positive) == 0 or len(negative) == 0:
continue
num_queries += 1
num_positives.append(len(positive))
num_negatives.append(len(negative))
model_input = [[query, doc] for doc in docs]
pred_scores = model.predict(model_input, convert_to_numpy=True, show_progress_bar=False)
pred_scores_argsort = np.argsort(-pred_scores) # Sort in decreasing order
mrr_score = 0
for rank, index in enumerate(pred_scores_argsort[0 : self.at_k]):
if is_relevant[index]:
mrr_score = 1 / (rank + 1)
break
all_mrr_scores.append(mrr_score)
all_ndcg_scores.append(ndcg_score([is_relevant], [pred_scores], k=self.at_k))
mean_mrr = np.mean(all_mrr_scores)
mean_ndcg = np.mean(all_ndcg_scores)
logger.info(
"Queries: {} \t Positives: Min {:.1f}, Mean {:.1f}, Max {:.1f} \t Negatives: Min {:.1f}, Mean {:.1f}, Max {:.1f}".format(
num_queries,
np.min(num_positives),
np.mean(num_positives),
np.max(num_positives),
np.min(num_negatives),
np.mean(num_negatives),
np.max(num_negatives),
)
)
logger.info("MRR@{}: {:.2f}".format(self.at_k, mean_mrr * 100))
logger.info("NDCG@{}: {:.2f}".format(self.at_k, mean_ndcg * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, mean_mrr, mean_ndcg])
return mean_mrr
|
import csv
import logging
import os
from typing import Optional
import numpy as np
from sklearn.metrics import ndcg_score
logger = logging.getLogger(__name__)
class CERerankingEvaluator:
"""
This class evaluates a CrossEncoder model for the task of re-ranking.
Given a query and a list of documents, it computes the score [query, doc_i] for all possible
documents and sorts them in decreasing order. Then, MRR@10 and NDCG@10 are computed to measure the quality of the ranking.
Args:
samples (List[Dict[str, Union[str, List[str]]]]): Must be a list where each element is of the form:
{'query': '', 'positive': [], 'negative': []}. Query is the search query, positive is a list
of positive (relevant) documents, negative is a list of negative (irrelevant) documents.
"""
def __init__(
self, samples, at_k: int = 10, name: str = "", write_csv: bool = True, mrr_at_k: Optional[int] = None
):
self.samples = samples
self.name = name
if mrr_at_k is not None:
logger.warning(f"The `mrr_at_k` parameter has been deprecated; please use `at_k={mrr_at_k}` instead.")
self.at_k = mrr_at_k
else:
self.at_k = at_k
if isinstance(self.samples, dict):
self.samples = list(self.samples.values())
self.csv_file = "CERerankingEvaluator" + ("_" + name if name else "") + f"_results_@{self.at_k}.csv"
self.csv_headers = [
"epoch",
"steps",
"MRR@{}".format(self.at_k),
"NDCG@{}".format(self.at_k),
]
self.write_csv = write_csv
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CERerankingEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
all_mrr_scores = []
all_ndcg_scores = []
num_queries = 0
num_positives = []
num_negatives = []
for instance in self.samples:
query = instance["query"]
positive = list(instance["positive"])
negative = list(instance["negative"])
docs = positive + negative
is_relevant = [1] * len(positive) + [0] * len(negative)
if len(positive) == 0 or len(negative) == 0:
continue
num_queries += 1
num_positives.append(len(positive))
num_negatives.append(len(negative))
model_input = [[query, doc] for doc in docs]
pred_scores = model.predict(model_input, convert_to_numpy=True, show_progress_bar=False)
pred_scores_argsort = np.argsort(-pred_scores) # Sort in decreasing order
mrr_score = 0
for rank, index in enumerate(pred_scores_argsort[0 : self.at_k]):
if is_relevant[index]:
mrr_score = 1 / (rank + 1)
break
all_mrr_scores.append(mrr_score)
all_ndcg_scores.append(ndcg_score([is_relevant], [pred_scores], k=self.at_k))
mean_mrr = np.mean(all_mrr_scores)
mean_ndcg = np.mean(all_ndcg_scores)
logger.info(
"Queries: {} \t Positives: Min {:.1f}, Mean {:.1f}, Max {:.1f} \t Negatives: Min {:.1f}, Mean {:.1f}, Max {:.1f}".format(
num_queries,
np.min(num_positives),
np.mean(num_positives),
np.max(num_positives),
np.min(num_negatives),
np.mean(num_negatives),
np.max(num_negatives),
)
)
logger.info("MRR@{}: {:.2f}".format(self.at_k, mean_mrr * 100))
logger.info("NDCG@{}: {:.2f}".format(self.at_k, mean_ndcg * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, mean_mrr, mean_ndcg])
return mean_mrr
|
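A small sketch of the two metrics the evaluator reports, computed on a toy query the same way as in the loop above: MRR@k from the rank of the first relevant document in the sorted predictions, and NDCG@k via `sklearn.metrics.ndcg_score`. The relevance labels and scores are made up for illustration.

```python
# Hedged sketch of MRR@k / NDCG@k on a toy query, mirroring the loop above.
import numpy as np
from sklearn.metrics import ndcg_score

is_relevant = [1, 0, 0, 1, 0]                       # 1 = positive doc, 0 = negative doc
pred_scores = np.array([0.2, 0.9, 0.4, 0.8, 0.1])   # model scores (made up)
at_k = 3

order = np.argsort(-pred_scores)                    # sort docs by decreasing score
mrr = 0.0
for rank, index in enumerate(order[:at_k]):
    if is_relevant[index]:
        mrr = 1 / (rank + 1)                        # first relevant doc at rank 2 -> 0.5
        break
ndcg = ndcg_score([is_relevant], [pred_scores], k=at_k)
print(mrr, round(ndcg, 3))
```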
import subprocess
import pytest
from dpr_text import DPRTextEncoder
from jina import Document, DocumentArray, Flow
_EMBEDDING_DIM = 768
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=DPRTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
|
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...dpr_text import DPRTextEncoder
_EMBEDDING_DIM = 768
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=DPRTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
|
#!/usr/bin/env python
# Sorts what's new entries with per-module headings.
# Pass what's new entries on stdin.
import re
import sys
from collections import defaultdict
LABEL_ORDER = ["MajorFeature", "Feature", "Efficiency", "Enhancement", "Fix", "API"]
def entry_sort_key(s):
if s.startswith("- |"):
return LABEL_ORDER.index(s.split("|")[1])
else:
return -1
# discard headings and other non-entry lines
text = "".join(l for l in sys.stdin if l.startswith("- ") or l.startswith(" "))
bucketed = defaultdict(list)
for entry in re.split("\n(?=- )", text.strip()):
modules = re.findall(
r":(?:func|meth|mod|class):`(?:[^<`]*<|~)?(?:sklearn.)?([a-z]\w+)", entry
)
modules = set(modules)
if len(modules) > 1:
key = "Multiple modules"
elif modules:
key = ":mod:`sklearn.%s`" % next(iter(modules))
else:
key = "Miscellaneous"
bucketed[key].append(entry)
entry = entry.strip() + "\n"
everything = []
for key, bucket in sorted(bucketed.items()):
everything.append(key + "\n" + "." * len(key))
bucket.sort(key=entry_sort_key)
everything.extend(bucket)
print("\n\n".join(everything))
|
#!/usr/bin/env python
# Sorts what's new entries with per-module headings.
# Pass what's new entries on stdin.
import re
import sys
from collections import defaultdict
LABEL_ORDER = ["MajorFeature", "Feature", "Efficiency", "Enhancement", "Fix", "API"]
def entry_sort_key(s):
if s.startswith("- |"):
return LABEL_ORDER.index(s.split("|")[1])
else:
return -1
# discard headings and other non-entry lines
text = "".join(l for l in sys.stdin if l.startswith("- ") or l.startswith(" "))
bucketed = defaultdict(list)
for entry in re.split("\n(?=- )", text.strip()):
modules = re.findall(
r":(?:func|meth|mod|class):" r"`(?:[^<`]*<|~)?(?:sklearn.)?([a-z]\w+)", entry
)
modules = set(modules)
if len(modules) > 1:
key = "Multiple modules"
elif modules:
key = ":mod:`sklearn.%s`" % next(iter(modules))
else:
key = "Miscellaneous"
bucketed[key].append(entry)
entry = entry.strip() + "\n"
everything = []
for key, bucket in sorted(bucketed.items()):
everything.append(key + "\n" + "." * len(key))
bucket.sort(key=entry_sort_key)
everything.extend(bucket)
print("\n\n".join(everything))
|
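The bucketing above relies on the `:func:`/`:meth:`/`:mod:`/`:class:` cross-reference regex to recover which sklearn submodule each entry touches. A quick check of that extraction on a sample entry follows; the entry text is invented only to show the capture.

```python
# Quick check of the module-extraction regex used above, on an invented entry.
import re

entry = "- |Fix| :class:`~sklearn.linear_model.LogisticRegression` now converges."
modules = re.findall(
    r":(?:func|meth|mod|class):`(?:[^<`]*<|~)?(?:sklearn.)?([a-z]\w+)", entry
)
print(modules)  # ['linear_model']
```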
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.saving.serialization import (
deserialize_keras_object as deserialize_keras_object,
)
from keras.src.legacy.saving.serialization import (
serialize_keras_object as serialize_keras_object,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.saving.serialization import deserialize_keras_object
from keras.src.legacy.saving.serialization import serialize_keras_object
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor, VideoBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
tf = import_library('tensorflow', raise_error=False)
torch = import_library('torch', raise_error=False)
T = TypeVar('T', bound='VideoDoc')
class VideoDoc(BaseDoc):
"""
Document for handling video.
The Video Document can contain:
- a [`VideoUrl`][docarray.typing.url.VideoUrl] (`VideoDoc.url`)
- an [`AudioDoc`][docarray.documents.AudioDoc] (`VideoDoc.audio`)
- a [`VideoTensor`](../../../api_references/typing/tensor/video) (`VideoDoc.tensor`)
- an [`AnyTensor`](../../../api_references/typing/tensor/tensor) representing the indices of the video's key frames (`VideoDoc.key_frame_indices`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`VideoDoc.embedding`)
- a [`VideoBytes`][docarray.typing.bytes.VideoBytes] object (`VideoDoc.bytes_`)
You can use this Document directly:
```python
from docarray.documents import VideoDoc
# use it directly
vid = VideoDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.tensor, vid.audio.tensor, vid.key_frame_indices = vid.url.load()
# model = MyEmbeddingModel()
# vid.embedding = model(vid.tensor)
```
You can extend this Document:
```python
from typing import Optional
from docarray.documents import TextDoc, VideoDoc
# extend it
class MyVideo(VideoDoc):
name: Optional[TextDoc]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
video.name = TextDoc(text='my first video')
video.tensor = video.url.load().video
# model = MyEmbeddingModel()
# video.embedding = model(video.tensor)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import TextDoc, VideoDoc
# compose it
class MultiModalDoc(BaseDoc):
video: VideoDoc
text: TextDoc
mmdoc = MultiModalDoc(
video=VideoDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.video.tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes_ = mmdoc.video.url.load_bytes()
mmdoc.video.tensor = mmdoc.video.bytes_.load().video
```
"""
url: Optional[VideoUrl] = None
audio: Optional[AudioDoc] = AudioDoc()
tensor: Optional[VideoTensor] = None
key_frame_indices: Optional[AnyTensor] = None
embedding: Optional[AnyEmbedding] = None
bytes_: Optional[VideoBytes] = None
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor, VideoBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
tf = import_library('tensorflow', raise_error=False)
torch = import_library('torch', raise_error=False)
T = TypeVar('T', bound='VideoDoc')
class VideoDoc(BaseDoc):
"""
Document for handling video.
The Video Document can contain:
- a [`VideoUrl`][docarray.typing.url.VideoUrl] (`VideoDoc.url`)
- an [`AudioDoc`][docarray.documents.AudioDoc] (`VideoDoc.audio`)
- a [`VideoTensor`](../../../api_references/typing/tensor/video) (`VideoDoc.tensor`)
- an [`AnyTensor`](../../../api_references/typing/tensor/tensor) representing the indices of the video's key frames (`VideoDoc.key_frame_indices`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`VideoDoc.embedding`)
- a [`VideoBytes`][docarray.typing.bytes.VideoBytes] object (`VideoDoc.bytes_`)
You can use this Document directly:
```python
from docarray.documents import VideoDoc
# use it directly
vid = VideoDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.tensor, vid.audio.tensor, vid.key_frame_indices = vid.url.load()
# model = MyEmbeddingModel()
# vid.embedding = model(vid.tensor)
```
You can extend this Document:
```python
from typing import Optional
from docarray.documents import TextDoc, VideoDoc
# extend it
class MyVideo(VideoDoc):
name: Optional[TextDoc]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
video.name = TextDoc(text='my first video')
video.tensor = video.url.load().video
# model = MyEmbeddingModel()
# video.embedding = model(video.tensor)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import TextDoc, VideoDoc
# compose it
class MultiModalDoc(BaseDoc):
video: VideoDoc
text: TextDoc
mmdoc = MultiModalDoc(
video=VideoDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.video.tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes_ = mmdoc.video.url.load_bytes()
mmdoc.video.tensor = mmdoc.video.bytes_.load().video
```
"""
url: Optional[VideoUrl]
audio: Optional[AudioDoc] = AudioDoc()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes_: Optional[VideoBytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
import argparse
from abc import ABC
from typing import TYPE_CHECKING, Optional, Union
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
if TYPE_CHECKING:
import asyncio
import multiprocessing
import threading
class GatewayRuntime(AsyncNewLoopRuntime, ABC):
"""
The Runtime from which the GatewayRuntimes need to inherit
"""
def __init__(
self,
args: argparse.Namespace,
cancel_event: Optional[
Union['asyncio.Event', 'multiprocessing.Event', 'threading.Event']
] = None,
**kwargs,
):
# this order is intentional: The timeout is needed in _create_topology_graph(), called by super
self.timeout_send = args.timeout_send
if self.timeout_send:
self.timeout_send /= 1e3 # convert ms to seconds
super().__init__(args, cancel_event, **kwargs)
|
import argparse
from abc import ABC
from typing import TYPE_CHECKING, Optional, Union
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
if TYPE_CHECKING:
import asyncio
import multiprocessing
import threading
class GatewayRuntime(AsyncNewLoopRuntime, ABC):
"""
The Runtime from which the GatewayRuntimes need to inherit
"""
def __init__(
self,
args: argparse.Namespace,
cancel_event: Optional[
Union['asyncio.Event', 'multiprocessing.Event', 'threading.Event']
] = None,
**kwargs,
):
# this order is intentional: The timeout is needed in _create_topology_graph(), called by super
self.timeout_send = args.timeout_send
if self.timeout_send:
self.timeout_send /= 1e3 # convert ms to seconds
super().__init__(args, cancel_event, **kwargs)
|
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
from .autoencoder_oobleck import AutoencoderOobleck
from .autoencoder_tiny import AutoencoderTiny
from .consistency_decoder_vae import ConsistencyDecoderVAE
from .vq_model import VQModel
|
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
from .autoencoder_oobleck import AutoencoderOobleck
from .autoencoder_tiny import AutoencoderTiny
from .consistency_decoder_vae import ConsistencyDecoderVAE
from .vq_model import VQModel
|
__version__ = '0.14.2'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.14.1'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import (
AlibabaCloudOpenSearch,
AlibabaCloudOpenSearchSettings,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AlibabaCloudOpenSearchSettings": "langchain_community.vectorstores",
"AlibabaCloudOpenSearch": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AlibabaCloudOpenSearch",
"AlibabaCloudOpenSearchSettings",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import (
AlibabaCloudOpenSearch,
AlibabaCloudOpenSearchSettings,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AlibabaCloudOpenSearchSettings": "langchain_community.vectorstores",
"AlibabaCloudOpenSearch": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AlibabaCloudOpenSearchSettings",
"AlibabaCloudOpenSearch",
]
|
"""Init params."""
from llama_index.finetuning.embeddings.adapter import EmbeddingAdapterFinetuneEngine
from llama_index.finetuning.embeddings.sentence_transformer import (
SentenceTransformersFinetuneEngine,
)
__all__ = ["EmbeddingAdapterFinetuneEngine", "SentenceTransformersFinetuneEngine"]
|
"""Init params."""
from llama_index.finetuning.embeddings.adapter import EmbeddingAdapterFinetuneEngine
from llama_index.finetuning.embeddings.sentence_transformer import (
SentenceTransformersFinetuneEngine,
)
__all__ = ["EmbeddingAdapterFinetuneEngine", "SentenceTransformersFinetuneEngine"]
|
import functools
from collections import defaultdict
import torch
from torch._export.passes._node_metadata_hook import (
_node_metadata_hook,
_set_node_metadata_hook,
)
from torch._library.fake_profile import OpProfile, TensorMetadata
def insert_custom_op_guards(gm: torch.fx.GraphModule, ops_to_guard: set[str]) -> None:
"""
This is used by draft_export to insert guards in front of calls to custom
operators which have a generated fake kernel.
"""
for node in gm.graph.nodes:
if node.op == "call_function" and str(node.target) in ops_to_guard:
with (
_set_node_metadata_hook(
gm,
functools.partial(
_node_metadata_hook, stack_trace=node.meta.get("stack_trace")
),
),
gm.graph.inserting_before(node),
):
for arg in (*node.args, *node.kwargs.values()):
if isinstance(arg, torch.fx.Node) and isinstance(
arg.meta.get("val"), torch.Tensor
):
val = arg.meta["val"]
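                        # Insert a runtime guard: assert that this tensor argument
                        # keeps the dtype, device and layout observed at export time.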
gm.graph.call_function(
torch.ops.aten._assert_tensor_metadata.default,
args=(arg,),
kwargs={
"dtype": val.dtype,
"device": val.device,
"layout": val.layout,
},
)
gm.recompile()
def get_op_profiles(
gm: torch.fx.GraphModule, ops_to_guard: set[str]
) -> dict[str, set[OpProfile]]:
"""
This is used by draft_export to get a list of custom operator profiles so
that we can generate fake kernels.
"""
def _get_op_profile(node: torch.fx.Node) -> OpProfile:
args_profile = tuple(
[
TensorMetadata.maybe_from_tensor(arg.meta.get("val"))
if isinstance(arg, torch.fx.Node)
else None
for arg in (*node.args, *node.kwargs.values())
]
)
out_profile = None
meta = node.meta.get("val")
assert meta is not None
if isinstance(meta, torch.Tensor):
out_profile = TensorMetadata.maybe_from_tensor(meta)
elif isinstance(meta, (list, tuple)):
out_profile = tuple([TensorMetadata.maybe_from_tensor(m) for m in meta]) # type: ignore[assignment]
assert out_profile is not None
return OpProfile(args_profile, out_profile) # type: ignore[arg-type]
op_profiles: dict[str, set[OpProfile]] = defaultdict(set)
for node in gm.graph.nodes:
if node.op == "call_function" and str(node.target) in ops_to_guard:
op_profiles[str(node.target)].add(_get_op_profile(node))
return op_profiles
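# Illustrative usage sketch (the operator name below is hypothetical, not part of
# this module): after draft_export produces a GraphModule ``gm``, the two helpers
# can be combined to record observed tensor metadata and to guard it at runtime:
#
#   ops = {"mylib.custom_op.default"}        # hypothetical custom operator name
#   profiles = get_op_profiles(gm, ops)      # dict[str, set[OpProfile]]
#   insert_custom_op_guards(gm, ops)         # adds _assert_tensor_metadata guards in place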
|
import functools
from collections import defaultdict
import torch
from torch._export.passes._node_metadata_hook import (
_node_metadata_hook,
_set_node_metadata_hook,
)
from torch._library.fake_profile import OpProfile, TensorMetadata
def insert_custom_op_guards(gm: torch.fx.GraphModule, ops_to_guard: set[str]) -> None:
"""
This is used by draft_export to insert guards in front of calls to custom
operators which have a generated fake kernel.
"""
for node in gm.graph.nodes:
if node.op == "call_function" and str(node.target) in ops_to_guard:
with _set_node_metadata_hook(
gm,
functools.partial(
_node_metadata_hook, stack_trace=node.meta.get("stack_trace")
),
), gm.graph.inserting_before(node):
for arg in (*node.args, *node.kwargs.values()):
if isinstance(arg, torch.fx.Node) and isinstance(
arg.meta.get("val"), torch.Tensor
):
val = arg.meta["val"]
gm.graph.call_function(
torch.ops.aten._assert_tensor_metadata.default,
args=(arg,),
kwargs={
"dtype": val.dtype,
"device": val.device,
"layout": val.layout,
},
)
gm.recompile()
def get_op_profiles(
gm: torch.fx.GraphModule, ops_to_guard: set[str]
) -> dict[str, set[OpProfile]]:
"""
This is used by draft_export to get a list of custom operator profiles so
that we can generate fake kernels.
"""
def _get_op_profile(node: torch.fx.Node) -> OpProfile:
args_profile = tuple(
[
TensorMetadata.maybe_from_tensor(arg.meta.get("val"))
if isinstance(arg, torch.fx.Node)
else None
for arg in (*node.args, *node.kwargs.values())
]
)
out_profile = None
meta = node.meta.get("val")
assert meta is not None
if isinstance(meta, torch.Tensor):
out_profile = TensorMetadata.maybe_from_tensor(meta)
elif isinstance(meta, (list, tuple)):
out_profile = tuple([TensorMetadata.maybe_from_tensor(m) for m in meta]) # type: ignore[assignment]
assert out_profile is not None
return OpProfile(args_profile, out_profile) # type: ignore[arg-type]
op_profiles: dict[str, set[OpProfile]] = defaultdict(set)
for node in gm.graph.nodes:
if node.op == "call_function" and str(node.target) in ops_to_guard:
op_profiles[str(node.target)].add(_get_op_profile(node))
return op_profiles
|
# coding=utf-8
# Copyright 2025 Advanced Micro Devices, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any
from ..file_utils import is_torch_available
from .base import HfQuantizer
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
if is_torch_available():
import torch
from ..utils import is_accelerate_available, is_quark_available, logging
if is_accelerate_available():
from accelerate.utils import set_module_tensor_to_device
logger = logging.get_logger(__name__)
CHECKPOINT_KEYS = {
"weight_scale": "weight_quantizer.scale",
"bias_scale": "bias_quantizer.scale",
"input_scale": "input_quantizer.scale",
"output_scale": "output_quantizer.scale",
"weight_zero_point": "weight_quantizer.zero_point",
"bias_zero_point": "bias_quantizer.zero_point",
"input_zero_point": "input_quantizer.zero_point",
"output_zero_point": "output_quantizer.zero_point",
}
class QuarkHfQuantizer(HfQuantizer):
"""
Quark quantizer (https://quark.docs.amd.com/latest/).
"""
requires_calibration = True # On-the-fly quantization with quark is not supported for now.
required_packages = ["quark"]
# Checkpoints are expected to be already quantized when loading a quark model. However, as some keys from
# the checkpoint might mismatch the model parameters keys, we use the `create_quantized_param` method
# to load the checkpoints, remapping the keys.
requires_parameters_quantization = True
def __init__(self, quantization_config, **kwargs):
super().__init__(quantization_config, **kwargs)
self.json_export_config = quantization_config.json_export_config
def validate_environment(self, *args, **kwargs):
if not is_quark_available():
raise ImportError(
"Loading a Quark quantized model requires the `quark` library but it was not found in the environment. Please refer to https://quark.docs.amd.com/latest/install.html."
)
def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs):
from quark.torch.export.api import _map_to_quark
_map_to_quark(
model,
self.quantization_config.quant_config,
pack_method=self.json_export_config.pack_method,
custom_mode=self.quantization_config.custom_mode,
)
return model
def check_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
state_dict: dict[str, Any],
**kwargs,
) -> bool:
return True
def create_quantized_param(
self, model, param, param_name, param_device, state_dict, unexpected_keys
) -> "torch.nn.Parameter":
postfix = param_name.split(".")[-1]
if postfix in CHECKPOINT_KEYS:
param_name = param_name.replace(postfix, CHECKPOINT_KEYS[postfix])
set_module_tensor_to_device(model, param_name, param_device, value=param)
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
return model
def is_serializable(self, safe_serialization=None):
return False
@property
def is_trainable(self):
return False
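# Illustrative sketch of the key remapping done in ``create_quantized_param``
# (the parameter name below is a hypothetical example, not from a real checkpoint):
#
#   "model.layers.0.self_attn.q_proj.weight_scale"
#       -> "model.layers.0.self_attn.q_proj.weight_quantizer.scale"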
|
# coding=utf-8
# Copyright 2025 Advanced Micro Devices, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, Dict
from ..file_utils import is_torch_available
from .base import HfQuantizer
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
if is_torch_available():
import torch
from ..utils import is_accelerate_available, is_quark_available, logging
if is_accelerate_available():
from accelerate.utils import set_module_tensor_to_device
logger = logging.get_logger(__name__)
CHECKPOINT_KEYS = {
"weight_scale": "weight_quantizer.scale",
"bias_scale": "bias_quantizer.scale",
"input_scale": "input_quantizer.scale",
"output_scale": "output_quantizer.scale",
"weight_zero_point": "weight_quantizer.zero_point",
"bias_zero_point": "bias_quantizer.zero_point",
"input_zero_point": "input_quantizer.zero_point",
"output_zero_point": "output_quantizer.zero_point",
}
class QuarkHfQuantizer(HfQuantizer):
"""
Quark quantizer (https://quark.docs.amd.com/latest/).
"""
requires_calibration = True # On-the-fly quantization with quark is not supported for now.
required_packages = ["quark"]
# Checkpoints are expected to be already quantized when loading a quark model. However, as some keys from
# the checkpoint might mismatch the model parameters keys, we use the `create_quantized_param` method
# to load the checkpoints, remapping the keys.
requires_parameters_quantization = True
def __init__(self, quantization_config, **kwargs):
super().__init__(quantization_config, **kwargs)
self.json_export_config = quantization_config.json_export_config
def validate_environment(self, *args, **kwargs):
if not is_quark_available():
raise ImportError(
"Loading a Quark quantized model requires the `quark` library but it was not found in the environment. Please refer to https://quark.docs.amd.com/latest/install.html."
)
def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs):
from quark.torch.export.api import _map_to_quark
_map_to_quark(
model,
self.quantization_config.quant_config,
pack_method=self.json_export_config.pack_method,
custom_mode=self.quantization_config.custom_mode,
)
return model
def check_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
state_dict: Dict[str, Any],
**kwargs,
) -> bool:
return True
def create_quantized_param(
self, model, param, param_name, param_device, state_dict, unexpected_keys
) -> "torch.nn.Parameter":
postfix = param_name.split(".")[-1]
if postfix in CHECKPOINT_KEYS:
param_name = param_name.replace(postfix, CHECKPOINT_KEYS[postfix])
set_module_tensor_to_device(model, param_name, param_device, value=param)
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
return model
def is_serializable(self, safe_serialization=None):
return False
@property
def is_trainable(self):
return False
|
"""Snowflake Reader."""
import logging
from typing import Any, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from sqlalchemy import create_engine, text
from sqlalchemy.engine import Engine
from sqlalchemy.orm import sessionmaker
logger = logging.getLogger(__name__)
class SnowflakeReader(BaseReader):
"""
Initializes a new instance of the SnowflakeReader.
    This class establishes a connection to Snowflake using SQLAlchemy, executes a query,
    and concatenates each row into a Document used by LlamaIndex.
Attributes:
engine (Optional[Engine]): SQLAlchemy Engine object of the database connection.
OR
account (Optional[str]): Snowflake account identifier.
user (Optional[str]): Snowflake account username.
password (Optional[str]): Password for the Snowflake account.
database (Optional[str]): Snowflake database name.
schema (Optional[str]): Snowflake schema name.
warehouse (Optional[str]): Snowflake warehouse name.
proxy (Optional[str]): Proxy setting for the connection.
"""
def __init__(
self,
account: Optional[str] = None,
user: Optional[str] = None,
password: Optional[str] = None,
database: Optional[str] = None,
schema: Optional[str] = None,
warehouse: Optional[str] = None,
role: Optional[str] = None,
proxy: Optional[str] = None,
engine: Optional[Engine] = None,
) -> None:
"""
Initializes the SnowflakeReader with optional connection details, proxy configuration, or an engine directly.
Args:
account (Optional[str]): Snowflake account identifier.
user (Optional[str]): Snowflake account username.
password (Optional[str]): Password for the Snowflake account.
database (Optional[str]): Snowflake database name.
schema (Optional[str]): Snowflake schema name.
warehouse (Optional[str]): Snowflake warehouse name.
role (Optional[str]): Snowflake role name.
proxy (Optional[str]): Proxy setting for the connection.
engine (Optional[Engine]): Existing SQLAlchemy engine.
"""
from snowflake.sqlalchemy import URL
if engine is None:
connect_args = {}
if proxy:
connect_args["proxy"] = proxy
# Create an SQLAlchemy engine for Snowflake
self.engine = create_engine(
URL(
account=account or "",
user=user or "",
password=password or "",
database=database or "",
schema=schema or "",
warehouse=warehouse or "",
role=role or "",
),
connect_args=connect_args,
)
else:
self.engine = engine
# Create a sessionmaker bound to the engine
self.Session = sessionmaker(bind=self.engine)
def execute_query(self, query_string: str) -> List[Any]:
"""
Executes a SQL query and returns the fetched results.
Args:
query_string (str): The SQL query to be executed.
Returns:
List[Any]: The fetched results from the query.
"""
# Create a session and execute the query
session = self.Session()
try:
result = session.execute(text(query_string))
return result.fetchall()
finally:
# Ensure the session is closed after query execution
session.close()
def load_data(self, query: str) -> List[Document]:
"""
Query and load data from the Database, returning a list of Documents.
Args:
query (str): Query parameter to filter tables and rows.
Returns:
List[Document]: A list of Document objects.
"""
documents = []
if query is None:
raise ValueError("A query parameter is necessary to filter the data")
try:
result = self.execute_query(query)
for item in result:
# fetch each item
doc_str = ", ".join([str(entry) for entry in item])
documents.append(Document(text=doc_str))
return documents
except Exception as e:
logger.error(
f"An error occurred while loading the data: {e}", exc_info=True
)
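# Illustrative usage sketch (all connection values below are placeholders):
#
#   reader = SnowflakeReader(
#       account="<account>",
#       user="<user>",
#       password="<password>",
#       database="<database>",
#       schema="<schema>",
#       warehouse="<warehouse>",
#   )
#   documents = reader.load_data("SELECT * FROM MY_TABLE LIMIT 10")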
|
"""Snowflake Reader."""
import logging
from typing import Any, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from sqlalchemy import create_engine, text
from sqlalchemy.engine import Engine
from sqlalchemy.orm import sessionmaker
logger = logging.getLogger(__name__)
class SnowflakeReader(BaseReader):
"""
Initializes a new instance of the SnowflakeReader.
    This class establishes a connection to Snowflake using SQLAlchemy, executes a query,
    and concatenates each row into a Document used by LlamaIndex.
Attributes:
engine (Optional[Engine]): SQLAlchemy Engine object of the database connection.
OR
account (Optional[str]): Snowflake account identifier.
user (Optional[str]): Snowflake account username.
password (Optional[str]): Password for the Snowflake account.
database (Optional[str]): Snowflake database name.
schema (Optional[str]): Snowflake schema name.
warehouse (Optional[str]): Snowflake warehouse name.
proxy (Optional[str]): Proxy setting for the connection.
"""
def __init__(
self,
account: Optional[str] = None,
user: Optional[str] = None,
password: Optional[str] = None,
database: Optional[str] = None,
schema: Optional[str] = None,
warehouse: Optional[str] = None,
role: Optional[str] = None,
proxy: Optional[str] = None,
engine: Optional[Engine] = None,
) -> None:
"""
Initializes the SnowflakeReader with optional connection details, proxy configuration, or an engine directly.
Args:
account (Optional[str]): Snowflake account identifier.
user (Optional[str]): Snowflake account username.
password (Optional[str]): Password for the Snowflake account.
database (Optional[str]): Snowflake database name.
schema (Optional[str]): Snowflake schema name.
warehouse (Optional[str]): Snowflake warehouse name.
role (Optional[str]): Snowflake role name.
proxy (Optional[str]): Proxy setting for the connection.
engine (Optional[Engine]): Existing SQLAlchemy engine.
"""
from snowflake.sqlalchemy import URL
if engine is None:
connect_args = {}
if proxy:
connect_args["proxy"] = proxy
# Create an SQLAlchemy engine for Snowflake
self.engine = create_engine(
URL(
account=account or "",
user=user or "",
password=password or "",
database=database or "",
schema=schema or "",
warehouse=warehouse or "",
role=role or "",
),
connect_args=connect_args,
)
else:
self.engine = engine
# Create a sessionmaker bound to the engine
self.Session = sessionmaker(bind=self.engine)
def execute_query(self, query_string: str) -> List[Any]:
"""
Executes a SQL query and returns the fetched results.
Args:
query_string (str): The SQL query to be executed.
Returns:
List[Any]: The fetched results from the query.
"""
# Create a session and execute the query
session = self.Session()
try:
result = session.execute(text(query_string))
return result.fetchall()
finally:
# Ensure the session is closed after query execution
session.close()
def load_data(self, query: str) -> List[Document]:
"""Query and load data from the Database, returning a list of Documents.
Args:
query (str): Query parameter to filter tables and rows.
Returns:
List[Document]: A list of Document objects.
"""
documents = []
if query is None:
raise ValueError("A query parameter is necessary to filter the data")
try:
result = self.execute_query(query)
for item in result:
# fetch each item
doc_str = ", ".join([str(entry) for entry in item])
documents.append(Document(text=doc_str))
return documents
except Exception as e:
logger.error(
f"An error occurred while loading the data: {e}", exc_info=True
)
|
import warnings
from keras.src import activations
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.LeakyReLU")
class LeakyReLU(Layer):
"""Leaky version of a Rectified Linear Unit activation layer.
This layer allows a small gradient when the unit is not active.
Formula:
``` python
f(x) = alpha * x if x < 0
f(x) = x if x >= 0
```
Example:
``` python
leaky_relu_layer = LeakyReLU(negative_slope=0.5)
input = np.array([-10, -5, 0.0, 5, 10])
result = leaky_relu_layer(input)
# result = [-5. , -2.5, 0. , 5. , 10.]
```
Args:
negative_slope: Float >= 0.0. Negative slope coefficient.
Defaults to `0.3`.
**kwargs: Base layer keyword arguments, such as
`name` and `dtype`.
"""
def __init__(self, negative_slope=0.3, **kwargs):
if "alpha" in kwargs:
negative_slope = kwargs.pop("alpha")
warnings.warn(
"Argument `alpha` is deprecated. "
"Use `negative_slope` instead."
)
super().__init__(**kwargs)
if negative_slope is None or negative_slope < 0:
raise ValueError(
"The negative_slope value of a Leaky ReLU layer "
"cannot be None or negative value. Expected a float."
f" Received: negative_slope={negative_slope}"
)
self.negative_slope = negative_slope
self.supports_masking = True
self.built = True
def call(self, inputs):
return activations.leaky_relu(
inputs, negative_slope=self.negative_slope
)
def get_config(self):
config = super().get_config()
config.update({"negative_slope": self.negative_slope})
return config
def compute_output_shape(self, input_shape):
return input_shape
|
import warnings
from keras.src import activations
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.LeakyReLU")
class LeakyReLU(Layer):
"""Leaky version of a Rectified Linear Unit activation layer.
This layer allows a small gradient when the unit is not active.
Formula:
``` python
f(x) = alpha * x if x < 0
f(x) = x if x >= 0
```
Example:
``` python
leaky_relu_layer = LeakyReLU(negative_slope=0.5)
input = np.array([-10, -5, 0.0, 5, 10])
result = leaky_relu_layer(input)
# result = [-5. , -2.5, 0. , 5. , 10.]
```
Args:
negative_slope: Float >= 0.0. Negative slope coefficient.
Defaults to `0.3`.
**kwargs: Base layer keyword arguments, such as
`name` and `dtype`.
"""
def __init__(self, negative_slope=0.3, **kwargs):
if "alpha" in kwargs:
negative_slope = kwargs.pop("alpha")
warnings.warn(
"Argument `alpha` is deprecated. "
"Use `negative_slope` instead."
)
super().__init__(**kwargs)
if negative_slope is None or negative_slope < 0:
raise ValueError(
"The negative_slope value of a Leaky ReLU layer "
"cannot be None or negative value. Expected a float."
f" Received: negative_slope={negative_slope}"
)
self.supports_masking = True
self.negative_slope = negative_slope
def call(self, inputs):
return activations.leaky_relu(
inputs, negative_slope=self.negative_slope
)
def get_config(self):
config = super().get_config()
config.update({"negative_slope": self.negative_slope})
return config
def compute_output_shape(self, input_shape):
return input_shape
|
_base_ = './cascade-mask-rcnn_r50-caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
_base_ = './cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.ndarray import NdArray
@_register_proto(proto_type_name='audio_ndarray')
class AudioNdArray(AbstractAudioTensor, NdArray):
"""
Subclass of NdArray, to represent an audio tensor.
Adds audio-specific features to the tensor.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioNdArray, AudioUrl
import numpy as np
class MyAudioDoc(BaseDoc):
title: str
audio_tensor: Optional[AudioNdArray]
url: Optional[AudioUrl]
bytes_: Optional[bytes]
# from tensor
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=np.random.rand(1000, 2),
)
doc_1.audio_tensor.save(file_path='/tmp/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
# from url
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor, _ = doc_2.url.load()
doc_2.audio_tensor.save(file_path='/tmp/file_2.wav')
doc_2.bytes_ = doc_1.audio_tensor.to_bytes()
```
---
"""
...
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.ndarray import NdArray
@_register_proto(proto_type_name='audio_ndarray')
class AudioNdArray(AbstractAudioTensor, NdArray):
"""
Subclass of NdArray, to represent an audio tensor.
Adds audio-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioNdArray, AudioUrl
import numpy as np
class MyAudioDoc(BaseDoc):
title: str
audio_tensor: Optional[AudioNdArray]
url: Optional[AudioUrl]
bytes_: Optional[bytes]
# from tensor
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=np.random.rand(1000, 2),
)
doc_1.audio_tensor.save(file_path='path/to/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
# from url
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = doc_2.url.load()
doc_2.audio_tensor.save(file_path='path/to/file_2.wav')
doc_2.bytes_ = doc_1.audio_tensor.to_bytes()
"""
...
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional
from jina import Document, DocumentArray
from jina.logging.logger import JinaLogger
from pymongo import MongoClient
from pymongo.errors import BulkWriteError
class MongoHandler:
def __init__(
self,
host: str = 'localhost',
port: int = 27017,
username: Optional[str] = None,
password: Optional[str] = None,
database: str = 'jina_index_db',
collection: str = 'jina_index_collection',
):
self._logger = JinaLogger('mongo_handler')
self._database_name = database
self._collection_name = collection
self._collection = None
if username and password:
self._connection = MongoClient(
f'mongodb://{username}:{password}@{host}:{port}'
)
else:
self._connection = MongoClient(f'mongodb://{host}:{port}')
self._logger.info(f'Connected to mongodb instance at {host}:{port}')
@property
def collection(self):
"""Get the collection, if the collection is new,
create index based on ID field."""
if not self._collection:
self._collection = self._connection[self._database_name][
self._collection_name
]
self._collection.create_index(
'id', unique=True
            )  # create an index on the doc.id field if it does not already exist.
return self._collection
return self._collection
def add(self, docs: DocumentArray, **kwargs):
"""Insert document from docs into mongodb instance."""
dict_docs = []
for doc in docs:
item = doc.dict()
if doc.embedding is not None:
item['embedding'] = list(doc.embedding.flatten())
dict_docs.append(item)
try:
self.collection.insert_many(
documents=dict_docs,
                ordered=True,  # insert in order and stop at the first failing document.
)
except BulkWriteError:
raise
def update(self, docs: DocumentArray, **kwargs):
"""Update item from docs based on doc id."""
for doc in docs:
item = doc.dict()
item['embedding'] = []
if doc.embedding is not None:
item['embedding'] = list(doc.embedding.flatten())
self.collection.replace_one(
filter={'id': {'$eq': doc.id}},
replacement=item,
upsert=True,
)
def delete(self, docs: DocumentArray, **kwargs):
"""Delete item from docs based on doc id."""
doc_ids = [doc.id for doc in docs]
self.collection.delete_many(filter={'id': {'$in': doc_ids}})
def search(self, docs: DocumentArray, **kwargs):
for doc in docs:
result = self.collection.find_one(
filter={'id': doc.id}, projection={'_id': False}
)
if result:
if 'embedding' in result:
result.pop('embedding')
retrieved_doc = Document(result)
doc.update(retrieved_doc)
def get_size(self) -> int:
"""Get the size of collection"""
return self.collection.count()
def close(self):
"""Close connection."""
if self._connection:
self._connection.close()
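# Illustrative usage sketch (host and credentials are placeholders):
#
#   handler = MongoHandler(host='localhost', port=27017)
#   handler.add(DocumentArray([Document(text='hello')]))
#   print(handler.get_size())
#   handler.close()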
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional
from pymongo import MongoClient
from pymongo.errors import BulkWriteError
from jina.logging.logger import JinaLogger
from jina import Document, DocumentArray
class MongoHandler:
def __init__(
self,
host: str = 'localhost',
port: int = 27017,
username: Optional[str] = None,
password: Optional[str] = None,
database: str = 'jina_index_db',
collection: str = 'jina_index_collection',
):
self._logger = JinaLogger('mongo_handler')
self._database_name = database
self._collection_name = collection
self._collection = None
if username and password:
self._connection = MongoClient(
f'mongodb://{username}:{password}@{host}:{port}'
)
else:
self._connection = MongoClient(f'mongodb://{host}:{port}')
self._logger.info(f'Connected to mongodb instance at {host}:{port}')
@property
def collection(self):
"""Get the collection, if the collection is new, create index based on ID field."""
if not self._collection:
self._collection = self._connection[self._database_name][
self._collection_name
]
self._collection.create_index(
'id', unique=True
            )  # create an index on the doc.id field if it does not already exist.
return self._collection
return self._collection
def add(self, docs: DocumentArray, **kwargs):
"""Insert document from docs into mongodb instance."""
dict_docs = []
for doc in docs:
item = doc.dict()
if doc.embedding is not None:
item['embedding'] = list(doc.embedding.flatten())
dict_docs.append(item)
try:
self.collection.insert_many(
documents=dict_docs,
                ordered=True,  # insert in order and stop at the first failing document.
)
except BulkWriteError:
raise
def update(self, docs: DocumentArray, **kwargs):
"""Update item from docs based on doc id."""
for doc in docs:
item = doc.dict()
item['embedding'] = []
if doc.embedding is not None:
item['embedding'] = list(doc.embedding.flatten())
self.collection.replace_one(
filter={'id': {'$eq': doc.id}},
replacement=item,
upsert=True,
)
def delete(self, docs: DocumentArray, **kwargs):
"""Delete item from docs based on doc id."""
doc_ids = [doc.id for doc in docs]
self.collection.delete_many(filter={'id': {'$in': doc_ids}})
def search(self, docs: DocumentArray, **kwargs):
for doc in docs:
result = self.collection.find_one(
filter={'id': doc.id}, projection={'_id': False}
)
if result:
result.pop('embedding')
retrieved_doc = Document(result)
doc.update(retrieved_doc)
def get_size(self) -> int:
"""Get the size of collection"""
return self.collection.count()
def close(self):
"""Close connection."""
if self._connection:
self._connection.close()
|
# dataset settings
dataset_type = 'DeepFashionDataset'
data_root = 'data/DeepFashion/In-shop/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(750, 1101), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(750, 1101), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=2,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='Anno/segmentation/DeepFashion_segmentation_train.json',
data_prefix=dict(img='Img/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='Anno/segmentation/DeepFashion_segmentation_query.json',
data_prefix=dict(img='Img/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='Anno/segmentation/DeepFashion_segmentation_gallery.json',
data_prefix=dict(img='Img/'),
test_mode=True,
pipeline=test_pipeline))
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root +
'Anno/segmentation/DeepFashion_segmentation_query.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = dict(
type='CocoMetric',
ann_file=data_root +
'Anno/segmentation/DeepFashion_segmentation_gallery.json',
metric=['bbox', 'segm'],
format_only=False)
|
# dataset settings
dataset_type = 'DeepFashionDataset'
data_root = 'data/DeepFashion/In-shop/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', img_scale=(750, 1101), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(750, 1101),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=1,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json',
img_prefix=data_root + 'Img/',
pipeline=train_pipeline,
data_root=data_root),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json',
img_prefix=data_root + 'Img/',
pipeline=test_pipeline,
data_root=data_root),
test=dict(
type=dataset_type,
ann_file=data_root +
'annotations/DeepFashion_segmentation_gallery.json',
img_prefix=data_root + 'Img/',
pipeline=test_pipeline,
data_root=data_root))
evaluation = dict(interval=5, metric=['bbox', 'segm'])
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
model.similarity_fn_name = "cosine"  # Even though the model is trained with dot product, we evaluate with cosine because the dataset scores are cosine similarities.
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
"""
EmbeddingSimilarityEvaluator: Evaluating the model on the sts_dev dataset:
Cosine-Similarity: Pearson: 0.8429 Spearman: 0.8366
Model Sparsity: Active Dimensions: 78.3, Sparsity Ratio: 0.9974
"""
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
# => Primary metric: sts_dev_spearman_cosine
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8366
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
model.similarity_fn_name = "cosine"  # Even though the model is trained with dot product, we evaluate with cosine because the dataset scores are cosine similarities.
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
"""
EmbeddingSimilarityEvaluator: Evaluating the model on the sts_dev dataset:
Cosine-Similarity: Pearson: 0.8430 Spearman: 0.8368
Model Sparsity: Active Dimensions: 81.1, Sparsity Ratio: 0.9973
"""
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
# => Primary metric: sts_dev_spearman_cosine
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8368
|
import ast
from typing import List, Optional
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class PythonFileToolSpec(BaseToolSpec):
spec_functions = ["function_definitions", "get_function", "get_functions"]
def __init__(self, file_name: str) -> None:
        with open(file_name) as f:
            self.tree = ast.parse(f.read())
def function_definitions(self, external: Optional[bool] = True) -> str:
"""
Use this function to get the name and arguments of all function definitions in the python file.
Args:
external (Optional[bool]): Defaults to true. If false, this function will also return functions that start with _
"""
functions = ""
for node in ast.walk(self.tree):
if isinstance(node, ast.FunctionDef):
if external and node.name.startswith("_"):
continue
functions += f"""
name: {node.name}
arguments: {ast.dump(node.args)}
"""
return functions
def get_function(self, name: str) -> str:
"""
Use this function to get the name and arguments of a single function definition in the python file.
Args:
name (str): The name of the function to retrieve
"""
for node in ast.walk(self.tree):
if isinstance(node, ast.FunctionDef):
if node.name == name:
return f"""
name: {node.name}
arguments: {ast.dump(node.args)}
docstring: {ast.get_docstring(node)}
"""
return None
def get_functions(self, names: List[str]) -> str:
"""
        Use this function to get the names and arguments of a list of function definitions in the python file.
        Args:
            names (List[str]): The names of the functions to retrieve
"""
functions = ""
for name in names:
            functions += (self.get_function(name) or "") + "\n"  # tolerate names that are not found
return functions
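# Illustrative usage sketch ("my_module.py" and "load_data" are hypothetical
# placeholders for a real file and function name):
#
#   spec = PythonFileToolSpec("my_module.py")
#   print(spec.function_definitions())      # names/arguments of public functions
#   print(spec.get_function("load_data"))   # one definition, including its docstring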
|
import ast
from typing import List, Optional
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class PythonFileToolSpec(BaseToolSpec):
spec_functions = ["function_definitions", "get_function", "get_functions"]
def __init__(self, file_name: str) -> None:
        with open(file_name) as f:
            self.tree = ast.parse(f.read())
def function_definitions(self, external: Optional[bool] = True) -> str:
"""
Use this function to get the name and arguments of all function definitions in the python file.
Args:
external (Optional[bool]): Defaults to true. If false, this function will also return functions that start with _
"""
functions = ""
for node in ast.walk(self.tree):
if isinstance(node, ast.FunctionDef):
if external and node.name.startswith("_"):
continue
functions += f"""
name: {node.name}
arguments: {ast.dump(node.args)}
"""
return functions
def get_function(self, name: str) -> str:
"""
Use this function to get the name and arguments of a single function definition in the python file.
Args:
name (str): The name of the function to retrieve
"""
for node in ast.walk(self.tree):
if isinstance(node, ast.FunctionDef):
if node.name == name:
return f"""
name: {node.name}
arguments: {ast.dump(node.args)}
docstring: {ast.get_docstring(node)}
"""
return None
def get_functions(self, names: List[str]) -> str:
"""
        Use this function to get the names and arguments of a list of function definitions in the python file.
        Args:
            names (List[str]): The names of the functions to retrieve
"""
functions = ""
for name in names:
            functions += (self.get_function(name) or "") + "\n"  # tolerate names that are not found
return functions
|
"""Simple reader that reads wikipedia."""
from typing import Any, List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class WikipediaReader(BasePydanticReader):
"""
Wikipedia reader.
Reads a page.
"""
is_remote: bool = True
def __init__(self) -> None:
"""Initialize with parameters."""
try:
import wikipedia # noqa
except ImportError:
raise ImportError(
"`wikipedia` package not found, please run `pip install wikipedia`"
)
@classmethod
def class_name(cls) -> str:
return "WikipediaReader"
def load_data(
self, pages: List[str], lang_prefix: str = "en", **load_kwargs: Any
) -> List[Document]:
"""
Load data from the input directory.
Args:
pages (List[str]): List of pages to read.
lang_prefix (str): Language prefix for Wikipedia. Defaults to English. Valid Wikipedia language codes
can be found at https://en.wikipedia.org/wiki/List_of_Wikipedias.
"""
import wikipedia
if lang_prefix.lower() != "en":
if lang_prefix.lower() in wikipedia.languages():
wikipedia.set_lang(lang_prefix.lower())
else:
raise ValueError(
f"Language prefix '{lang_prefix}' for Wikipedia is not supported. Check supported languages at https://en.wikipedia.org/wiki/List_of_Wikipedias."
)
results = []
for page in pages:
wiki_page = wikipedia.page(page, **load_kwargs)
page_content = wiki_page.content
page_id = wiki_page.pageid
results.append(Document(id_=page_id, text=page_content))
return results
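# Illustrative usage sketch:
#
#   reader = WikipediaReader()
#   docs = reader.load_data(pages=["Python (programming language)"], lang_prefix="en")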
|
"""Simple reader that reads wikipedia."""
from typing import Any, List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class WikipediaReader(BasePydanticReader):
"""Wikipedia reader.
Reads a page.
"""
is_remote: bool = True
def __init__(self) -> None:
"""Initialize with parameters."""
try:
import wikipedia # noqa
except ImportError:
raise ImportError(
"`wikipedia` package not found, please run `pip install wikipedia`"
)
@classmethod
def class_name(cls) -> str:
return "WikipediaReader"
def load_data(
self, pages: List[str], lang_prefix: str = "en", **load_kwargs: Any
) -> List[Document]:
"""Load data from the input directory.
Args:
pages (List[str]): List of pages to read.
lang_prefix (str): Language prefix for Wikipedia. Defaults to English. Valid Wikipedia language codes
can be found at https://en.wikipedia.org/wiki/List_of_Wikipedias.
"""
import wikipedia
if lang_prefix.lower() != "en":
if lang_prefix.lower() in wikipedia.languages():
wikipedia.set_lang(lang_prefix.lower())
else:
raise ValueError(
f"Language prefix '{lang_prefix}' for Wikipedia is not supported. Check supported languages at https://en.wikipedia.org/wiki/List_of_Wikipedias."
)
results = []
for page in pages:
wiki_page = wikipedia.page(page, **load_kwargs)
page_content = wiki_page.content
page_id = wiki_page.pageid
results.append(Document(id_=page_id, text=page_content))
return results
|
from io import BytesIO
from typing import TYPE_CHECKING, Any, List, NamedTuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor import AudioNdArray, NdArray, VideoNdArray
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='VideoBytes')
class VideoLoadResult(NamedTuple):
video: VideoNdArray
audio: AudioNdArray
key_frame_indices: NdArray
@_register_proto(proto_type_name='video_bytes')
class VideoBytes(bytes, AbstractType):
"""
    Bytes that store a video and that can be loaded into a video tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self, **kwargs) -> VideoLoadResult:
"""
Load the video from the bytes into a VideoLoadResult object consisting of:
- a [`VideoNdArray`][docarray.typing.VideoNdArray] (`VideoLoadResult.video`)
- an [`AudioNdArray`][docarray.typing.AudioNdArray] (`VideoLoadResult.audio`)
- an [`NdArray`][docarray.typing.NdArray] containing the key frame indices (`VideoLoadResult.key_frame_indices`).
---
```python
from docarray import BaseDoc
from docarray.typing import AudioNdArray, NdArray, VideoNdArray, VideoUrl
class MyDoc(BaseDoc):
video_url: VideoUrl
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
video, audio, key_frame_indices = doc.video_url.load()
assert isinstance(video, VideoNdArray)
assert isinstance(audio, AudioNdArray)
assert isinstance(key_frame_indices, NdArray)
```
---
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described [here](https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open)
:return: a `VideoLoadResult` instance with video, audio and keyframe indices
"""
if TYPE_CHECKING:
import av
else:
av = import_library('av')
with av.open(BytesIO(self), **kwargs) as container:
audio_frames: List[np.ndarray] = []
video_frames: List[np.ndarray] = []
keyframe_indices: List[int] = []
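            # Decode the container frame by frame: collect audio frames, video
            # frames (as RGB arrays), and the indices of video key frames.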
for frame in container.decode():
if type(frame) == av.audio.frame.AudioFrame:
audio_frames.append(frame.to_ndarray())
elif type(frame) == av.video.frame.VideoFrame:
if frame.key_frame == 1:
curr_index = len(video_frames)
keyframe_indices.append(curr_index)
video_frames.append(frame.to_ndarray(format='rgb24'))
if len(audio_frames) == 0:
audio = parse_obj_as(AudioNdArray, np.array(audio_frames))
else:
audio = parse_obj_as(AudioNdArray, np.stack(audio_frames))
video = parse_obj_as(VideoNdArray, np.stack(video_frames))
indices = parse_obj_as(NdArray, keyframe_indices)
return VideoLoadResult(video=video, audio=audio, key_frame_indices=indices)
|
from io import BytesIO
from typing import TYPE_CHECKING, Any, List, NamedTuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor import AudioNdArray, NdArray, VideoNdArray
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='VideoBytes')
class VideoLoadResult(NamedTuple):
video: VideoNdArray
audio: AudioNdArray
key_frame_indices: NdArray
@_register_proto(proto_type_name='video_bytes')
class VideoBytes(bytes, AbstractType):
"""
    Bytes that store a video and that can be loaded into a video tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self, **kwargs) -> VideoLoadResult:
"""
Load the video from the bytes into a VideoLoadResult object consisting of:
- a [`VideoNdArray`][docarray.typing.VideoNdArray] (`VideoLoadResult.video`)
- an [`AudioNdArray`][docarray.typing.AudioNdArray] (`VideoLoadResult.audio`)
- an [`NdArray`][docarray.typing.NdArray] containing the key frame indices (`VideoLoadResult.key_frame_indices`).
---
```python
from docarray import BaseDoc
from docarray.typing import AudioNdArray, NdArray, VideoNdArray, VideoUrl
class MyDoc(BaseDoc):
video_url: VideoUrl
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video, audio, key_frame_indices = doc.video_url.load()
assert isinstance(video, VideoNdArray)
assert isinstance(audio, AudioNdArray)
assert isinstance(key_frame_indices, NdArray)
```
---
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described [here](https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open)
:return: a `VideoLoadResult` instance with video, audio and keyframe indices
"""
if TYPE_CHECKING:
import av
else:
av = import_library('av')
with av.open(BytesIO(self), **kwargs) as container:
audio_frames: List[np.ndarray] = []
video_frames: List[np.ndarray] = []
keyframe_indices: List[int] = []
for frame in container.decode():
if type(frame) == av.audio.frame.AudioFrame:
audio_frames.append(frame.to_ndarray())
elif type(frame) == av.video.frame.VideoFrame:
if frame.key_frame == 1:
curr_index = len(video_frames)
keyframe_indices.append(curr_index)
video_frames.append(frame.to_ndarray(format='rgb24'))
if len(audio_frames) == 0:
audio = parse_obj_as(AudioNdArray, np.array(audio_frames))
else:
audio = parse_obj_as(AudioNdArray, np.stack(audio_frames))
video = parse_obj_as(VideoNdArray, np.stack(video_frames))
indices = parse_obj_as(NdArray, keyframe_indices)
return VideoLoadResult(video=video, audio=audio, key_frame_indices=indices)
|
_base_ = 'tridentnet_r50-caffe_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = 'tridentnet_r50_caffe_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
"""LangChain **Runnable** and the **LangChain Expression Language (LCEL)**.
The LangChain Expression Language (LCEL) offers a declarative method to build
production-grade programs that harness the power of LLMs.
Programs created using LCEL and LangChain Runnables inherently support
synchronous, asynchronous, batch, and streaming operations.
Support for **async** allows servers hosting LCEL based programs to scale better
for higher concurrent loads.
**Batch** operations allow for processing multiple inputs in parallel.
**Streaming** of intermediate outputs, as they're being generated, allows for
creating more responsive UX.
This module contains schema and implementation of LangChain Runnables primitives.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.runnables.base import (
Runnable,
RunnableBinding,
RunnableGenerator,
RunnableLambda,
RunnableMap,
RunnableParallel,
RunnableSequence,
RunnableSerializable,
chain,
)
from langchain_core.runnables.branch import RunnableBranch
from langchain_core.runnables.config import (
RunnableConfig,
ensure_config,
get_config_list,
patch_config,
run_in_executor,
)
from langchain_core.runnables.fallbacks import RunnableWithFallbacks
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.runnables.passthrough import (
RunnableAssign,
RunnablePassthrough,
RunnablePick,
)
from langchain_core.runnables.router import RouterInput, RouterRunnable
from langchain_core.runnables.utils import (
AddableDict,
ConfigurableField,
ConfigurableFieldMultiOption,
ConfigurableFieldSingleOption,
ConfigurableFieldSpec,
aadd,
add,
)
__all__ = [
"chain",
"AddableDict",
"ConfigurableField",
"ConfigurableFieldSingleOption",
"ConfigurableFieldMultiOption",
"ConfigurableFieldSpec",
"ensure_config",
"run_in_executor",
"patch_config",
"RouterInput",
"RouterRunnable",
"Runnable",
"RunnableSerializable",
"RunnableBinding",
"RunnableBranch",
"RunnableConfig",
"RunnableGenerator",
"RunnableLambda",
"RunnableMap",
"RunnableParallel",
"RunnablePassthrough",
"RunnableAssign",
"RunnablePick",
"RunnableSequence",
"RunnableWithFallbacks",
"RunnableWithMessageHistory",
"get_config_list",
"aadd",
"add",
]
_dynamic_imports = {
"chain": "base",
"Runnable": "base",
"RunnableBinding": "base",
"RunnableGenerator": "base",
"RunnableLambda": "base",
"RunnableMap": "base",
"RunnableParallel": "base",
"RunnableSequence": "base",
"RunnableSerializable": "base",
"RunnableBranch": "branch",
"RunnableConfig": "config",
"ensure_config": "config",
"get_config_list": "config",
"patch_config": "config",
"run_in_executor": "config",
"RunnableWithFallbacks": "fallbacks",
"RunnableWithMessageHistory": "history",
"RunnableAssign": "passthrough",
"RunnablePassthrough": "passthrough",
"RunnablePick": "passthrough",
"RouterInput": "router",
"RouterRunnable": "router",
"AddableDict": "utils",
"ConfigurableField": "utils",
"ConfigurableFieldMultiOption": "utils",
"ConfigurableFieldSingleOption": "utils",
"ConfigurableFieldSpec": "utils",
"aadd": "utils",
"add": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent # type: ignore[name-defined]
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
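# Illustrative sketch (not part of the original module): names in ``__all__`` are
# resolved lazily by the module-level ``__getattr__`` above, so the submodule is
# only imported on first access, e.g.
#
#   from langchain_core import runnables
#   RunnableLambda = runnables.RunnableLambda  # imports langchain_core.runnables.base lazily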
|
"""LangChain **Runnable** and the **LangChain Expression Language (LCEL)**.
The LangChain Expression Language (LCEL) offers a declarative method to build
production-grade programs that harness the power of LLMs.
Programs created using LCEL and LangChain Runnables inherently support
synchronous, asynchronous, batch, and streaming operations.
Support for **async** allows servers hosting LCEL based programs to scale better
for higher concurrent loads.
**Batch** operations allow for processing multiple inputs in parallel.
**Streaming** of intermediate outputs, as they're being generated, allows for
creating more responsive UX.
This module contains schema and implementation of LangChain Runnables primitives.
"""
from langchain_core.runnables.base import (
Runnable,
RunnableBinding,
RunnableGenerator,
RunnableLambda,
RunnableMap,
RunnableParallel,
RunnableSequence,
RunnableSerializable,
chain,
)
from langchain_core.runnables.branch import RunnableBranch
from langchain_core.runnables.config import (
RunnableConfig,
ensure_config,
get_config_list,
patch_config,
run_in_executor,
)
from langchain_core.runnables.fallbacks import RunnableWithFallbacks
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.runnables.passthrough import (
RunnableAssign,
RunnablePassthrough,
RunnablePick,
)
from langchain_core.runnables.router import RouterInput, RouterRunnable
from langchain_core.runnables.utils import (
AddableDict,
ConfigurableField,
ConfigurableFieldMultiOption,
ConfigurableFieldSingleOption,
ConfigurableFieldSpec,
aadd,
add,
)
__all__ = [
"chain",
"AddableDict",
"ConfigurableField",
"ConfigurableFieldSingleOption",
"ConfigurableFieldMultiOption",
"ConfigurableFieldSpec",
"ensure_config",
"run_in_executor",
"patch_config",
"RouterInput",
"RouterRunnable",
"Runnable",
"RunnableSerializable",
"RunnableBinding",
"RunnableBranch",
"RunnableConfig",
"RunnableGenerator",
"RunnableLambda",
"RunnableMap",
"RunnableParallel",
"RunnablePassthrough",
"RunnableAssign",
"RunnablePick",
"RunnableSequence",
"RunnableWithFallbacks",
"RunnableWithMessageHistory",
"get_config_list",
"aadd",
"add",
]
|
import torch
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
import v2_extras
return torchvision.transforms.v2, torchvision.datapoints, v2_extras
else:
import transforms
return transforms, None, None
class SegmentationPresetTrain:
def __init__(
self,
*,
base_size,
crop_size,
hflip_prob=0.5,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
backend="pil",
use_v2=False,
):
T, datapoints, v2_extras = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "datapoint":
transforms.append(T.ToImageTensor())
elif backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
transforms += [T.RandomResize(min_size=int(0.5 * base_size), max_size=int(2.0 * base_size))]
if hflip_prob > 0:
transforms += [T.RandomHorizontalFlip(hflip_prob)]
if use_v2:
# We need a custom pad transform here, since the padding we want to perform here is fundamentally
# different from the padding in `RandomCrop` if `pad_if_needed=True`.
transforms += [v2_extras.PadIfSmaller(crop_size, fill={datapoints.Mask: 255, "others": 0})]
transforms += [T.RandomCrop(crop_size)]
if backend == "pil":
transforms += [T.PILToTensor()]
if use_v2:
img_type = datapoints.Image if backend == "datapoint" else torch.Tensor
transforms += [
T.ToDtype(dtype={img_type: torch.float32, datapoints.Mask: torch.int64, "others": None}, scale=True)
]
else:
# No need to explicitly convert masks as they're magically int64 already
transforms += [T.ConvertImageDtype(torch.float)]
transforms += [T.Normalize(mean=mean, std=std)]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
class SegmentationPresetEval:
def __init__(
self, *, base_size, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), backend="pil", use_v2=False
):
T, _, _ = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "tensor":
transforms += [T.PILToTensor()]
elif backend == "datapoint":
transforms += [T.ToImageTensor()]
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
if use_v2:
transforms += [T.Resize(size=(base_size, base_size))]
else:
transforms += [T.RandomResize(min_size=base_size, max_size=base_size)]
if backend == "pil":
# Note: we could just convert to pure tensors even in v2?
transforms += [T.ToImageTensor() if use_v2 else T.PILToTensor()]
transforms += [
T.ConvertImageDtype(torch.float),
T.Normalize(mean=mean, std=std),
]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
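# A minimal usage sketch (assumed inputs: `pil_image`/`pil_mask` are hypothetical
# PIL objects, and the 520/480 sizes are illustrative values only):
#
#   train_tf = SegmentationPresetTrain(base_size=520, crop_size=480, backend="pil", use_v2=False)
#   img_tensor, mask_tensor = train_tf(pil_image, pil_mask)
#   eval_tf = SegmentationPresetEval(base_size=520)
#   img_tensor, mask_tensor = eval_tf(pil_image, pil_mask)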
|
from collections import defaultdict
import torch
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
import v2_extras
return torchvision.transforms.v2, torchvision.datapoints, v2_extras
else:
import transforms
return transforms, None, None
class SegmentationPresetTrain:
def __init__(
self,
*,
base_size,
crop_size,
hflip_prob=0.5,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
backend="pil",
use_v2=False,
):
T, datapoints, v2_extras = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "datapoint":
transforms.append(T.ToImageTensor())
elif backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
transforms += [T.RandomResize(min_size=int(0.5 * base_size), max_size=int(2.0 * base_size))]
if hflip_prob > 0:
transforms += [T.RandomHorizontalFlip(hflip_prob)]
if use_v2:
# We need a custom pad transform here, since the padding we want to perform here is fundamentally
# different from the padding in `RandomCrop` if `pad_if_needed=True`.
transforms += [v2_extras.PadIfSmaller(crop_size, fill=defaultdict(lambda: 0, {datapoints.Mask: 255}))]
transforms += [T.RandomCrop(crop_size)]
if backend == "pil":
transforms += [T.PILToTensor()]
if use_v2:
img_type = datapoints.Image if backend == "datapoint" else torch.Tensor
transforms += [
T.ToDtype(dtype={img_type: torch.float32, datapoints.Mask: torch.int64, "others": None}, scale=True)
]
else:
# No need to explicitly convert masks as they're magically int64 already
transforms += [T.ConvertImageDtype(torch.float)]
transforms += [T.Normalize(mean=mean, std=std)]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
class SegmentationPresetEval:
def __init__(
self, *, base_size, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), backend="pil", use_v2=False
):
T, _, _ = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "tensor":
transforms += [T.PILToTensor()]
elif backend == "datapoint":
transforms += [T.ToImageTensor()]
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
if use_v2:
transforms += [T.Resize(size=(base_size, base_size))]
else:
transforms += [T.RandomResize(min_size=base_size, max_size=base_size)]
if backend == "pil":
# Note: we could just convert to pure tensors even in v2?
transforms += [T.ToImageTensor() if use_v2 else T.PILToTensor()]
transforms += [
T.ConvertImageDtype(torch.float),
T.Normalize(mean=mean, std=std),
]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
|
"""Module for helper functions for clients."""
from typing import Optional, Tuple
from jina._docarray import Document, DocumentArray, docarray_v2
from jina.enums import DataInputType
from jina.types.request.data import DataRequest
def _new_data_request_from_batch(
batch,
data_type: DataInputType,
endpoint: str,
target: Optional[str],
parameters: Optional[dict],
) -> DataRequest:
req = _new_data_request(endpoint, target, parameters)
# add docs fields
_add_docs(req, batch, data_type)
return req
def _new_data_request(
endpoint: str, target: Optional[str], parameters: Optional[dict]
) -> DataRequest:
req = DataRequest()
# set up header
req.header.exec_endpoint = endpoint
if target:
req.header.target_executor = target
# add parameters field
if parameters:
req.parameters = parameters
return req
def _new_doc_from_data(
data, data_type: DataInputType
) -> Tuple['Document', 'DataInputType']:
def _build_doc_from_content():
return Document(content=data), DataInputType.CONTENT
if data_type == DataInputType.DICT:
return (
(Document(**data), DataInputType.DICT)
if docarray_v2
else (Document.from_dict(data), DataInputType.DICT)
)
if data_type == DataInputType.AUTO or data_type == DataInputType.DOCUMENT:
if isinstance(data, Document):
# if incoming is already primitive type Document, then all good, best practice!
return data, DataInputType.DOCUMENT
elif isinstance(data, dict):
return (
(Document(**data), DataInputType.DICT)
if docarray_v2
else (Document.from_dict(data), DataInputType.DICT)
)
else:
try:
d = Document(data)
return d, DataInputType.DOCUMENT # NOT HIT
except ValueError:
# AUTO has a fallback, now reconsider it as content
if data_type == DataInputType.AUTO:
return _build_doc_from_content()
else:
raise
elif data_type == DataInputType.CONTENT:
return _build_doc_from_content()
def _add_docs(req: DataRequest, batch, data_type: DataInputType) -> None:
da = DocumentArray([])
for content in batch:
d, data_type = _new_doc_from_data(content, data_type)
da.append(d)
req.data.docs = da
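# A minimal usage sketch (the batch contents and parameters are assumptions):
# with DataInputType.AUTO, raw strings fall back to content Documents via
# _build_doc_from_content, and the resulting docs are attached to the request.
#
#   req = _new_data_request_from_batch(
#       batch=['hello', 'world'],
#       data_type=DataInputType.AUTO,
#       endpoint='/index',
#       target=None,
#       parameters={'top_k': 5},
#   )
#   assert req.header.exec_endpoint == '/index'
#   assert len(req.data.docs) == 2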
|
"""Module for helper functions for clients."""
from typing import Tuple, Optional
from docarray import Document, DocumentArray
from jina.enums import DataInputType
from jina.types.request.data import DataRequest
def _new_data_request_from_batch(
batch, data_type: DataInputType, endpoint: str, target: Optional[str], parameters: Optional[dict]
) -> DataRequest:
req = _new_data_request(endpoint, target, parameters)
# add docs fields
_add_docs(req, batch, data_type)
return req
def _new_data_request(endpoint: str, target: Optional[str], parameters: Optional[dict]) -> DataRequest:
req = DataRequest()
# set up header
req.header.exec_endpoint = endpoint
if target:
req.header.target_executor = target
# add parameters field
if parameters:
req.parameters = parameters
return req
def _new_doc_from_data(
data, data_type: DataInputType
) -> Tuple['Document', 'DataInputType']:
def _build_doc_from_content():
return Document(content=data), DataInputType.CONTENT
if data_type == DataInputType.DICT:
return Document.from_dict(data), DataInputType.DICT
if data_type == DataInputType.AUTO or data_type == DataInputType.DOCUMENT:
if isinstance(data, Document):
# if incoming is already primitive type Document, then all good, best practice!
return data, DataInputType.DOCUMENT
elif isinstance(data, dict):
return Document.from_dict(data), DataInputType.DICT
try:
d = Document(data)
return d, DataInputType.DOCUMENT # NOT HIT
except ValueError:
# AUTO has a fallback, now reconsider it as content
if data_type == DataInputType.AUTO:
return _build_doc_from_content()
else:
raise
elif data_type == DataInputType.CONTENT:
return _build_doc_from_content()
def _add_docs(req: DataRequest, batch, data_type: DataInputType) -> None:
da = DocumentArray()
for content in batch:
d, data_type = _new_doc_from_data(content, data_type)
da.append(d)
req.data.docs = da
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import GFLHead, LDHead
def test_ld_head_loss():
"""Tests vfnet head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9, ignore_iof_thr=0.1),
allowed_border=-1,
pos_weight=-1,
debug=False))
self = LDHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_ld=dict(type='KnowledgeDistillationKLDivLoss', loss_weight=1.0),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]))
teacher_model = GFLHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, bbox_preds = self.forward(feat)
rand_soft_target = teacher_model.forward(feat)[1]
# Test that empty ground truth encourages the network to predict
# background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
rand_soft_target, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero, ld loss should
# be non-negative but there should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_ld_loss = sum(empty_gt_losses['loss_ld'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_ld_loss.item() >= 0, 'ld loss should be non-negative'
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
rand_soft_target, img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
gt_bboxes_ignore = gt_bboxes
# When truth is non-empty but ignored then the cls loss should be nonzero,
# but there should be no box loss.
ignore_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
rand_soft_target, img_metas, gt_bboxes_ignore)
ignore_cls_loss = sum(ignore_gt_losses['loss_cls'])
ignore_box_loss = sum(ignore_gt_losses['loss_bbox'])
assert ignore_cls_loss.item() > 0, 'cls loss should be non-zero'
assert ignore_box_loss.item() == 0, 'gt bbox ignored loss should be zero'
# When truth is non-empty and not ignored then both cls and box loss should
# be nonzero for random inputs
gt_bboxes_ignore = [torch.randn(1, 4)]
not_ignore_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes,
gt_labels, rand_soft_target, img_metas,
gt_bboxes_ignore)
not_ignore_cls_loss = sum(not_ignore_gt_losses['loss_cls'])
not_ignore_box_loss = sum(not_ignore_gt_losses['loss_bbox'])
assert not_ignore_cls_loss.item() > 0, 'cls loss should be non-zero'
assert not_ignore_box_loss.item(
) > 0, 'gt bbox not ignored loss should be non-zero'
|
import mmcv
import torch
from mmdet.models.dense_heads import GFLHead, LDHead
def test_ld_head_loss():
"""Tests vfnet head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9, ignore_iof_thr=0.1),
allowed_border=-1,
pos_weight=-1,
debug=False))
self = LDHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_ld=dict(type='KnowledgeDistillationKLDivLoss', loss_weight=1.0),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]))
teacher_model = GFLHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, bbox_preds = self.forward(feat)
rand_soft_target = teacher_model.forward(feat)[1]
# Test that empty ground truth encourages the network to predict
# background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
rand_soft_target, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero, ld loss should
# be non-negative but there should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_ld_loss = sum(empty_gt_losses['loss_ld'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_ld_loss.item() >= 0, 'ld loss should be non-negative'
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
rand_soft_target, img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
gt_bboxes_ignore = gt_bboxes
# When truth is non-empty but ignored then the cls loss should be nonzero,
# but there should be no box loss.
ignore_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
rand_soft_target, img_metas, gt_bboxes_ignore)
ignore_cls_loss = sum(ignore_gt_losses['loss_cls'])
ignore_box_loss = sum(ignore_gt_losses['loss_bbox'])
assert ignore_cls_loss.item() > 0, 'cls loss should be non-zero'
assert ignore_box_loss.item() == 0, 'gt bbox ignored loss should be zero'
# When truth is non-empty and not ignored then both cls and box loss should
# be nonzero for random inputs
gt_bboxes_ignore = [torch.randn(1, 4)]
not_ignore_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes,
gt_labels, rand_soft_target, img_metas,
gt_bboxes_ignore)
not_ignore_cls_loss = sum(not_ignore_gt_losses['loss_cls'])
not_ignore_box_loss = sum(not_ignore_gt_losses['loss_bbox'])
assert not_ignore_cls_loss.item() > 0, 'cls loss should be non-zero'
assert not_ignore_box_loss.item(
) > 0, 'gt bbox not ignored loss should be non-zero'
|
import warnings
from typing import Any
from langchain_core.memory import BaseMemory
from pydantic import field_validator
from langchain.memory.chat_memory import BaseChatMemory
class CombinedMemory(BaseMemory):
"""Combining multiple memories' data together."""
memories: list[BaseMemory]
"""For tracking all the memories that should be accessed."""
@field_validator("memories")
@classmethod
def check_repeated_memory_variable(
cls, value: list[BaseMemory]
) -> list[BaseMemory]:
all_variables: set[str] = set()
for val in value:
overlap = all_variables.intersection(val.memory_variables)
if overlap:
msg = (
f"The same variables {overlap} are found in multiple"
"memory object, which is not allowed by CombinedMemory."
)
raise ValueError(msg)
all_variables |= set(val.memory_variables)
return value
@field_validator("memories")
@classmethod
def check_input_key(cls, value: list[BaseMemory]) -> list[BaseMemory]:
"""Check that if memories are of type BaseChatMemory that input keys exist."""
for val in value:
if isinstance(val, BaseChatMemory) and val.input_key is None:
warnings.warn(
"When using CombinedMemory, "
"input keys should be so the input is known. "
f" Was not set on {val}"
)
return value
@property
def memory_variables(self) -> list[str]:
"""All the memory variables that this instance provides."""
"""Collected from the all the linked memories."""
memory_variables = []
for memory in self.memories:
memory_variables.extend(memory.memory_variables)
return memory_variables
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
"""Load all vars from sub-memories."""
memory_data: dict[str, Any] = {}
# Collect vars from all sub-memories
for memory in self.memories:
data = memory.load_memory_variables(inputs)
for key, value in data.items():
if key in memory_data:
msg = f"The variable {key} is repeated in the CombinedMemory."
raise ValueError(msg)
memory_data[key] = value
return memory_data
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this session for every memory."""
# Save context for all sub-memories
for memory in self.memories:
memory.save_context(inputs, outputs)
def clear(self) -> None:
"""Clear context from this session for every memory."""
for memory in self.memories:
memory.clear()
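# A minimal usage sketch (assumes ConversationBufferMemory from langchain.memory
# with distinct memory_key values; the key names below are illustrative only):
#
#   from langchain.memory import ConversationBufferMemory
#
#   combined = CombinedMemory(
#       memories=[
#           ConversationBufferMemory(memory_key="chat_history", input_key="input"),
#           ConversationBufferMemory(memory_key="chat_history_summary", input_key="input"),
#       ]
#   )
#   combined.save_context({"input": "hi"}, {"output": "hello"})
#   combined.load_memory_variables({})  # -> returns both keys; overlapping keys are rejected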
|
import warnings
from typing import Any
from langchain_core.memory import BaseMemory
from pydantic import field_validator
from langchain.memory.chat_memory import BaseChatMemory
class CombinedMemory(BaseMemory):
"""Combining multiple memories' data together."""
memories: list[BaseMemory]
"""For tracking all the memories that should be accessed."""
@field_validator("memories")
@classmethod
def check_repeated_memory_variable(
cls, value: list[BaseMemory]
) -> list[BaseMemory]:
all_variables: set[str] = set()
for val in value:
overlap = all_variables.intersection(val.memory_variables)
if overlap:
msg = (
f"The same variables {overlap} are found in multiple"
"memory object, which is not allowed by CombinedMemory."
)
raise ValueError(msg)
all_variables |= set(val.memory_variables)
return value
@field_validator("memories")
@classmethod
def check_input_key(cls, value: list[BaseMemory]) -> list[BaseMemory]:
"""Check that if memories are of type BaseChatMemory that input keys exist."""
for val in value:
if isinstance(val, BaseChatMemory):
if val.input_key is None:
warnings.warn(
"When using CombinedMemory, "
"input keys should be so the input is known. "
f" Was not set on {val}"
)
return value
@property
def memory_variables(self) -> list[str]:
"""All the memory variables that this instance provides."""
"""Collected from the all the linked memories."""
memory_variables = []
for memory in self.memories:
memory_variables.extend(memory.memory_variables)
return memory_variables
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
"""Load all vars from sub-memories."""
memory_data: dict[str, Any] = {}
# Collect vars from all sub-memories
for memory in self.memories:
data = memory.load_memory_variables(inputs)
for key, value in data.items():
if key in memory_data:
msg = f"The variable {key} is repeated in the CombinedMemory."
raise ValueError(msg)
memory_data[key] = value
return memory_data
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this session for every memory."""
# Save context for all sub-memories
for memory in self.memories:
memory.save_context(inputs, outputs)
def clear(self) -> None:
"""Clear context from this session for every memory."""
for memory in self.memories:
memory.clear()
|
from pathlib import Path
import numpy as np
import pytest
from custom_image_torch_encoder import CustomImageTorchEncoder
from jina import Document, DocumentArray, Executor
@pytest.fixture
def encoder():
model_dir = Path(__file__).parents[1] / 'model'
return CustomImageTorchEncoder(
model_definition_file=str(model_dir / 'external_model.py'),
model_state_dict_path=str(model_dir / 'model_state_dict.pth'),
layer_name='conv1',
model_class_name='ExternalModel',
)
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.layer_name == 'conv1'
def test_encoder(encoder):
output_dim = 10
input_dim = 224
test_img = np.random.rand(3, input_dim, input_dim)
docs = DocumentArray([Document(blob=test_img), Document(blob=test_img)])
encoder.encode(docs, {})
assert len(docs) == 2
for doc in docs:
assert doc.embedding.shape == (output_dim,)
def test_encoder_traversal_paths(encoder):
output_dim = 10
input_dim = 224
test_img = np.random.rand(3, input_dim, input_dim)
docs = DocumentArray(
[
Document(chunks=[Document(blob=test_img), Document(blob=test_img)]),
Document(chunks=[Document(blob=test_img), Document(blob=test_img)]),
]
)
encoder.encode(docs, {'traversal_paths': ['c']})
assert len(docs) == 2
assert len(docs.traverse_flat(['c'])) == 4
for chunk in docs.traverse_flat(['c']):
assert chunk.embedding.shape == (output_dim,)
@pytest.mark.gpu
def test_encoder_gpu():
model_dir = Path(__file__).parents[1] / 'model'
encoder = CustomImageTorchEncoder(
model_definition_file=str(model_dir / 'external_model.py'),
model_state_dict_path=str(model_dir / 'model_state_dict.pth'),
layer_name='conv1',
model_class_name='ExternalModel',
device='cuda',
)
output_dim = 10
input_dim = 224
test_img = np.random.rand(3, input_dim, input_dim)
docs = DocumentArray([Document(blob=test_img), Document(blob=test_img)])
encoder.encode(docs, {})
assert len(docs) == 2
for doc in docs:
assert doc.embedding.shape == (output_dim,)
|
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from ...custom_image_torch_encoder import CustomImageTorchEncoder
@pytest.fixture
def encoder():
model_dir = Path(__file__).parents[1] / 'model'
return CustomImageTorchEncoder(
model_definition_file=str(model_dir / 'external_model.py'),
model_state_dict_path=str(model_dir / 'model_state_dict.pth'),
layer_name='conv1',
model_class_name='ExternalModel',
)
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.layer_name == 'conv1'
def test_encoder(encoder):
output_dim = 10
input_dim = 224
test_img = np.random.rand(3, input_dim, input_dim)
docs = DocumentArray([Document(blob=test_img), Document(blob=test_img)])
encoder.encode(docs, {})
assert len(docs) == 2
for doc in docs:
assert doc.embedding.shape == (output_dim,)
def test_encoder_traversal_paths(encoder):
output_dim = 10
input_dim = 224
test_img = np.random.rand(3, input_dim, input_dim)
docs = DocumentArray(
[
Document(chunks=[Document(blob=test_img), Document(blob=test_img)]),
Document(chunks=[Document(blob=test_img), Document(blob=test_img)]),
]
)
encoder.encode(docs, {'traversal_paths': ['c']})
assert len(docs) == 2
assert len(docs.traverse_flat(['c'])) == 4
for chunk in docs.traverse_flat(['c']):
assert chunk.embedding.shape == (output_dim,)
@pytest.mark.gpu
def test_encoder_gpu():
model_dir = Path(__file__).parents[1] / 'model'
encoder = CustomImageTorchEncoder(
model_definition_file=str(model_dir / 'external_model.py'),
model_state_dict_path=str(model_dir / 'model_state_dict.pth'),
layer_name='conv1',
model_class_name='ExternalModel',
device='cuda',
)
output_dim = 10
input_dim = 224
test_img = np.random.rand(3, input_dim, input_dim)
docs = DocumentArray([Document(blob=test_img), Document(blob=test_img)])
encoder.encode(docs, {})
assert len(docs) == 2
for doc in docs:
assert doc.embedding.shape == (output_dim,)
|
"""Test LASER embeddings."""
import pytest
from langchain_community.embeddings.laser import LaserEmbeddings
@pytest.mark.filterwarnings("ignore::UserWarning:")
@pytest.mark.parametrize("lang", [None, "lus_Latn", "english"])
def test_laser_embedding_documents(lang: str) -> None:
"""Test laser embeddings for documents.
    A UserWarning is raised by the LASER library implementation,
    so it is ignored in testing."""
documents = ["hello", "world"]
embedding = LaserEmbeddings(lang=lang)
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 1024
@pytest.mark.filterwarnings("ignore::UserWarning:")
@pytest.mark.parametrize("lang", [None, "lus_Latn", "english"])
def test_laser_embedding_query(lang: str) -> None:
"""Test laser embeddings for query.
    A UserWarning is raised by the LASER library implementation,
    so it is ignored in testing."""
query = "hello world"
embedding = LaserEmbeddings(lang=lang)
output = embedding.embed_query(query)
assert len(output) == 1024
|
"""Test LASER embeddings."""
import pytest
from langchain_community.embeddings.laser import LaserEmbeddings
@pytest.mark.filterwarnings("ignore::UserWarning:")
@pytest.mark.parametrize("lang", [None, "lus_Latn", "english"])
def test_laser_embedding_documents(lang: str) -> None:
"""Test laser embeddings for documents.
    A UserWarning is raised by the LASER library implementation,
    so it is ignored in testing."""
documents = ["hello", "world"]
embedding = LaserEmbeddings(lang=lang) # type: ignore[call-arg]
output = embedding.embed_documents(documents)
assert len(output) == 2 # type: ignore[arg-type]
assert len(output[0]) == 1024 # type: ignore[index]
@pytest.mark.filterwarnings("ignore::UserWarning:")
@pytest.mark.parametrize("lang", [None, "lus_Latn", "english"])
def test_laser_embedding_query(lang: str) -> None:
"""Test laser embeddings for query.
    A UserWarning is raised by the LASER library implementation,
    so it is ignored in testing."""
query = "hello world"
embedding = LaserEmbeddings(lang=lang) # type: ignore[call-arg]
output = embedding.embed_query(query)
assert len(output) == 1024
|
"""Vector stores."""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.vectorstores.base import VST, VectorStore, VectorStoreRetriever
from langchain_core.vectorstores.in_memory import InMemoryVectorStore
__all__ = (
"VST",
"InMemoryVectorStore",
"VectorStore",
"VectorStoreRetriever",
)
_dynamic_imports = {
"VectorStore": "base",
"VST": "base",
"VectorStoreRetriever": "base",
"InMemoryVectorStore": "in_memory",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
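# A minimal sketch of how the lazy import above behaves (assuming this file is
# `langchain_core/vectorstores/__init__.py`): the first attribute access imports
# the submodule and caches the attribute in this module's globals.
#
#   from langchain_core import vectorstores
#
#   store_cls = vectorstores.InMemoryVectorStore        # triggers the import from .in_memory
#   assert "InMemoryVectorStore" in vars(vectorstores)  # cached by __getattr__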
|
"""Vector stores."""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.vectorstores.base import VST, VectorStore, VectorStoreRetriever
from langchain_core.vectorstores.in_memory import InMemoryVectorStore
__all__ = (
"VectorStore",
"VST",
"VectorStoreRetriever",
"InMemoryVectorStore",
)
_dynamic_imports = {
"VectorStore": "base",
"VST": "base",
"VectorStoreRetriever": "base",
"InMemoryVectorStore": "in_memory",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import pytest
from sklearn import metrics
from sklearn.ensemble import (
BaggingClassifier,
BaggingRegressor,
IsolationForest,
StackingClassifier,
StackingRegressor,
)
from sklearn.utils._testing import assert_docstring_consistency, skip_if_no_numpydoc
CLASS_DOCSTRING_CONSISTENCY_CASES = [
{
"objects": [BaggingClassifier, BaggingRegressor, IsolationForest],
"include_params": ["max_samples"],
"exclude_params": None,
"include_attrs": False,
"exclude_attrs": None,
"include_returns": False,
"exclude_returns": None,
"descr_regex_pattern": r"The number of samples to draw from X to train each.*",
"ignore_types": ("max_samples"),
},
{
"objects": [StackingClassifier, StackingRegressor],
"include_params": ["cv", "n_jobs", "passthrough", "verbose"],
"exclude_params": None,
"include_attrs": True,
"exclude_attrs": ["final_estimator_"],
"include_returns": False,
"exclude_returns": None,
"descr_regex_pattern": None,
},
]
FUNCTION_DOCSTRING_CONSISTENCY_CASES = [
{
"objects": [
metrics.precision_recall_fscore_support,
metrics.f1_score,
metrics.fbeta_score,
metrics.precision_score,
metrics.recall_score,
],
"include_params": True,
"exclude_params": ["average", "zero_division"],
"include_attrs": False,
"exclude_attrs": None,
"include_returns": False,
"exclude_returns": None,
"descr_regex_pattern": None,
},
{
"objects": [
metrics.precision_recall_fscore_support,
metrics.f1_score,
metrics.fbeta_score,
metrics.precision_score,
metrics.recall_score,
],
"include_params": ["average"],
"exclude_params": None,
"include_attrs": False,
"exclude_attrs": None,
"include_returns": False,
"exclude_returns": None,
"descr_regex_pattern": " ".join(
(
r"""This parameter is required for multiclass/multilabel targets\.
If ``None``, the metrics for each class are returned\. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``\.
This is applicable only if targets \(``y_\{true,pred\}``\) are binary\.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives\.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean\. This does not take label imbalance into account\.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support \(the number of true instances for each label\)\. This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall\."""
r"[\s\w]*\.*" # optionally match additional sentence
r"""
``'samples'``:
Calculate metrics for each instance, and find their average \(only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`\)\."""
).split()
),
},
]
@pytest.mark.parametrize("case", CLASS_DOCSTRING_CONSISTENCY_CASES)
@skip_if_no_numpydoc
def test_class_docstring_consistency(case):
"""Check docstrings parameters consistency between related classes."""
assert_docstring_consistency(**case)
@pytest.mark.parametrize("case", FUNCTION_DOCSTRING_CONSISTENCY_CASES)
@skip_if_no_numpydoc
def test_function_docstring_consistency(case):
"""Check docstrings parameters consistency between related functions."""
assert_docstring_consistency(**case)
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import pytest
from sklearn import metrics
from sklearn.ensemble import StackingClassifier, StackingRegressor
from sklearn.utils._testing import assert_docstring_consistency, skip_if_no_numpydoc
CLASS_DOCSTRING_CONSISTENCY_CASES = [
{
"objects": [StackingClassifier, StackingRegressor],
"include_params": ["cv", "n_jobs", "passthrough", "verbose"],
"exclude_params": None,
"include_attrs": True,
"exclude_attrs": ["final_estimator_"],
"include_returns": False,
"exclude_returns": None,
"descr_regex_pattern": None,
},
]
FUNCTION_DOCSTRING_CONSISTENCY_CASES = [
{
"objects": [
metrics.precision_recall_fscore_support,
metrics.f1_score,
metrics.fbeta_score,
metrics.precision_score,
metrics.recall_score,
],
"include_params": True,
"exclude_params": ["average", "zero_division"],
"include_attrs": False,
"exclude_attrs": None,
"include_returns": False,
"exclude_returns": None,
"descr_regex_pattern": None,
},
{
"objects": [
metrics.precision_recall_fscore_support,
metrics.f1_score,
metrics.fbeta_score,
metrics.precision_score,
metrics.recall_score,
],
"include_params": ["average"],
"exclude_params": None,
"include_attrs": False,
"exclude_attrs": None,
"include_returns": False,
"exclude_returns": None,
"descr_regex_pattern": " ".join(
(
r"""This parameter is required for multiclass/multilabel targets\.
If ``None``, the metrics for each class are returned\. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``\.
This is applicable only if targets \(``y_\{true,pred\}``\) are binary\.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives\.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean\. This does not take label imbalance into account\.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support \(the number of true instances for each label\)\. This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall\."""
r"[\s\w]*\.*" # optionally match additional sentence
r"""
``'samples'``:
Calculate metrics for each instance, and find their average \(only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`\)\."""
).split()
),
},
]
@pytest.mark.parametrize("case", CLASS_DOCSTRING_CONSISTENCY_CASES)
@skip_if_no_numpydoc
def test_class_docstring_consistency(case):
"""Check docstrings parameters consistency between related classes."""
assert_docstring_consistency(**case)
@pytest.mark.parametrize("case", FUNCTION_DOCSTRING_CONSISTENCY_CASES)
@skip_if_no_numpydoc
def test_function_docstring_consistency(case):
"""Check docstrings parameters consistency between related functions."""
assert_docstring_consistency(**case)
|
import os
from torchaudio.datasets import snips
from torchaudio_unittest.common_utils import get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase
_SAMPLE_RATE = 16000
_SPEAKERS = [
"Aditi",
"Amy",
"Brian",
"Emma",
"Geraint",
"Ivy",
"Joanna",
"Joey",
"Justin",
"Kendra",
"Kimberly",
"Matthew",
"Nicole",
"Raveena",
"Russell",
"Salli",
]
def _save_wav(filepath: str, seed: int):
wav = get_whitenoise(
sample_rate=_SAMPLE_RATE,
duration=0.01,
n_channels=1,
seed=seed,
)
save_wav(filepath, wav, _SAMPLE_RATE)
return wav
def _save_label(label_path: str, wav_stem: str, label: str):
with open(label_path, "a") as f:
f.write(f"{wav_stem} {label}\n")
def _get_mocked_samples(dataset_dir: str, subset: str, seed: int):
samples = []
subset_dir = os.path.join(dataset_dir, subset)
label_path = os.path.join(dataset_dir, "all.iob.snips.txt")
os.makedirs(subset_dir, exist_ok=True)
num_utterance_per_split = 10
for spk in _SPEAKERS:
for i in range(num_utterance_per_split):
wav_stem = f"{spk}-snips-{subset}-{i}"
wav_path = os.path.join(subset_dir, f"{wav_stem}.wav")
waveform = _save_wav(wav_path, seed)
transcript, iob, intent = f"{spk}XXX", f"{spk}YYY", f"{spk}ZZZ"
label = "BOS " + transcript + " EOS\tO " + iob + " " + intent
_save_label(label_path, wav_stem, label)
samples.append((waveform, _SAMPLE_RATE, wav_stem, transcript, iob, intent))
return samples
def get_mock_datasets(dataset_dir):
"""
    dataset_dir: directory of the mocked dataset
"""
os.makedirs(dataset_dir, exist_ok=True)
train_seed = 0
valid_seed = 1
test_seed = 2
mocked_train_samples = _get_mocked_samples(dataset_dir, "train", train_seed)
mocked_valid_samples = _get_mocked_samples(dataset_dir, "valid", valid_seed)
mocked_test_samples = _get_mocked_samples(dataset_dir, "test", test_seed)
return (
mocked_train_samples,
mocked_valid_samples,
mocked_test_samples,
)
class TestSnips(TempDirMixin, TorchaudioTestCase):
root_dir = None
backend = "default"
train_samples = {}
valid_samples = {}
test_samples = {}
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
dataset_dir = os.path.join(cls.root_dir, "SNIPS")
(
cls.train_samples,
cls.valid_samples,
cls.test_samples,
) = get_mock_datasets(dataset_dir)
def _testSnips(self, dataset, data_samples):
num_samples = 0
for i, (data, sample_rate, file_name, transcript, iob, intent) in enumerate(dataset):
self.assertEqual(data, data_samples[i][0])
assert sample_rate == data_samples[i][1]
assert file_name == data_samples[i][2]
assert transcript == data_samples[i][3]
assert iob == data_samples[i][4]
assert intent == data_samples[i][5]
num_samples += 1
assert num_samples == len(data_samples)
def testSnipsTrain(self):
dataset = snips.Snips(self.root_dir, subset="train", audio_format="wav")
self._testSnips(dataset, self.train_samples)
def testSnipsValid(self):
dataset = snips.Snips(self.root_dir, subset="valid", audio_format="wav")
self._testSnips(dataset, self.valid_samples)
def testSnipsTest(self):
dataset = snips.Snips(self.root_dir, subset="test", audio_format="wav")
self._testSnips(dataset, self.test_samples)
|
import os
from torchaudio.datasets import snips
from torchaudio_unittest.common_utils import get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase
_SAMPLE_RATE = 16000
_SPEAKERS = [
"Aditi",
"Amy",
"Brian",
"Emma",
"Geraint",
"Ivy",
"Joanna",
"Joey",
"Justin",
"Kendra",
"Kimberly",
"Matthew",
"Nicole",
"Raveena",
"Russell",
"Salli",
]
def _save_wav(filepath: str, seed: int):
wav = get_whitenoise(
sample_rate=_SAMPLE_RATE,
duration=0.01,
n_channels=1,
seed=seed,
)
save_wav(filepath, wav, _SAMPLE_RATE)
return wav
def _save_label(label_path: str, wav_stem: str, label: str):
with open(label_path, "a") as f:
f.write(f"{wav_stem} {label}\n")
def _get_mocked_samples(dataset_dir: str, subset: str, seed: int):
samples = []
subset_dir = os.path.join(dataset_dir, subset)
label_path = os.path.join(dataset_dir, "all.iob.snips.txt")
os.makedirs(subset_dir, exist_ok=True)
num_utterance_per_split = 10
for spk in _SPEAKERS:
for i in range(num_utterance_per_split):
wav_stem = f"{spk}-snips-{subset}-{i}"
wav_path = os.path.join(subset_dir, f"{wav_stem}.wav")
waveform = _save_wav(wav_path, seed)
transcript, iob, intent = f"{spk}XXX", f"{spk}YYY", f"{spk}ZZZ"
label = "BOS " + transcript + " EOS\tO " + iob + " " + intent
_save_label(label_path, wav_stem, label)
samples.append((waveform, _SAMPLE_RATE, transcript, iob, intent))
return samples
def get_mock_datasets(dataset_dir):
"""
    dataset_dir: directory of the mocked dataset
"""
os.makedirs(dataset_dir, exist_ok=True)
train_seed = 0
valid_seed = 1
test_seed = 2
mocked_train_samples = _get_mocked_samples(dataset_dir, "train", train_seed)
mocked_valid_samples = _get_mocked_samples(dataset_dir, "valid", valid_seed)
mocked_test_samples = _get_mocked_samples(dataset_dir, "test", test_seed)
return (
mocked_train_samples,
mocked_valid_samples,
mocked_test_samples,
)
class TestSnips(TempDirMixin, TorchaudioTestCase):
root_dir = None
backend = "default"
train_samples = {}
valid_samples = {}
test_samples = {}
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
dataset_dir = os.path.join(cls.root_dir, "SNIPS")
(
cls.train_samples,
cls.valid_samples,
cls.test_samples,
) = get_mock_datasets(dataset_dir)
def _testSnips(self, dataset, data_samples):
num_samples = 0
for i, (data, sample_rate, transcript, iob, intent) in enumerate(dataset):
self.assertEqual(data, data_samples[i][0])
assert sample_rate == data_samples[i][1]
assert transcript == data_samples[i][2]
assert iob == data_samples[i][3]
assert intent == data_samples[i][4]
num_samples += 1
assert num_samples == len(data_samples)
def testSnipsTrain(self):
dataset = snips.Snips(self.root_dir, subset="train", audio_format="wav")
self._testSnips(dataset, self.train_samples)
def testSnipsValid(self):
dataset = snips.Snips(self.root_dir, subset="valid", audio_format="wav")
self._testSnips(dataset, self.valid_samples)
def testSnipsTest(self):
dataset = snips.Snips(self.root_dir, subset="test", audio_format="wav")
self._testSnips(dataset, self.test_samples)
|
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
from docarray.typing import AudioNdArray
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import AUDIO_FILE_FORMATS
from docarray.utils._internal.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AudioUrl')
@_register_proto(proto_type_name='audio_url')
class AudioUrl(AnyUrl):
"""
    URL to an audio file.
    Can be a remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
import os
from urllib.parse import urlparse
url = super().validate(value, field, config) # basic url validation
path = urlparse(url).path
ext = os.path.splitext(path)[1][1:].lower()
# pass test if extension is valid or no extension
has_audio_extension = ext in AUDIO_FILE_FORMATS or ext == ''
if not has_audio_extension:
raise ValueError('Audio URL must have a valid extension')
return cls(str(url), scheme=None)
def load(self: T) -> Tuple[AudioNdArray, int]:
"""
Load the data from the url into an AudioNdArray and the frame rate.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioNdArray, AudioUrl
class MyDoc(BaseDoc):
audio_url: AudioUrl
audio_tensor: Optional[AudioNdArray]
doc = MyDoc(audio_url='https://www.kozco.com/tech/piano2.wav')
doc.audio_tensor, _ = doc.audio_url.load()
assert isinstance(doc.audio_tensor, AudioNdArray)
```
---
:return: tuple of an AudioNdArray representing the Audio file content,
and an integer representing the frame rate.
"""
bytes_ = self.load_bytes()
return bytes_.load()
def load_bytes(self, timeout: Optional[float] = None) -> AudioBytes:
"""
Convert url to AudioBytes. This will either load or download the file and save
it into an AudioBytes object.
:param timeout: timeout for urlopen. Only relevant if url is not local
:return: AudioBytes object
"""
bytes_ = super().load_bytes(timeout=timeout)
return AudioBytes(bytes_)
def display(self):
"""
        Play the audio from the url in a notebook.
"""
if is_notebook():
from IPython.display import Audio, display
            remote_url = self.startswith('http')
if remote_url:
display(Audio(data=self))
else:
display(Audio(filename=self))
else:
            warnings.warn('Display of audio is only possible in a notebook.')
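# A minimal usage sketch of the bytes-level path (the URL is the same sample
# file already used in the load() docstring above; the timeout value is illustrative):
#
#   url = AudioUrl('https://www.kozco.com/tech/piano2.wav')
#   audio_bytes = url.load_bytes(timeout=10)  # AudioBytes wrapping the raw file content
#   tensor, frame_rate = audio_bytes.load()   # same tuple that url.load() returns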
|
import warnings
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import AUDIO_FILE_FORMATS
from docarray.utils._internal.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AudioUrl')
@_register_proto(proto_type_name='audio_url')
class AudioUrl(AnyUrl):
"""
    URL to an audio file.
    Can be a remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
import os
from urllib.parse import urlparse
url = super().validate(value, field, config) # basic url validation
path = urlparse(url).path
ext = os.path.splitext(path)[1][1:].lower()
# pass test if extension is valid or no extension
has_audio_extension = ext in AUDIO_FILE_FORMATS or ext == ''
if not has_audio_extension:
raise ValueError('Audio URL must have a valid extension')
return cls(str(url), scheme=None)
def load(self: T) -> Tuple[np.ndarray, int]:
"""
        Load the data from the url into a numpy array and the frame rate.
---
```python
from typing import Optional
from docarray import BaseDoc
import numpy as np
from docarray.typing import AudioUrl, AudioNdArray
class MyDoc(BaseDoc):
audio_url: AudioUrl
audio_tensor: Optional[AudioNdArray]
doc = MyDoc(audio_url='https://www.kozco.com/tech/piano2.wav')
doc.audio_tensor, _ = doc.audio_url.load()
assert isinstance(doc.audio_tensor, np.ndarray)
```
---
        :return: tuple of a numpy array representing the audio file content,
            and an integer representing the frame rate.
"""
bytes_ = AudioBytes(self.load_bytes())
return bytes_.load()
def display(self):
"""
        Play the audio from the url in a notebook.
"""
if is_notebook():
from IPython.display import Audio, display
            remote_url = self.startswith('http')
if remote_url:
display(Audio(data=self))
else:
display(Audio(filename=self))
else:
            warnings.warn('Display of audio is only possible in a notebook.')
|
import multiprocessing
import random
import time
from functools import partial
import pytest
from jina import Client, Document, DocumentArray, Executor, Flow, requests
from jina.types.request.data import Response
NUM_REQUESTS = 5
class MyExecutor(Executor):
@requests(on='/ping')
def ping(self, **kwargs):
time.sleep(0.1 * random.random())
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('shards', [10])
@pytest.mark.parametrize('polling', ['ANY', 'ALL'])
@pytest.mark.parametrize('prefetch', [1, 10])
@pytest.mark.parametrize('concurrent', [15])
def test_concurrent_clients(concurrent, protocol, shards, polling, prefetch, reraise):
def pong(peer_hash, queue, resp: Response):
for d in resp.docs:
queue.put((peer_hash, d.text))
def peer_client(port, protocol, peer_hash, queue):
c = Client(protocol=protocol, port=port, return_responses=True)
for _ in range(NUM_REQUESTS):
c.post(
'/ping',
Document(text=peer_hash),
on_done=lambda r: pong(peer_hash, queue, r),
)
f = Flow(protocol=protocol, prefetch=prefetch).add(
uses=MyExecutor, shards=shards, polling=polling
)
with f:
pqueue = multiprocessing.Queue()
port = f.port
process_pool = []
for peer_id in range(concurrent):
p = multiprocessing.Process(
target=partial(peer_client, port, protocol, str(peer_id), pqueue),
daemon=True,
)
p.start()
process_pool.append(p)
for p in process_pool:
p.join()
queue_len = 0
while not pqueue.empty():
peer_hash, text = pqueue.get()
assert peer_hash == text
queue_len += 1
assert queue_len == concurrent * NUM_REQUESTS
|
import pytest
from jina import Flow, Executor, Client, requests, DocumentArray, Document
import multiprocessing
import random
import time
from functools import partial
from jina.types.request.data import Response
NUM_REQUESTS = 5
class MyExecutor(Executor):
@requests(on='/ping')
def ping(self, **kwargs):
time.sleep(0.1 * random.random())
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('shards', [10])
@pytest.mark.parametrize('polling', ['ANY', 'ALL'])
@pytest.mark.parametrize('prefetch', [1, 10])
@pytest.mark.parametrize('concurrent', [15])
def test_concurrent_clients(concurrent, protocol, shards, polling, prefetch, reraise):
def pong(peer_hash, queue, resp: Response):
for d in resp.docs:
queue.put((peer_hash, d.text))
def peer_client(port, protocol, peer_hash, queue):
c = Client(protocol=protocol, port=port, return_responses=True)
for _ in range(NUM_REQUESTS):
c.post(
'/ping',
Document(text=peer_hash),
on_done=lambda r: pong(peer_hash, queue, r),
)
f = Flow(protocol=protocol, prefetch=prefetch).add(
uses=MyExecutor, shards=shards, polling=polling
)
set_of_clients_served = set()
with f:
pqueue = multiprocessing.Queue()
port = f.port
process_pool = []
for peer_id in range(concurrent):
p = multiprocessing.Process(
target=partial(peer_client, port, protocol, str(peer_id), pqueue),
daemon=True,
)
p.start()
process_pool.append(p)
for p in process_pool:
p.join()
queue_len = 0
while not pqueue.empty():
peer_hash, text = pqueue.get()
assert peer_hash == text
queue_len += 1
assert queue_len == concurrent * NUM_REQUESTS
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmdet.models.dense_heads import YOLOXHead
def test_yolox_head_loss():
"""Tests yolox head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='SimOTAAssigner',
center_radius=2.5,
candidate_topk=10,
iou_weight=3.0,
cls_weight=1.0)))
self = YOLOXHead(
num_classes=4, in_channels=1, use_depthwise=False, train_cfg=train_cfg)
assert not self.use_l1
assert isinstance(self.multi_level_cls_convs[0][0], ConvModule)
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16]
]
cls_scores, bbox_preds, objectnesses = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
empty_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses,
gt_bboxes, gt_labels, img_metas)
    # When there is no truth, there should be no cls or box loss, but the
    # objectness loss should be nonzero.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
empty_obj_loss = empty_gt_losses['loss_obj'].sum()
assert empty_cls_loss.item() == 0, (
'there should be no cls loss when there are no true boxes')
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_obj_loss.item() > 0, 'objectness loss should be non-zero'
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
self = YOLOXHead(
num_classes=4, in_channels=1, use_depthwise=True, train_cfg=train_cfg)
assert isinstance(self.multi_level_cls_convs[0][0],
DepthwiseSeparableConvModule)
self.use_l1 = True
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses, gt_bboxes,
gt_labels, img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
onegt_obj_loss = one_gt_losses['loss_obj'].sum()
onegt_l1_loss = one_gt_losses['loss_l1'].sum()
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_obj_loss.item() > 0, 'obj loss should be non-zero'
assert onegt_l1_loss.item() > 0, 'l1 loss should be non-zero'
    # Test ground truth out of bounds
gt_bboxes = [torch.Tensor([[s * 4, s * 4, s * 4 + 10, s * 4 + 10]])]
gt_labels = [torch.LongTensor([2])]
empty_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses,
gt_bboxes, gt_labels, img_metas)
# When gt_bboxes out of bound, the assign results should be empty,
# so the cls and bbox loss should be zero.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
empty_obj_loss = empty_gt_losses['loss_obj'].sum()
assert empty_cls_loss.item() == 0, (
'there should be no cls loss when gt_bboxes out of bound')
assert empty_box_loss.item() == 0, (
'there should be no box loss when gt_bboxes out of bound')
assert empty_obj_loss.item() > 0, 'objectness loss should be non-zero'
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmdet.models.dense_heads import YOLOXHead
def test_yolox_head_loss():
"""Tests yolox head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='SimOTAAssigner',
center_radius=2.5,
candidate_topk=10,
iou_weight=3.0,
cls_weight=1.0)))
self = YOLOXHead(
num_classes=4, in_channels=1, use_depthwise=False, train_cfg=train_cfg)
assert not self.use_l1
assert isinstance(self.multi_level_cls_convs[0][0], ConvModule)
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16]
]
cls_scores, bbox_preds, objectnesses = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
empty_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses,
gt_bboxes, gt_labels, img_metas)
    # When there is no truth, there should be no cls or box loss, but the
    # objectness loss should be nonzero.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
empty_obj_loss = empty_gt_losses['loss_obj'].sum()
assert empty_cls_loss.item() == 0, (
'there should be no cls loss when there are no true boxes')
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_obj_loss.item() > 0, 'objectness loss should be non-zero'
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
self = YOLOXHead(
num_classes=4, in_channels=1, use_depthwise=True, train_cfg=train_cfg)
assert isinstance(self.multi_level_cls_convs[0][0],
DepthwiseSeparableConvModule)
self.use_l1 = True
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses, gt_bboxes,
gt_labels, img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
onegt_obj_loss = one_gt_losses['loss_obj'].sum()
onegt_l1_loss = one_gt_losses['loss_l1'].sum()
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_obj_loss.item() > 0, 'obj loss should be non-zero'
assert onegt_l1_loss.item() > 0, 'l1 loss should be non-zero'
|
"""
This is a simple application for a sparse encoder: computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
They are stored in a sparse matrix format, which is more efficient for storage and computation.
We can also visualize the top tokens for each text."""
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
stats = model.get_sparsity_stats(embeddings)
print(f"Embedding sparsity: {stats}")
print(f"Average non-zero dimensions: {stats['row_non_zero_mean']:.2f}")
print(f"Sparsity percentage: {stats['row_sparsity_mean']:.2%}")
"""
Embedding dim: 30522
Embedding sparsity: {'num_rows': 3, 'num_cols': 30522, 'row_non_zero_mean': 56.66666793823242, 'row_sparsity_mean': 0.9981433749198914}
Average non-zero dimensions: 56.67
Sparsity percentage: 99.81%
"""
# Visualize top tokens for each text
top_k = 10
token_weights = model.decode(embeddings, top_k=top_k)
print(f"\nTop tokens {top_k} for each text:")
# For each sentence, print the top tokens and their weights
for i, sentence in enumerate(sentences):
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in token_weights[i]])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
# Example of using max_active_dims during encoding
print("\n--- Using max_active_dims during encoding ---")
# Generate embeddings with limited active dimensions
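# max_active_dims caps the number of non-zero dimensions kept per embedding (here 32),
# trading a little representational detail for smaller, cheaper sparse vectors.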
embeddings_limited = model.encode(sentences, max_active_dims=32)
stats_limited = model.get_sparsity_stats(embeddings_limited)
print(f"Limited embedding sparsity: {stats_limited}")
print(f"Average non-zero dimensions: {stats_limited['row_non_zero_mean']:.2f}")
print(f"Sparsity percentage: {stats_limited['row_sparsity_mean']:.2%}")
"""
--- Using max_active_dims during encoding ---
Limited embedding sparsity: {'num_rows': 3, 'num_cols': 30522, 'row_non_zero_mean': 32.0, 'row_sparsity_mean': 0.9989516139030457}
Average non-zero dimensions: 32.00
Sparsity percentage: 99.90%
"""
# Comparing memory usage
print("\n--- Comparing memory usage ---")
def get_memory_size(tensor):
if tensor.is_sparse:
        # For sparse tensors, count the stored non-zero values plus their indices
return (
tensor._values().element_size() * tensor._values().nelement()
+ tensor._indices().element_size() * tensor._indices().nelement()
)
else:
return tensor.element_size() * tensor.nelement()
print(f"Original embeddings memory: {get_memory_size(embeddings) / 1024:.2f} KB")
print(f"Embeddings with max_active_dims=32 memory: {get_memory_size(embeddings_limited) / 1024:.2f} KB")
"""
--- Comparing memory usage ---
Original embeddings memory: 3.32 KB
Embeddings with max_active_dims=32 memory: 1.88 KB
"""
|
"""
This is a simple application of a sparse encoder: computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
We can also visualize the top tokens for each text."""
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
print(f"Embedding sparsity: {model.get_sparsity_stats(embeddings)}")
"""
Embedding dim: 30522
Embedding sparsity: {'num_rows': 3, 'num_cols': 30522, 'row_non_zero_mean': 56.66666793823242, 'row_sparsity_mean': 0.9981433749198914}
"""
# Visualize top tokens for each text
top_k = 10
token_weights = model.decode(embeddings, top_k=top_k)
print(f"\nTop tokens {top_k} for each text:")
# For each sentence, print the top tokens and their weights
for i, sentence in enumerate(sentences):
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in token_weights[i]])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
|
from typing import Any, Callable, Optional, Tuple
import torch
from .. import transforms
from .vision import VisionDataset
class FakeData(VisionDataset):
"""A fake dataset that returns randomly generated images and returns them as PIL images
Args:
size (int, optional): Size of the dataset. Default: 1000 images
        image_size(tuple, optional): Size of the returned images. Default: (3, 224, 224)
num_classes(int, optional): Number of classes in the dataset. Default: 10
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
random_offset (int): Offsets the index-based random seed used to
generate each image. Default: 0
"""
def __init__(
self,
size: int = 1000,
image_size: Tuple[int, int, int] = (3, 224, 224),
num_classes: int = 10,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
random_offset: int = 0,
) -> None:
super().__init__(transform=transform, target_transform=target_transform)
self.size = size
self.num_classes = num_classes
self.image_size = image_size
self.random_offset = random_offset
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
# create random image that is consistent with the index id
if index >= len(self):
raise IndexError(f"{self.__class__.__name__} index out of range")
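        # Seed the global RNG from the index so every sample is deterministic and
        # reproducible; the previous RNG state is restored afterwards so other
        # random operations are unaffected.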
rng_state = torch.get_rng_state()
torch.manual_seed(index + self.random_offset)
img = torch.randn(*self.image_size)
target = torch.randint(0, self.num_classes, size=(1,), dtype=torch.long)[0]
torch.set_rng_state(rng_state)
# convert to PIL Image
img = transforms.ToPILImage()(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target.item()
def __len__(self) -> int:
return self.size
|
from typing import Any, Callable, Optional, Tuple
import torch
from .. import transforms
from .vision import VisionDataset
class FakeData(VisionDataset):
"""A fake dataset that returns randomly generated images and returns them as PIL images
Args:
size (int, optional): Size of the dataset. Default: 1000 images
        image_size(tuple, optional): Size of the returned images. Default: (3, 224, 224)
num_classes(int, optional): Number of classes in the dataset. Default: 10
        transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
random_offset (int): Offsets the index-based random seed used to
generate each image. Default: 0
"""
def __init__(
self,
size: int = 1000,
image_size: Tuple[int, int, int] = (3, 224, 224),
num_classes: int = 10,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
random_offset: int = 0,
) -> None:
super().__init__(transform=transform, target_transform=target_transform)
self.size = size
self.num_classes = num_classes
self.image_size = image_size
self.random_offset = random_offset
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
# create random image that is consistent with the index id
if index >= len(self):
raise IndexError(f"{self.__class__.__name__} index out of range")
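        # Seed the global RNG from the index so every sample is deterministic and
        # reproducible; the previous RNG state is restored afterwards so other
        # random operations are unaffected.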
rng_state = torch.get_rng_state()
torch.manual_seed(index + self.random_offset)
img = torch.randn(*self.image_size)
target = torch.randint(0, self.num_classes, size=(1,), dtype=torch.long)[0]
torch.set_rng_state(rng_state)
# convert to PIL Image
img = transforms.ToPILImage()(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target.item()
def __len__(self) -> int:
return self.size
|
"""langchain-core version information and utilities."""
VERSION = "0.3.63"
|
"""langchain-core version information and utilities."""
VERSION = "0.3.62"
|
"""Utils for LLM Compiler."""
import ast
import re
from typing import Any, Dict, List, Sequence, Tuple, Union
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import BaseTool, adapt_to_async_tool
from .schema import (
LLMCompilerParseResult,
LLMCompilerTask,
)
# $1 or ${1} -> 1
ID_PATTERN = r"\$\{?(\d+)\}?"
def default_dependency_rule(idx: int, args: str) -> bool:
"""Default dependency rule."""
matches = re.findall(ID_PATTERN, args)
numbers = [int(match) for match in matches]
return idx in numbers
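# e.g. default_dependency_rule(1, '"$1 and ${2}"') -> True (the args reference step 1),
# while default_dependency_rule(3, '"$1 and ${2}"') -> False.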
def parse_llm_compiler_action_args(args: str) -> Union[List, Tuple]:
"""Parse arguments from a string."""
# This will convert the string into a python object
# e.g. '"Ronaldo number of kids"' -> ("Ronaldo number of kids", )
# '"I can answer the question now.", [3]' -> ("I can answer the question now.", [3])
if args == "":
return ()
try:
eval_args: Union[List, Tuple, str] = ast.literal_eval(args)
except Exception:
eval_args = args
if not isinstance(eval_args, list) and not isinstance(eval_args, tuple):
new_args: Union[List, Tuple] = (eval_args,)
else:
new_args = eval_args
return new_args
def _find_tool(tool_name: str, tools: Sequence[BaseTool]) -> BaseTool:
"""
Find a tool by name.
Args:
tool_name: Name of the tool to find.
Returns:
Tool or StructuredTool.
"""
for tool in tools:
if tool.metadata.name == tool_name:
return tool
raise ValueError(f"Tool {tool_name} not found.")
def _get_dependencies_from_graph(idx: int, tool_name: str, args: str) -> List[int]:
"""Get dependencies from a graph."""
if tool_name == "join":
# depends on the previous step
dependencies = list(range(1, idx))
else:
# define dependencies based on the dependency rule in tool_definitions.py
dependencies = [i for i in range(1, idx) if default_dependency_rule(i, args)]
return dependencies
def instantiate_new_step(
tools: Sequence[BaseTool],
idx: int,
tool_name: str,
args: str,
thought: str,
) -> LLMCompilerTask:
"""Instantiate a new step."""
dependencies = _get_dependencies_from_graph(idx, tool_name, args)
args_list = parse_llm_compiler_action_args(args)
if tool_name == "join":
# tool: Optional[BaseTool] = None
# assume that the only tool that returns None is join
tool: BaseTool = FunctionTool.from_defaults(fn=lambda x: None)
else:
tool = _find_tool(tool_name, tools)
return LLMCompilerTask(
idx=idx,
name=tool_name,
tool=adapt_to_async_tool(tool),
args=args_list,
dependencies=dependencies,
# TODO: look into adding a stringify rule
# stringify_rule=stringify_rule,
thought=thought,
is_join=tool_name == "join",
)
def get_graph_dict(
parse_results: List[LLMCompilerParseResult],
tools: Sequence[BaseTool],
) -> Dict[int, Any]:
"""Get graph dict."""
graph_dict = {}
for parse_result in parse_results:
# idx = 1, function = "search", args = "Ronaldo number of kids"
# thought will be the preceding thought, if any, otherwise an empty string
# thought, idx, tool_name, args, _ = match
idx = int(parse_result.idx)
task = instantiate_new_step(
tools=tools,
idx=idx,
tool_name=parse_result.tool_name,
args=parse_result.args,
thought=parse_result.thought,
)
graph_dict[idx] = task
if task.is_join:
break
return graph_dict
def generate_context_for_replanner(
tasks: Dict[int, LLMCompilerTask], joiner_thought: str
) -> str:
"""
Generate context for replanning.
Formatted like this.
```
1. action 1
Observation: xxx
2. action 2
Observation: yyy
...
    Thought: joiner_thought
```
"""
previous_plan_and_observations = "\n".join(
[
task.get_thought_action_observation(
include_action=True, include_action_idx=True
)
for task in tasks.values()
if not task.is_join
]
)
joiner_thought = f"Thought: {joiner_thought}"
# use f-string instead
return f"{previous_plan_and_observations}\n\n{joiner_thought}"
def format_contexts(contexts: Sequence[str]) -> str:
"""
Format contexts.
Taken from https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/llm_compiler.py
    Contexts is a list of context strings.
    Each context is formatted as described in generate_context_for_replanner.
"""
formatted_contexts = ""
for context in contexts:
formatted_contexts += f"Previous Plan:\n\n{context}\n\n"
formatted_contexts += "Current Plan:\n\n"
return formatted_contexts
|
"""Utils for LLM Compiler."""
import ast
import re
from typing import Any, Dict, List, Sequence, Tuple, Union
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import BaseTool, adapt_to_async_tool
from .schema import (
LLMCompilerParseResult,
LLMCompilerTask,
)
# $1 or ${1} -> 1
ID_PATTERN = r"\$\{?(\d+)\}?"
def default_dependency_rule(idx: int, args: str) -> bool:
"""Default dependency rule."""
matches = re.findall(ID_PATTERN, args)
numbers = [int(match) for match in matches]
return idx in numbers
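# e.g. default_dependency_rule(1, '"$1 and ${2}"') -> True (the args reference step 1),
# while default_dependency_rule(3, '"$1 and ${2}"') -> False.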
def parse_llm_compiler_action_args(args: str) -> Union[List, Tuple]:
"""Parse arguments from a string."""
# This will convert the string into a python object
# e.g. '"Ronaldo number of kids"' -> ("Ronaldo number of kids", )
# '"I can answer the question now.", [3]' -> ("I can answer the question now.", [3])
if args == "":
return ()
try:
eval_args: Union[List, Tuple, str] = ast.literal_eval(args)
except Exception:
eval_args = args
if not isinstance(eval_args, list) and not isinstance(eval_args, tuple):
new_args: Union[List, Tuple] = (eval_args,)
else:
new_args = eval_args
return new_args
def _find_tool(tool_name: str, tools: Sequence[BaseTool]) -> BaseTool:
"""
Find a tool by name.
Args:
tool_name: Name of the tool to find.
Returns:
Tool or StructuredTool.
"""
for tool in tools:
if tool.metadata.name == tool_name:
return tool
raise ValueError(f"Tool {tool_name} not found.")
def _get_dependencies_from_graph(idx: int, tool_name: str, args: str) -> List[int]:
"""Get dependencies from a graph."""
if tool_name == "join":
# depends on the previous step
dependencies = list(range(1, idx))
else:
# define dependencies based on the dependency rule in tool_definitions.py
dependencies = [i for i in range(1, idx) if default_dependency_rule(i, args)]
return dependencies
def instantiate_new_step(
tools: Sequence[BaseTool],
idx: int,
tool_name: str,
args: str,
thought: str,
) -> LLMCompilerTask:
"""Instantiate a new step."""
dependencies = _get_dependencies_from_graph(idx, tool_name, args)
args_list = parse_llm_compiler_action_args(args)
if tool_name == "join":
# tool: Optional[BaseTool] = None
# assume that the only tool that returns None is join
tool: BaseTool = FunctionTool.from_defaults(fn=lambda x: None)
else:
tool = _find_tool(tool_name, tools)
return LLMCompilerTask(
idx=idx,
name=tool_name,
tool=adapt_to_async_tool(tool),
args=args_list,
dependencies=dependencies,
# TODO: look into adding a stringify rule
# stringify_rule=stringify_rule,
thought=thought,
is_join=tool_name == "join",
)
def get_graph_dict(
parse_results: List[LLMCompilerParseResult],
tools: Sequence[BaseTool],
) -> Dict[int, Any]:
"""Get graph dict."""
graph_dict = {}
for parse_result in parse_results:
# idx = 1, function = "search", args = "Ronaldo number of kids"
# thought will be the preceding thought, if any, otherwise an empty string
# thought, idx, tool_name, args, _ = match
idx = int(parse_result.idx)
task = instantiate_new_step(
tools=tools,
idx=idx,
tool_name=parse_result.tool_name,
args=parse_result.args,
thought=parse_result.thought,
)
graph_dict[idx] = task
if task.is_join:
break
return graph_dict
def generate_context_for_replanner(
tasks: Dict[int, LLMCompilerTask], joiner_thought: str
) -> str:
"""
Generate context for replanning.
Formatted like this.
```
1. action 1
Observation: xxx
2. action 2
Observation: yyy
...
    Thought: joiner_thought
```
"""
previous_plan_and_observations = "\n".join(
[
task.get_thought_action_observation(
include_action=True, include_action_idx=True
)
for task in tasks.values()
if not task.is_join
]
)
joiner_thought = f"Thought: {joiner_thought}"
# use f-string instead
return f"{previous_plan_and_observations}\n\n{joiner_thought}"
def format_contexts(contexts: Sequence[str]) -> str:
"""
Format contexts.
Taken from https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/llm_compiler.py
    Contexts is a list of context strings.
    Each context is formatted as described in generate_context_for_replanner.
"""
formatted_contexts = ""
for context in contexts:
formatted_contexts += f"Previous Plan:\n\n{context}\n\n"
formatted_contexts += "Current Plan:\n\n"
return formatted_contexts
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import (
AudioNdArray,
NdArray,
VideoBytes,
VideoNdArray,
VideoTorchTensor,
VideoUrl,
)
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.video import VideoTensorFlowTensor
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load(file_url):
url = parse_obj_as(VideoUrl, file_url)
video, audio, indices = url.load()
assert isinstance(audio, np.ndarray)
assert isinstance(audio, AudioNdArray)
assert isinstance(video, np.ndarray)
assert isinstance(video, VideoNdArray)
assert isinstance(indices, np.ndarray)
assert isinstance(indices, NdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
@pytest.mark.parametrize(
'field, attr_cls',
[
('video', VideoNdArray),
('audio', AudioNdArray),
('key_frame_indices', NdArray),
],
)
def test_load_one_of_named_tuple_results(file_url, field, attr_cls):
url = parse_obj_as(VideoUrl, file_url)
result = getattr(url.load(), field)
assert isinstance(result, np.ndarray)
assert isinstance(result, attr_cls)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_torch_tensor_field(file_url):
class MyVideoDoc(BaseDoc):
video_url: VideoUrl
tensor: Optional[VideoTorchTensor]
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, VideoTorchTensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_tensorflow_tensor_field(file_url):
class MyVideoDoc(BaseDoc):
video_url: VideoUrl
tensor: Optional[VideoTensorFlowTensor]
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, VideoTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
def test_json_schema():
schema_json_of(VideoUrl)
def test_dump_json():
url = parse_obj_as(VideoUrl, REMOTE_VIDEO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(VideoUrl, path_to_file)
assert isinstance(url, VideoUrl)
assert isinstance(url, str)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_proto_video_url(file_url):
uri = parse_obj_as(VideoUrl, file_url)
proto = uri._to_node_protobuf()
assert 'video_url' in str(proto)
def test_load_bytes():
file_url = LOCAL_VIDEO_FILE
uri = parse_obj_as(VideoUrl, file_url)
video_bytes = uri.load_bytes()
assert isinstance(video_bytes, bytes)
assert isinstance(video_bytes, VideoBytes)
assert len(video_bytes) > 0
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import (
AudioNdArray,
NdArray,
VideoBytes,
VideoNdArray,
VideoTorchTensor,
VideoUrl,
)
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.video import VideoTensorFlowTensor
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load(file_url):
url = parse_obj_as(VideoUrl, file_url)
video, audio, indices = url.load()
assert isinstance(audio, np.ndarray)
assert isinstance(audio, AudioNdArray)
assert isinstance(video, np.ndarray)
assert isinstance(video, VideoNdArray)
assert isinstance(indices, np.ndarray)
assert isinstance(indices, NdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
@pytest.mark.parametrize(
'field, attr_cls',
[
('video', VideoNdArray),
('audio', AudioNdArray),
('key_frame_indices', NdArray),
],
)
def test_load_one_of_named_tuple_results(file_url, field, attr_cls):
url = parse_obj_as(VideoUrl, file_url)
result = getattr(url.load(), field)
assert isinstance(result, np.ndarray)
assert isinstance(result, attr_cls)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_torch_tensor_field(file_url):
class MyVideoDoc(BaseDoc):
video_url: VideoUrl
tensor: Optional[VideoTorchTensor]
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, VideoTorchTensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_tensorflow_tensor_field(file_url):
class MyVideoDoc(BaseDoc):
video_url: VideoUrl
tensor: Optional[VideoTensorFlowTensor]
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, VideoTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
def test_json_schema():
schema_json_of(VideoUrl)
def test_dump_json():
url = parse_obj_as(VideoUrl, REMOTE_VIDEO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(VideoUrl, path_to_file)
assert isinstance(url, VideoUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'illegal',
'https://www.google.com',
'my/local/text/file.txt',
'my/local/text/file.png',
'my/local/file.mp3',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='VideoUrl'):
parse_obj_as(VideoUrl, path_to_file)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_proto_video_url(file_url):
uri = parse_obj_as(VideoUrl, file_url)
proto = uri._to_node_protobuf()
assert 'video_url' in str(proto)
def test_load_bytes():
file_url = LOCAL_VIDEO_FILE
uri = parse_obj_as(VideoUrl, file_url)
video_bytes = uri.load_bytes()
assert isinstance(video_bytes, bytes)
assert isinstance(video_bytes, VideoBytes)
assert len(video_bytes) > 0
|
import os
from pathlib import Path
from typing import Any, Callable, Optional, Union
import torch.utils.data as data
from ..utils import _log_api_usage_once
class VisionDataset(data.Dataset):
"""
Base Class For making datasets which are compatible with torchvision.
    It is necessary to override the ``__getitem__`` and ``__len__`` methods.
Args:
root (string, optional): Root directory of dataset. Only used for `__repr__`.
transforms (callable, optional): A function/transforms that takes in
an image and a label and returns the transformed versions of both.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
.. note::
:attr:`transforms` and the combination of :attr:`transform` and :attr:`target_transform` are mutually exclusive.
"""
_repr_indent = 4
def __init__(
self,
root: Union[str, Path] = None, # type: ignore[assignment]
transforms: Optional[Callable] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> None:
_log_api_usage_once(self)
if isinstance(root, str):
root = os.path.expanduser(root)
self.root = root
has_transforms = transforms is not None
has_separate_transform = transform is not None or target_transform is not None
if has_transforms and has_separate_transform:
raise ValueError("Only transforms or transform/target_transform can be passed as argument")
# for backwards-compatibility
self.transform = transform
self.target_transform = target_transform
if has_separate_transform:
transforms = StandardTransform(transform, target_transform)
self.transforms = transforms
def __getitem__(self, index: int) -> Any:
"""
Args:
index (int): Index
Returns:
(Any): Sample and meta data, optionally transformed by the respective transforms.
"""
raise NotImplementedError
def __len__(self) -> int:
raise NotImplementedError
def __repr__(self) -> str:
head = "Dataset " + self.__class__.__name__
body = [f"Number of datapoints: {self.__len__()}"]
if self.root is not None:
body.append(f"Root location: {self.root}")
body += self.extra_repr().splitlines()
if hasattr(self, "transforms") and self.transforms is not None:
body += [repr(self.transforms)]
lines = [head] + [" " * self._repr_indent + line for line in body]
return "\n".join(lines)
def _format_transform_repr(self, transform: Callable, head: str) -> list[str]:
lines = transform.__repr__().splitlines()
return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]
def extra_repr(self) -> str:
return ""
class StandardTransform:
def __init__(self, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None) -> None:
self.transform = transform
self.target_transform = target_transform
def __call__(self, input: Any, target: Any) -> tuple[Any, Any]:
if self.transform is not None:
input = self.transform(input)
if self.target_transform is not None:
target = self.target_transform(target)
return input, target
def _format_transform_repr(self, transform: Callable, head: str) -> list[str]:
lines = transform.__repr__().splitlines()
return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]
def __repr__(self) -> str:
body = [self.__class__.__name__]
if self.transform is not None:
body += self._format_transform_repr(self.transform, "Transform: ")
if self.target_transform is not None:
body += self._format_transform_repr(self.target_transform, "Target transform: ")
return "\n".join(body)
|
import os
from pathlib import Path
from typing import Any, Callable, List, Optional, Tuple, Union
import torch.utils.data as data
from ..utils import _log_api_usage_once
class VisionDataset(data.Dataset):
"""
Base Class For making datasets which are compatible with torchvision.
    It is necessary to override the ``__getitem__`` and ``__len__`` methods.
Args:
root (string, optional): Root directory of dataset. Only used for `__repr__`.
transforms (callable, optional): A function/transforms that takes in
an image and a label and returns the transformed versions of both.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
.. note::
:attr:`transforms` and the combination of :attr:`transform` and :attr:`target_transform` are mutually exclusive.
"""
_repr_indent = 4
def __init__(
self,
root: Union[str, Path] = None, # type: ignore[assignment]
transforms: Optional[Callable] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> None:
_log_api_usage_once(self)
if isinstance(root, str):
root = os.path.expanduser(root)
self.root = root
has_transforms = transforms is not None
has_separate_transform = transform is not None or target_transform is not None
if has_transforms and has_separate_transform:
raise ValueError("Only transforms or transform/target_transform can be passed as argument")
# for backwards-compatibility
self.transform = transform
self.target_transform = target_transform
if has_separate_transform:
transforms = StandardTransform(transform, target_transform)
self.transforms = transforms
def __getitem__(self, index: int) -> Any:
"""
Args:
index (int): Index
Returns:
(Any): Sample and meta data, optionally transformed by the respective transforms.
"""
raise NotImplementedError
def __len__(self) -> int:
raise NotImplementedError
def __repr__(self) -> str:
head = "Dataset " + self.__class__.__name__
body = [f"Number of datapoints: {self.__len__()}"]
if self.root is not None:
body.append(f"Root location: {self.root}")
body += self.extra_repr().splitlines()
if hasattr(self, "transforms") and self.transforms is not None:
body += [repr(self.transforms)]
lines = [head] + [" " * self._repr_indent + line for line in body]
return "\n".join(lines)
def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
lines = transform.__repr__().splitlines()
return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]
def extra_repr(self) -> str:
return ""
class StandardTransform:
def __init__(self, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None) -> None:
self.transform = transform
self.target_transform = target_transform
def __call__(self, input: Any, target: Any) -> Tuple[Any, Any]:
if self.transform is not None:
input = self.transform(input)
if self.target_transform is not None:
target = self.target_transform(target)
return input, target
def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
lines = transform.__repr__().splitlines()
return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]
def __repr__(self) -> str:
body = [self.__class__.__name__]
if self.transform is not None:
body += self._format_transform_repr(self.transform, "Transform: ")
if self.target_transform is not None:
body += self._format_transform_repr(self.target_transform, "Target transform: ")
return "\n".join(body)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmengine.model import BaseModule
from mmdet.registry import MODELS
@MODELS.register_module()
class TripletLoss(BaseModule):
"""Triplet loss with hard positive/negative mining.
Reference:
Hermans et al. In Defense of the Triplet Loss for
Person Re-Identification. arXiv:1703.07737.
Imported from `<https://github.com/KaiyangZhou/deep-person-reid/blob/
master/torchreid/losses/hard_mine_triplet_loss.py>`_.
Args:
margin (float, optional): Margin for triplet loss. Defaults to 0.3.
loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
hard_mining (bool, optional): Whether to perform hard mining.
Defaults to True.
"""
def __init__(self,
margin: float = 0.3,
loss_weight: float = 1.0,
hard_mining=True):
super(TripletLoss, self).__init__()
self.margin = margin
self.ranking_loss = nn.MarginRankingLoss(margin=margin)
self.loss_weight = loss_weight
self.hard_mining = hard_mining
def hard_mining_triplet_loss_forward(
self, inputs: torch.Tensor,
targets: torch.LongTensor) -> torch.Tensor:
"""
Args:
inputs (torch.Tensor): feature matrix with shape
(batch_size, feat_dim).
targets (torch.LongTensor): ground truth labels with shape
(batch_size).
Returns:
torch.Tensor: triplet loss with hard mining.
"""
batch_size = inputs.size(0)
# Compute Euclidean distance
dist = torch.pow(inputs, 2).sum(
dim=1, keepdim=True).expand(batch_size, batch_size)
dist = dist + dist.t()
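        # addmm_ subtracts 2 * inputs @ inputs.t() in place, completing the expansion
        # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 * a.b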
dist.addmm_(inputs, inputs.t(), beta=1, alpha=-2)
dist = dist.clamp(min=1e-12).sqrt() # for numerical stability
# For each anchor, find the furthest positive sample
# and nearest negative sample in the embedding space
mask = targets.expand(batch_size, batch_size).eq(
targets.expand(batch_size, batch_size).t())
dist_ap, dist_an = [], []
for i in range(batch_size):
dist_ap.append(dist[i][mask[i]].max().unsqueeze(0))
dist_an.append(dist[i][mask[i] == 0].min().unsqueeze(0))
dist_ap = torch.cat(dist_ap)
dist_an = torch.cat(dist_an)
# Compute ranking hinge loss
y = torch.ones_like(dist_an)
return self.loss_weight * self.ranking_loss(dist_an, dist_ap, y)
def forward(self, inputs: torch.Tensor,
targets: torch.LongTensor) -> torch.Tensor:
"""
Args:
inputs (torch.Tensor): feature matrix with shape
(batch_size, feat_dim).
targets (torch.LongTensor): ground truth labels with shape
                (batch_size).
Returns:
torch.Tensor: triplet loss.
"""
if self.hard_mining:
return self.hard_mining_triplet_loss_forward(inputs, targets)
else:
raise NotImplementedError()
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmengine.model import BaseModule
from mmdet.registry import MODELS
@MODELS.register_module()
class TripletLoss(BaseModule):
"""Triplet loss with hard positive/negative mining.
Reference:
Hermans et al. In Defense of the Triplet Loss for
Person Re-Identification. arXiv:1703.07737.
Imported from `<https://github.com/KaiyangZhou/deep-person-reid/blob/
master/torchreid/losses/hard_mine_triplet_loss.py>`_.
Args:
margin (float, optional): Margin for triplet loss. Defaults to 0.3.
loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
hard_mining (bool, optional): Whether to perform hard mining.
Defaults to True.
"""
def __init__(self,
margin: float = 0.3,
loss_weight: float = 1.0,
hard_mining=True):
super(TripletLoss, self).__init__()
self.margin = margin
self.ranking_loss = nn.MarginRankingLoss(margin=margin)
self.loss_weight = loss_weight
self.hard_mining = hard_mining
def hard_mining_triplet_loss_forward(
self, inputs: torch.Tensor,
targets: torch.LongTensor) -> torch.Tensor:
"""
Args:
inputs (torch.Tensor): feature matrix with shape
(batch_size, feat_dim).
targets (torch.LongTensor): ground truth labels with shape
                (batch_size).
Returns:
torch.Tensor: triplet loss with hard mining.
"""
batch_size = inputs.size(0)
# Compute Euclidean distance
dist = torch.pow(inputs, 2).sum(
dim=1, keepdim=True).expand(batch_size, batch_size)
dist = dist + dist.t()
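        # addmm_ subtracts 2 * inputs @ inputs.t() in place, completing the expansion
        # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 * a.b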
dist.addmm_(inputs, inputs.t(), beta=1, alpha=-2)
dist = dist.clamp(min=1e-12).sqrt() # for numerical stability
# For each anchor, find the furthest positive sample
# and nearest negative sample in the embedding space
mask = targets.expand(batch_size, batch_size).eq(
targets.expand(batch_size, batch_size).t())
dist_ap, dist_an = [], []
for i in range(batch_size):
dist_ap.append(dist[i][mask[i]].max().unsqueeze(0))
dist_an.append(dist[i][mask[i] == 0].min().unsqueeze(0))
dist_ap = torch.cat(dist_ap)
dist_an = torch.cat(dist_an)
# Compute ranking hinge loss
y = torch.ones_like(dist_an)
return self.loss_weight * self.ranking_loss(dist_an, dist_ap, y)
def forward(self, inputs: torch.Tensor,
targets: torch.LongTensor) -> torch.Tensor:
"""
Args:
inputs (torch.Tensor): feature matrix with shape
(batch_size, feat_dim).
targets (torch.LongTensor): ground truth labels with shape
                (batch_size).
Returns:
torch.Tensor: triplet loss.
"""
if self.hard_mining:
return self.hard_mining_triplet_loss_forward(inputs, targets)
else:
raise NotImplementedError()
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import initializers
from keras.src import layers
from keras.src import ops
from keras.src import testing
from keras.src.models import Sequential
class TimeDistributedTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basics(self):
self.run_layer_test(
layers.TimeDistributed,
init_kwargs={"layer": layers.Dense(1, use_bias=False)},
input_shape=(3, 2, 4),
expected_output_shape=(3, 2, 1),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
def test_build(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (10, 128, 128, 3)
output_shape = (32, 10, 126, 126, 64)
else:
input_shape = (10, 3, 128, 128)
output_shape = (32, 10, 64, 126, 126)
inputs = layers.Input(shape=input_shape, batch_size=32)
conv_2d_layer = layers.Conv2D(64, (3, 3))
outputs = layers.TimeDistributed(conv_2d_layer)(inputs)
self.assertEqual(outputs.shape, output_shape)
def test_correctness(self):
sequence = np.arange(24).reshape((3, 2, 4)).astype("float32")
layer = layers.Dense(
1,
kernel_initializer=initializers.Constant(0.01),
use_bias=False,
)
layer = layers.TimeDistributed(layer=layer)
output = layer(sequence)
self.assertAllClose(
np.array(
[[[0.06], [0.22]], [[0.38], [0.53999996]], [[0.7], [0.86]]]
),
output,
)
def test_masking(self):
class MaskedDense(layers.Wrapper):
def __init__(self, units, **kwargs):
layer = layers.Dense(
units,
kernel_initializer=initializers.Constant(0.01),
use_bias=False,
)
super().__init__(layer, **kwargs)
self.supports_masking = True
def call(self, inputs, training=False, mask=None):
unmasked = self.layer.call(inputs)
if mask is None:
return unmasked
else:
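                    # Transposing puts the batch axis last so the 1-D mask broadcasts
                    # across it; transposing back restores the (batch, units) layout
                    # with masked positions zeroed out.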
return ops.transpose(
ops.transpose(unmasked) * ops.cast(mask, inputs.dtype)
)
sequence = np.arange(24).reshape((3, 2, 4)).astype("float32")
layer = layers.TimeDistributed(layer=MaskedDense(1))
mask = np.array([[False, True], [True, False], [True, True]])
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array([[[0], [0.22]], [[0.38], [0]], [[0.7], [0.86]]]),
output,
)
@pytest.mark.requires_trainable_backend
def test_with_mask_zero(self):
model = Sequential(
[
layers.Input(shape=(20,)),
layers.Embedding(input_dim=10, output_dim=5, mask_zero=True),
layers.TimeDistributed(
layers.Dense(units=5, activation="softmax")
),
]
)
model.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
X_train = np.random.uniform(1, 10, size=(22, 20))
Y_train = np.random.randint(1, 2, size=(22, 20))
model.fit(X_train, Y_train, epochs=1, batch_size=16)
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import initializers
from keras.src import layers
from keras.src import ops
from keras.src import testing
class TimeDistributedTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basics(self):
self.run_layer_test(
layers.TimeDistributed,
init_kwargs={"layer": layers.Dense(1, use_bias=False)},
input_shape=(3, 2, 4),
expected_output_shape=(3, 2, 1),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
def test_build(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (10, 128, 128, 3)
output_shape = (32, 10, 126, 126, 64)
else:
input_shape = (10, 3, 128, 128)
output_shape = (32, 10, 64, 126, 126)
inputs = layers.Input(shape=input_shape, batch_size=32)
conv_2d_layer = layers.Conv2D(64, (3, 3))
outputs = layers.TimeDistributed(conv_2d_layer)(inputs)
self.assertEqual(outputs.shape, output_shape)
def test_correctness(self):
sequence = np.arange(24).reshape((3, 2, 4)).astype("float32")
layer = layers.Dense(
1,
kernel_initializer=initializers.Constant(0.01),
use_bias=False,
)
layer = layers.TimeDistributed(layer=layer)
output = layer(sequence)
self.assertAllClose(
np.array(
[[[0.06], [0.22]], [[0.38], [0.53999996]], [[0.7], [0.86]]]
),
output,
)
def test_masking(self):
class MaskedDense(layers.Wrapper):
def __init__(self, units, **kwargs):
layer = layers.Dense(
units,
kernel_initializer=initializers.Constant(0.01),
use_bias=False,
)
super().__init__(layer, **kwargs)
self.supports_masking = True
def call(self, inputs, training=False, mask=None):
unmasked = self.layer.call(inputs)
if mask is None:
return unmasked
else:
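                    # Transposing puts the batch axis last so the 1-D mask broadcasts
                    # across it; transposing back restores the (batch, units) layout
                    # with masked positions zeroed out.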
return ops.transpose(
ops.transpose(unmasked) * ops.cast(mask, inputs.dtype)
)
sequence = np.arange(24).reshape((3, 2, 4)).astype("float32")
layer = layers.TimeDistributed(layer=MaskedDense(1))
mask = np.array([[False, True], [True, False], [True, True]])
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array([[[0], [0.22]], [[0.38], [0]], [[0.7], [0.86]]]),
output,
)
|
from typing import Optional
import numpy as np
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.typing import AnyTensor, ImageUrl
from jina import Deployment, Executor, Flow, requests
def test_different_document_schema():
class Image(BaseDocument):
tensor: Optional[AnyTensor]
url: ImageUrl
class MyExec(Executor):
@requests(on='/foo')
def foo(self, docs: DocumentArray[Image], **kwargs) -> DocumentArray[Image]:
for doc in docs:
doc.tensor = doc.url.load()
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/foo',
inputs=DocumentArray[Image](
[Image(url='https://via.placeholder.com/150.png')]
),
return_type=DocumentArray[Image],
)
docs = docs.stack()
assert docs.tensor.ndim == 4
def test_send_custom_doc():
class MyDoc(BaseDocument):
text: str
class MyExec(Executor):
@requests(on='/foo')
def foo(self, docs: DocumentArray[MyDoc], **kwargs):
docs[0].text = 'hello world'
with Flow().add(uses=MyExec) as f:
doc = f.post(
on='/foo', inputs=MyDoc(text='hello'), return_type=DocumentArray[MyDoc]
)
assert doc[0].text == 'hello world'
def test_input_response_schema():
class MyDoc(BaseDocument):
text: str
class MyExec(Executor):
@requests(
on='/foo',
request_schema=DocumentArray[MyDoc],
response_schema=DocumentArray[MyDoc],
)
def foo(self, docs, **kwargs):
assert docs.__class__.document_type == MyDoc
docs[0].text = 'hello world'
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/foo', inputs=MyDoc(text='hello'), return_type=DocumentArray[MyDoc]
)
assert docs[0].text == 'hello world'
assert docs.__class__.document_type == MyDoc
def test_input_response_schema_annotation():
class MyDoc(BaseDocument):
text: str
class MyExec(Executor):
@requests(on='/bar')
def bar(self, docs: DocumentArray[MyDoc], **kwargs) -> DocumentArray[MyDoc]:
assert docs.__class__.document_type == MyDoc
docs[0].text = 'hello world'
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/bar', inputs=MyDoc(text='hello'), return_type=DocumentArray[MyDoc]
)
assert docs[0].text == 'hello world'
assert docs.__class__.document_type == MyDoc
def test_different_output_input():
class InputDoc(BaseDocument):
img: ImageDoc
class OutputDoc(BaseDocument):
embedding: AnyTensor
class MyExec(Executor):
@requests(on='/bar')
def bar(
self, docs: DocumentArray[InputDoc], **kwargs
) -> DocumentArray[OutputDoc]:
docs_return = DocumentArray[OutputDoc](
[OutputDoc(embedding=np.zeros((100, 1))) for _ in range(len(docs))]
)
return docs_return
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/bar',
inputs=InputDoc(img=ImageDoc(tensor=np.zeros((3, 224, 224)))),
return_type=DocumentArray[OutputDoc],
)
assert docs[0].embedding.shape == (100, 1)
assert docs.__class__.document_type == OutputDoc
def test_deployments():
class InputDoc(BaseDocument):
img: ImageDoc
class OutputDoc(BaseDocument):
embedding: AnyTensor
class MyExec(Executor):
@requests(on='/bar')
def bar(
self, docs: DocumentArray[InputDoc], **kwargs
) -> DocumentArray[OutputDoc]:
docs_return = DocumentArray[OutputDoc](
[OutputDoc(embedding=np.zeros((100, 1))) for _ in range(len(docs))]
)
return docs_return
with Deployment(uses=MyExec) as dep:
docs = dep.post(
on='/bar',
inputs=InputDoc(img=ImageDoc(tensor=np.zeros((3, 224, 224)))),
return_type=DocumentArray[OutputDoc],
)
assert docs[0].embedding.shape == (100, 1)
assert docs.__class__.document_type == OutputDoc
|
from typing import Optional
import numpy as np
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
from docarray.typing import AnyTensor, ImageUrl
from jina import Deployment, Executor, Flow, requests
def test_different_document_schema():
class Image(BaseDocument):
tensor: Optional[AnyTensor]
url: ImageUrl
class MyExec(Executor):
@requests(on='/foo')
def foo(self, docs: DocumentArray[Image], **kwargs) -> DocumentArray[Image]:
for doc in docs:
doc.tensor = doc.url.load()
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/foo',
inputs=DocumentArray[Image](
[Image(url='https://via.placeholder.com/150.png')]
),
return_type=DocumentArray[Image],
)
docs = docs.stack()
assert docs.tensor.ndim == 4
def test_send_custom_doc():
class MyDoc(BaseDocument):
text: str
class MyExec(Executor):
@requests(on='/foo')
def foo(self, docs: DocumentArray[MyDoc], **kwargs):
docs[0].text = 'hello world'
with Flow().add(uses=MyExec) as f:
doc = f.post(
on='/foo', inputs=MyDoc(text='hello'), return_type=DocumentArray[MyDoc]
)
assert doc[0].text == 'hello world'
def test_input_response_schema():
class MyDoc(BaseDocument):
text: str
class MyExec(Executor):
@requests(
on='/foo',
request_schema=DocumentArray[MyDoc],
response_schema=DocumentArray[MyDoc],
)
def foo(self, docs, **kwargs):
assert docs.__class__.document_type == MyDoc
docs[0].text = 'hello world'
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/foo', inputs=MyDoc(text='hello'), return_type=DocumentArray[MyDoc]
)
assert docs[0].text == 'hello world'
assert docs.__class__.document_type == MyDoc
def test_input_response_schema_annotation():
class MyDoc(BaseDocument):
text: str
class MyExec(Executor):
@requests(on='/bar')
def bar(self, docs: DocumentArray[MyDoc], **kwargs) -> DocumentArray[MyDoc]:
assert docs.__class__.document_type == MyDoc
docs[0].text = 'hello world'
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/bar', inputs=MyDoc(text='hello'), return_type=DocumentArray[MyDoc]
)
assert docs[0].text == 'hello world'
assert docs.__class__.document_type == MyDoc
def test_different_output_input():
class InputDoc(BaseDocument):
img: Image
class OutputDoc(BaseDocument):
embedding: AnyTensor
class MyExec(Executor):
@requests(on='/bar')
def bar(
self, docs: DocumentArray[InputDoc], **kwargs
) -> DocumentArray[OutputDoc]:
docs_return = DocumentArray[OutputDoc](
[OutputDoc(embedding=np.zeros((100, 1))) for _ in range(len(docs))]
)
return docs_return
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/bar',
inputs=InputDoc(img=Image(tensor=np.zeros((3, 224, 224)))),
return_type=DocumentArray[OutputDoc],
)
assert docs[0].embedding.shape == (100, 1)
assert docs.__class__.document_type == OutputDoc
def test_deployments():
class InputDoc(BaseDocument):
img: Image
class OutputDoc(BaseDocument):
embedding: AnyTensor
class MyExec(Executor):
@requests(on='/bar')
def bar(
self, docs: DocumentArray[InputDoc], **kwargs
) -> DocumentArray[OutputDoc]:
docs_return = DocumentArray[OutputDoc](
[OutputDoc(embedding=np.zeros((100, 1))) for _ in range(len(docs))]
)
return docs_return
with Deployment(uses=MyExec) as dep:
docs = dep.post(
on='/bar',
inputs=InputDoc(img=Image(tensor=np.zeros((3, 224, 224)))),
return_type=DocumentArray[OutputDoc],
)
assert docs[0].embedding.shape == (100, 1)
assert docs.__class__.document_type == OutputDoc
|
"""Tests for the InMemoryStore class."""
import pytest
from langchain_core.stores import InMemoryStore
from langchain_tests.integration_tests.base_store import (
BaseStoreAsyncTests,
BaseStoreSyncTests,
)
class TestInMemoryStore(BaseStoreSyncTests):
@pytest.fixture
def three_values(self) -> tuple[str, str, str]:
return "foo", "bar", "buzz"
@pytest.fixture
def kv_store(self) -> InMemoryStore:
return InMemoryStore()
class TestInMemoryStoreAsync(BaseStoreAsyncTests):
@pytest.fixture
def three_values(self) -> tuple[str, str, str]: # type: ignore
return "foo", "bar", "buzz"
@pytest.fixture
async def kv_store(self) -> InMemoryStore:
return InMemoryStore()
|
"""Tests for the InMemoryStore class."""
from typing import Tuple
import pytest
from langchain_core.stores import InMemoryStore
from langchain_tests.integration_tests.base_store import (
BaseStoreAsyncTests,
BaseStoreSyncTests,
)
class TestInMemoryStore(BaseStoreSyncTests):
@pytest.fixture
def three_values(self) -> Tuple[str, str, str]:
return "foo", "bar", "buzz"
@pytest.fixture
def kv_store(self) -> InMemoryStore:
return InMemoryStore()
class TestInMemoryStoreAsync(BaseStoreAsyncTests):
@pytest.fixture
def three_values(self) -> Tuple[str, str, str]: # type: ignore
return "foo", "bar", "buzz"
@pytest.fixture
async def kv_store(self) -> InMemoryStore:
return InMemoryStore()
|
import warnings
from typing import Optional, Tuple, TypeVar
from docarray.typing import AudioNdArray
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
T = TypeVar('T', bound='AudioUrl')
@_register_proto(proto_type_name='audio_url')
class AudioUrl(AnyUrl):
"""
URL to an audio file.
Can be remote (web) URL, or a local file path.
"""
def load(self: T) -> Tuple[AudioNdArray, int]:
"""
Load the data from the url into an [`AudioNdArray`][docarray.typing.AudioNdArray]
and the frame rate.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioNdArray, AudioUrl
class MyDoc(BaseDoc):
audio_url: AudioUrl
audio_tensor: Optional[AudioNdArray]
doc = MyDoc(audio_url='https://www.kozco.com/tech/piano2.wav')
doc.audio_tensor, _ = doc.audio_url.load()
assert isinstance(doc.audio_tensor, AudioNdArray)
```
---
:return: tuple of an [`AudioNdArray`][docarray.typing.AudioNdArray] representing
the audio file content, and an integer representing the frame rate.
"""
bytes_ = self.load_bytes()
return bytes_.load()
def load_bytes(self, timeout: Optional[float] = None) -> AudioBytes:
"""
Convert url to [`AudioBytes`][docarray.typing.AudioBytes]. This will either load or
download the file and save it into an [`AudioBytes`][docarray.typing.AudioBytes] object.
:param timeout: timeout for urlopen. Only relevant if url is not local
:return: [`AudioBytes`][docarray.typing.AudioBytes] object
"""
bytes_ = super().load_bytes(timeout=timeout)
return AudioBytes(bytes_)
def display(self):
"""
Play the audio sound from url in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
            remote_url = self.startswith('http')
if remote_url:
display(Audio(data=self))
else:
display(Audio(filename=self))
else:
warnings.warn('Display of audio is only possible in a notebook.')
|
import warnings
from typing import Optional, Tuple, TypeVar
from docarray.typing import AudioNdArray
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
T = TypeVar('T', bound='AudioUrl')
@_register_proto(proto_type_name='audio_url')
class AudioUrl(AnyUrl):
"""
URL to an audio file.
Can be remote (web) URL, or a local file path.
"""
def load(self: T) -> Tuple[AudioNdArray, int]:
"""
Load the data from the url into an [`AudioNdArray`][docarray.typing.AudioNdArray]
and the frame rate.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioNdArray, AudioUrl
class MyDoc(BaseDoc):
audio_url: AudioUrl
audio_tensor: Optional[AudioNdArray]
doc = MyDoc(audio_url='https://www.kozco.com/tech/piano2.wav')
doc.audio_tensor, _ = doc.audio_url.load()
assert isinstance(doc.audio_tensor, AudioNdArray)
```
---
:return: tuple of an [`AudioNdArray`][docarray.typing.AudioNdArray] representing
the audio file content, and an integer representing the frame rate.
"""
bytes_ = self.load_bytes()
return bytes_.load()
def load_bytes(self, timeout: Optional[float] = None) -> AudioBytes:
"""
Convert url to [`AudioBytes`][docarray.typing.AudioBytes]. This will either load or
download the file and save it into an [`AudioBytes`][docarray.typing.AudioBytes] object.
:param timeout: timeout for urlopen. Only relevant if url is not local
:return: [`AudioBytes`][docarray.typing.AudioBytes] object
"""
bytes_ = super().load_bytes(timeout=timeout)
return AudioBytes(bytes_)
def display(self):
"""
Play the audio sound from url in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
            remote_url = self.startswith('http')
if remote_url:
display(Audio(data=self))
else:
display(Audio(filename=self))
else:
            warnings.warn('Display of audio is only possible in a notebook.')
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"StreamlitChatMessageHistory": "langchain_community.chat_message_histories",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"StreamlitChatMessageHistory",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"StreamlitChatMessageHistory": "langchain_community.chat_message_histories"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"StreamlitChatMessageHistory",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .faster_rcnn import FasterRCNN
@DETECTORS.register_module()
class TridentFasterRCNN(FasterRCNN):
"""Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(TridentFasterRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
assert self.backbone.num_branch == self.roi_head.num_branch
assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx
self.num_branch = self.backbone.num_branch
self.test_branch_idx = self.backbone.test_branch_idx
def simple_test(self, img, img_metas, proposals=None, rescale=False):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
x = self.extract_feat(img)
if proposals is None:
num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
trident_img_metas = img_metas * num_branch
proposal_list = self.rpn_head.simple_test_rpn(x, trident_img_metas)
else:
proposal_list = proposals
trident_img_metas = img_metas
return self.roi_head.simple_test(
x, proposal_list, trident_img_metas, rescale=rescale)
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
x = self.extract_feats(imgs)
num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
trident_img_metas = [img_metas * num_branch for img_metas in img_metas]
proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs):
"""make copies of img and gts to fit multi-branch."""
trident_gt_bboxes = tuple(gt_bboxes * self.num_branch)
trident_gt_labels = tuple(gt_labels * self.num_branch)
trident_img_metas = tuple(img_metas * self.num_branch)
return super(TridentFasterRCNN,
self).forward_train(img, trident_img_metas,
trident_gt_bboxes, trident_gt_labels)
|
from ..builder import DETECTORS
from .faster_rcnn import FasterRCNN
@DETECTORS.register_module()
class TridentFasterRCNN(FasterRCNN):
"""Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(TridentFasterRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
assert self.backbone.num_branch == self.roi_head.num_branch
assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx
self.num_branch = self.backbone.num_branch
self.test_branch_idx = self.backbone.test_branch_idx
def simple_test(self, img, img_metas, proposals=None, rescale=False):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
x = self.extract_feat(img)
if proposals is None:
num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
trident_img_metas = img_metas * num_branch
proposal_list = self.rpn_head.simple_test_rpn(x, trident_img_metas)
else:
proposal_list = proposals
trident_img_metas = img_metas
return self.roi_head.simple_test(
x, proposal_list, trident_img_metas, rescale=rescale)
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
x = self.extract_feats(imgs)
num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
trident_img_metas = [img_metas * num_branch for img_metas in img_metas]
proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs):
"""make copies of img and gts to fit multi-branch."""
trident_gt_bboxes = tuple(gt_bboxes * self.num_branch)
trident_gt_labels = tuple(gt_labels * self.num_branch)
trident_img_metas = tuple(img_metas * self.num_branch)
return super(TridentFasterRCNN,
self).forward_train(img, trident_img_metas,
trident_gt_bboxes, trident_gt_labels)
|
_base_ = './mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FOVEA(SingleStageDetector):
"""Implementation of `FoveaBox <https://arxiv.org/abs/1904.03797>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone config.
neck (:obj:`ConfigDict` or dict): The neck config.
bbox_head (:obj:`ConfigDict` or dict): The bbox head config.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of FOVEA. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of FOVEA. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FOVEA(SingleStageDetector):
"""Implementation of `FoveaBox <https://arxiv.org/abs/1904.03797>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(FOVEA, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
from .functional import add_noise, convolve, fftconvolve
__all__ = ["add_noise", "convolve", "fftconvolve"]
|
from .functional import convolve, fftconvolve
__all__ = ["convolve", "fftconvolve"]
|
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
backbone=dict(
embed_dims=64,
num_layers=[3, 6, 40, 3],
mlp_ratios=(4, 4, 4, 4),
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b5.pth')),
neck=dict(in_channels=[64, 128, 320, 512]))
# optimizer
optimizer = dict(
_delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001)
# dataset settings
data = dict(samples_per_gpu=1, workers_per_gpu=1)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
auto_scale_lr = dict(base_batch_size=8)
|
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
backbone=dict(
embed_dims=64,
num_layers=[3, 6, 40, 3],
mlp_ratios=(4, 4, 4, 4),
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b5.pth')),
neck=dict(in_channels=[64, 128, 320, 512]))
# optimizer
optimizer = dict(
_delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001)
# dataset settings
data = dict(samples_per_gpu=1, workers_per_gpu=1)
|
_base_ = [
'mmdet::_base_/models/mask-rcnn_r50_fpn.py',
'mmdet::_base_/datasets/coco_instance.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
# please install the mmpretrain
# import mmpretrain.models to trigger register_module in mmpretrain
custom_imports = dict(
imports=['mmpretrain.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-base_3rdparty-fcmae_in1k_20230104-8a798eaf.pth' # noqa
image_size = (1024, 1024)
model = dict(
backbone=dict(
_delete_=True,
type='mmpretrain.ConvNeXt',
arch='base',
out_indices=[0, 1, 2, 3],
# TODO: verify stochastic depth rate {0.1, 0.2, 0.3, 0.4}
drop_path_rate=0.4,
layer_scale_init_value=0., # disable layer scale when using GRN
gap_before_final_norm=False,
use_grn=True, # V2 uses GRN
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')),
neck=dict(in_channels=[128, 256, 512, 1024]),
test_cfg=dict(
rpn=dict(nms=dict(type='nms')), # TODO: does RPN use soft_nms?
rcnn=dict(nms=dict(type='soft_nms'))))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(
batch_size=4, # total_batch_size 32 = 8 GPUS x 4 images
num_workers=8,
dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
constructor='LearningRateDecayOptimizerConstructor',
paramwise_cfg={
'decay_rate': 0.95,
'decay_type': 'layer_wise', # TODO: sweep layer-wise lr decay?
'num_layers': 12
},
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
))
default_hooks = dict(checkpoint=dict(max_keep_ckpts=1))
|
_base_ = [
'mmdet::_base_/models/mask-rcnn_r50_fpn.py',
'mmdet::_base_/datasets/coco_instance.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
# please install the mmclassification dev-1.x branch
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-base_3rdparty-fcmae_in1k_20230104-8a798eaf.pth' # noqa
image_size = (1024, 1024)
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='base',
out_indices=[0, 1, 2, 3],
# TODO: verify stochastic depth rate {0.1, 0.2, 0.3, 0.4}
drop_path_rate=0.4,
layer_scale_init_value=0., # disable layer scale when using GRN
gap_before_final_norm=False,
use_grn=True, # V2 uses GRN
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')),
neck=dict(in_channels=[128, 256, 512, 1024]),
test_cfg=dict(
rpn=dict(nms=dict(type='nms')), # TODO: does RPN use soft_nms?
rcnn=dict(nms=dict(type='soft_nms'))))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(
batch_size=4, # total_batch_size 32 = 8 GPUS x 4 images
num_workers=8,
dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
constructor='LearningRateDecayOptimizerConstructor',
paramwise_cfg={
'decay_rate': 0.95,
'decay_type': 'layer_wise', # TODO: sweep layer-wise lr decay?
'num_layers': 12
},
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
))
default_hooks = dict(checkpoint=dict(max_keep_ckpts=1))
|
from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
SparseEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
"""
|
from __future__ import annotations
from dataclasses import dataclass, field
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
SparseEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
"""
# Sparsity parameters
sparsity_threshold: float = field(default=0.0, metadata={"help": "Threshold for sparsifying embeddings"})
topk: int | None = field(
default=None,
metadata={"help": "Number of top values to keep in sparse embeddings"},
)
|
import asyncio
import time
import pytest
from jina import Document
from jina.clients.request import request_generator
from jina.serve.stream.helper import AsyncRequestsIterator, _RequestsCounter
def slow_blocking_generator():
for i in range(2):
yield Document(id=str(i))
time.sleep(2)
@pytest.mark.asyncio
async def test_iter_requests():
iter = request_generator(exec_endpoint='/', data=slow_blocking_generator())
count = 0
num_reqs = 0
async def another_task():
nonlocal count
for _ in range(20):
await asyncio.sleep(0.2)
count += 1
task = asyncio.create_task(another_task())
async for _ in AsyncRequestsIterator(iter):
"""Using following code will block the event loop and count will be <5
for _ in iter:
...
"""
num_reqs += 1
task.cancel()
# ideally count will be 20, but to avoid flaky CI
assert count > 15
@pytest.mark.asyncio
async def test_iter_requests_with_prefetch():
max_amount_requests = _RequestsCounter()
counter = _RequestsCounter()
async def consume_requests():
while True:
await asyncio.sleep(0.01)
if counter.count > 0:
counter.count -= 1
async def req_iterator(max_amount_requests):
for i in range(1000):
await asyncio.sleep(0.001)
counter.count += 1
if counter.count > max_amount_requests.count:
max_amount_requests.count = counter.count
yield i
consume_task = asyncio.create_task(consume_requests())
async for _ in AsyncRequestsIterator(
req_iterator(max_amount_requests), counter, 10
):
pass
consume_task.cancel()
assert max_amount_requests.count == 10
|
import asyncio
import time
import pytest
from jina import Document
from jina.clients.request import request_generator
from jina.serve.stream.helper import AsyncRequestsIterator, RequestsCounter
def slow_blocking_generator():
for i in range(2):
yield Document(id=str(i))
time.sleep(2)
@pytest.mark.asyncio
async def test_iter_requests():
iter = request_generator(exec_endpoint='/', data=slow_blocking_generator())
count = 0
num_reqs = 0
async def another_task():
nonlocal count
for _ in range(20):
await asyncio.sleep(0.2)
count += 1
task = asyncio.create_task(another_task())
async for _ in AsyncRequestsIterator(iter):
"""Using following code will block the event loop and count will be <5
for _ in iter:
...
"""
num_reqs += 1
task.cancel()
# ideally count will be 20, but to avoid flaky CI
assert count > 15
@pytest.mark.asyncio
async def test_iter_requests_with_prefetch():
max_amount_requests = RequestsCounter()
counter = RequestsCounter()
async def consume_requests():
while True:
await asyncio.sleep(0.01)
if counter.count > 0:
counter.count -= 1
async def req_iterator(max_amount_requests):
for i in range(1000):
await asyncio.sleep(0.001)
counter.count += 1
if counter.count > max_amount_requests.count:
max_amount_requests.count = counter.count
yield i
consume_task = asyncio.create_task(consume_requests())
async for _ in AsyncRequestsIterator(
req_iterator(max_amount_requests), counter, 10
):
pass
consume_task.cancel()
assert max_amount_requests.count == 10
|
"""
In this example we train a semantic search model to search through Wikipedia
articles about programming articles & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C , C Sharp , C++, Go , Java , JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natural Language Toolkit, NumPy, pandas (software), Perl, PHP, PostgreSQL, Python , PyTorch, R , React, Rust , Scala , scikit-learn, SciPy, Swift , TensorFlow, Vue.js
In:
1_programming_query_generation.py - We generate queries for all paragraphs from these articles
2_programming_train_bi-encoder.py - We train a SentenceTransformer bi-encoder with these generated queries. This results in a model we can then use for semantic search (for the given Wikipedia articles).
3_programming_semantic_search.py - Shows how the trained model can be used for semantic search
"""
import gzip
import json
import os
import torch
import tqdm
from transformers import T5ForConditionalGeneration, T5Tokenizer
from sentence_transformers import util
paragraphs = set()
# We use the Wikipedia articles of certain programming languages
corpus_filepath = "wiki-programmming-20210101.jsonl.gz"
if not os.path.exists(corpus_filepath):
util.http_get("https://sbert.net/datasets/wiki-programmming-20210101.jsonl.gz", corpus_filepath)
with gzip.open(corpus_filepath, "rt") as fIn:
for line in fIn:
data = json.loads(line.strip())
for p in data["paragraphs"]:
if len(p) > 100: # Only take paragraphs with at least 100 chars
paragraphs.add(p)
paragraphs = list(paragraphs)
print("Paragraphs:", len(paragraphs))
# Now we load the model that is able to generate queries given a paragraph.
# This model was trained on the MS MARCO dataset, a dataset with 500k
# queries from Bing and the respective relevant passage
tokenizer = T5Tokenizer.from_pretrained("BeIR/query-gen-msmarco-t5-large-v1")
model = T5ForConditionalGeneration.from_pretrained("BeIR/query-gen-msmarco-t5-large-v1")
model.eval()
# Select the device
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
# Parameters for generation
batch_size = 8 # Batch size
num_queries = 5 # Number of queries to generate for every paragraph
max_length_paragraph = 300 # Max length for paragraph
max_length_query = 64 # Max length for output query
# Now for every paragraph in our corpus, we generate the queries
with open("generated_queries.tsv", "w") as fOut:
for start_idx in tqdm.trange(0, len(paragraphs), batch_size):
sub_paragraphs = paragraphs[start_idx : start_idx + batch_size]
inputs = tokenizer.prepare_seq2seq_batch(
sub_paragraphs, max_length=max_length_paragraph, truncation=True, return_tensors="pt"
).to(device)
outputs = model.generate(
**inputs, max_length=max_length_query, do_sample=True, top_p=0.95, num_return_sequences=num_queries
)
for idx, out in enumerate(outputs):
query = tokenizer.decode(out, skip_special_tokens=True)
para = sub_paragraphs[int(idx / num_queries)]
fOut.write("{}\t{}\n".format(query.replace("\t", " ").strip(), para.replace("\t", " ").strip()))
|
"""
In this example we train a semantic search model to search through Wikipedia
articles about programming articles & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C , C Sharp , C++, Go , Java , JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natural Language Toolkit, NumPy, pandas (software), Perl, PHP, PostgreSQL, Python , PyTorch, R , React, Rust , Scala , scikit-learn, SciPy, Swift , TensorFlow, Vue.js
In:
1_programming_query_generation.py - We generate queries for all paragraphs from these articles
2_programming_train_bi-encoder.py - We train a SentenceTransformer bi-encoder with these generated queries. This results in a model we can then use for semantic search (for the given Wikipedia articles).
3_programming_semantic_search.py - Shows how the trained model can be used for semantic search
"""
import json
import gzip
from transformers import T5Tokenizer, T5ForConditionalGeneration
import torch
import tqdm
import os
from sentence_transformers import util
paragraphs = set()
# We use the Wikipedia articles of certain programming languages
corpus_filepath = "wiki-programmming-20210101.jsonl.gz"
if not os.path.exists(corpus_filepath):
util.http_get("https://sbert.net/datasets/wiki-programmming-20210101.jsonl.gz", corpus_filepath)
with gzip.open(corpus_filepath, "rt") as fIn:
for line in fIn:
data = json.loads(line.strip())
for p in data["paragraphs"]:
if len(p) > 100: # Only take paragraphs with at least 100 chars
paragraphs.add(p)
paragraphs = list(paragraphs)
print("Paragraphs:", len(paragraphs))
# Now we load the model that is able to generate queries given a paragraph.
# This model was trained on the MS MARCO dataset, a dataset with 500k
# queries from Bing and the respective relevant passage
tokenizer = T5Tokenizer.from_pretrained("BeIR/query-gen-msmarco-t5-large-v1")
model = T5ForConditionalGeneration.from_pretrained("BeIR/query-gen-msmarco-t5-large-v1")
model.eval()
# Select the device
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
# Parameters for generation
batch_size = 8 # Batch size
num_queries = 5 # Number of queries to generate for every paragraph
max_length_paragraph = 300 # Max length for paragraph
max_length_query = 64 # Max length for output query
# Now for every paragraph in our corpus, we generate the queries
with open("generated_queries.tsv", "w") as fOut:
for start_idx in tqdm.trange(0, len(paragraphs), batch_size):
sub_paragraphs = paragraphs[start_idx : start_idx + batch_size]
inputs = tokenizer.prepare_seq2seq_batch(
sub_paragraphs, max_length=max_length_paragraph, truncation=True, return_tensors="pt"
).to(device)
outputs = model.generate(
**inputs, max_length=max_length_query, do_sample=True, top_p=0.95, num_return_sequences=num_queries
)
for idx, out in enumerate(outputs):
query = tokenizer.decode(out, skip_special_tokens=True)
para = sub_paragraphs[int(idx / num_queries)]
fOut.write("{}\t{}\n".format(query.replace("\t", " ").strip(), para.replace("\t", " ").strip()))
|
from __future__ import annotations
from typing_extensions import deprecated
from sentence_transformers import InputExample
from sentence_transformers.cross_encoder.evaluation.CEClassificationEvaluator import CEClassificationEvaluator
@deprecated(
"This evaluator has been deprecated in favor of the more general CEClassificationEvaluator. "
"Please use CEClassificationEvaluator instead, which supports both binary and multi-class "
"evaluation. It accepts approximately the same inputs as this evaluator."
)
class CEBinaryClassificationEvaluator(CEClassificationEvaluator):
"""
This evaluator has been deprecated in favor of the more general CEClassificationEvaluator.
"""
@classmethod
def from_input_examples(cls, examples: list[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
|
from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sklearn.metrics import average_precision_score
from sentence_transformers import InputExample
from sentence_transformers.evaluation import BinaryClassificationEvaluator
logger = logging.getLogger(__name__)
class CEBinaryClassificationEvaluator:
"""
This evaluator can be used with the CrossEncoder class. Given sentence pairs and binary labels (0 and 1),
it computes the average precision and the best possible F1 score.
"""
def __init__(
self,
sentence_pairs: list[list[str]],
labels: list[int],
name: str = "",
show_progress_bar: bool = False,
write_csv: bool = True,
):
assert len(sentence_pairs) == len(labels)
for label in labels:
assert label == 0 or label == 1
self.sentence_pairs = sentence_pairs
self.labels = np.asarray(labels)
self.name = name
if show_progress_bar is None:
show_progress_bar = (
logger.getEffectiveLevel() == logging.INFO or logger.getEffectiveLevel() == logging.DEBUG
)
self.show_progress_bar = show_progress_bar
self.csv_file = "CEBinaryClassificationEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = [
"epoch",
"steps",
"Accuracy",
"Accuracy_Threshold",
"F1",
"F1_Threshold",
"Precision",
"Recall",
"Average_Precision",
]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: list[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = f" after epoch {epoch}:"
else:
out_txt = f" in epoch {epoch} after {steps} steps:"
else:
out_txt = ":"
logger.info("CEBinaryClassificationEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(
self.sentence_pairs, convert_to_numpy=True, show_progress_bar=self.show_progress_bar
)
acc, acc_threshold = BinaryClassificationEvaluator.find_best_acc_and_threshold(pred_scores, self.labels, True)
f1, precision, recall, f1_threshold = BinaryClassificationEvaluator.find_best_f1_and_threshold(
pred_scores, self.labels, True
)
ap = average_precision_score(self.labels, pred_scores)
logger.info(f"Accuracy: {acc * 100:.2f}\t(Threshold: {acc_threshold:.4f})")
logger.info(f"F1: {f1 * 100:.2f}\t(Threshold: {f1_threshold:.4f})")
logger.info(f"Precision: {precision * 100:.2f}")
logger.info(f"Recall: {recall * 100:.2f}")
logger.info(f"Average Precision: {ap * 100:.2f}\n")
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc, acc_threshold, f1, f1_threshold, precision, recall, ap])
return ap
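# Minimal usage sketch (illustrative, not part of the original file): constructing the
# evaluator directly from sentence pairs and binary labels, then scoring a CrossEncoder.
# The model name and the toy data below are placeholders.
if __name__ == "__main__":
    from sentence_transformers.cross_encoder import CrossEncoder

    model = CrossEncoder("cross-encoder/stsb-distilroberta-base")
    sentence_pairs = [
        ["A man is eating food.", "A man is eating a meal."],
        ["A man is eating food.", "The sky is blue."],
    ]
    labels = [1, 0]
    evaluator = CEBinaryClassificationEvaluator(sentence_pairs, labels, name="toy-dev")
    average_precision = evaluator(model)  # returns the average precision as a float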
|
from __future__ import annotations
import json
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter
from pydantic import Field
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR
@deprecated(
since="0.2.7",
alternative=(
"example in API reference with more detail: "
"https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html"
),
removal="1.0",
)
class QAGenerationChain(Chain):
"""Base class for question-answer generation chains.
This class is deprecated. See below for an alternative implementation.
Advantages of this implementation include:
- Supports async and streaming;
- Surfaces prompt and text splitter for easier customization;
- Use of JsonOutputParser supports JSONPatch operations in streaming mode,
as well as robustness to markdown.
.. code-block:: python
from langchain.chains.qa_generation.prompt import CHAT_PROMPT as prompt
# Note: import PROMPT if using a legacy non-chat model.
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.runnables import (
RunnableLambda,
RunnableParallel,
RunnablePassthrough,
)
from langchain_core.runnables.base import RunnableEach
from langchain_openai import ChatOpenAI
from langchain_text_splitters import RecursiveCharacterTextSplitter
llm = ChatOpenAI()
text_splitter = RecursiveCharacterTextSplitter(chunk_overlap=500)
split_text = RunnableLambda(
lambda x: text_splitter.create_documents([x])
)
chain = RunnableParallel(
text=RunnablePassthrough(),
questions=(
split_text | RunnableEach(bound=prompt | llm | JsonOutputParser())
)
)
"""
llm_chain: LLMChain
"""LLM Chain that generates responses from user input and context."""
text_splitter: TextSplitter = Field(
default=RecursiveCharacterTextSplitter(chunk_overlap=500)
)
"""Text splitter that splits the input into chunks."""
input_key: str = "text"
"""Key of the input to the chain."""
output_key: str = "questions"
"""Key of the output of the chain."""
k: Optional[int] = None
"""Number of questions to generate."""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> QAGenerationChain:
"""
Create a QAGenerationChain from a language model.
Args:
llm: a language model
prompt: a prompt template
**kwargs: additional arguments
Returns:
a QAGenerationChain class
"""
_prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
chain = LLMChain(llm=llm, prompt=_prompt)
return cls(llm_chain=chain, **kwargs)
@property
def _chain_type(self) -> str:
raise NotImplementedError
@property
def input_keys(self) -> list[str]:
return [self.input_key]
@property
def output_keys(self) -> list[str]:
return [self.output_key]
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, list]:
docs = self.text_splitter.create_documents([inputs[self.input_key]])
results = self.llm_chain.generate(
[{"text": d.page_content} for d in docs], run_manager=run_manager
)
qa = [json.loads(res[0].text) for res in results.generations]
return {self.output_key: qa}
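# Illustrative usage (not part of the original file): building the deprecated chain via
# `from_llm` and generating question/answer pairs. The chat model below is an assumption
# and requires the corresponding API credentials.
if __name__ == "__main__":
    from langchain_openai import ChatOpenAI

    chain = QAGenerationChain.from_llm(ChatOpenAI())
    result = chain.invoke({"text": "LangChain is a framework for developing LLM applications."})
    qa_pairs = result["questions"]  # list of generated question/answer dicts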
|
from __future__ import annotations
import json
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter
from pydantic import Field
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR
@deprecated(
since="0.2.7",
alternative=(
"example in API reference with more detail: "
"https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html" # noqa: E501
),
removal="1.0",
)
class QAGenerationChain(Chain):
"""Base class for question-answer generation chains.
This class is deprecated. See below for an alternative implementation.
Advantages of this implementation include:
- Supports async and streaming;
- Surfaces prompt and text splitter for easier customization;
- Use of JsonOutputParser supports JSONPatch operations in streaming mode,
as well as robustness to markdown.
.. code-block:: python
from langchain.chains.qa_generation.prompt import CHAT_PROMPT as prompt
# Note: import PROMPT if using a legacy non-chat model.
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.runnables import (
RunnableLambda,
RunnableParallel,
RunnablePassthrough,
)
from langchain_core.runnables.base import RunnableEach
from langchain_openai import ChatOpenAI
from langchain_text_splitters import RecursiveCharacterTextSplitter
llm = ChatOpenAI()
text_splitter = RecursiveCharacterTextSplitter(chunk_overlap=500)
split_text = RunnableLambda(
lambda x: text_splitter.create_documents([x])
)
chain = RunnableParallel(
text=RunnablePassthrough(),
questions=(
split_text | RunnableEach(bound=prompt | llm | JsonOutputParser())
)
)
"""
llm_chain: LLMChain
"""LLM Chain that generates responses from user input and context."""
text_splitter: TextSplitter = Field(
default=RecursiveCharacterTextSplitter(chunk_overlap=500)
)
"""Text splitter that splits the input into chunks."""
input_key: str = "text"
"""Key of the input to the chain."""
output_key: str = "questions"
"""Key of the output of the chain."""
k: Optional[int] = None
"""Number of questions to generate."""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> QAGenerationChain:
"""
Create a QAGenerationChain from a language model.
Args:
llm: a language model
prompt: a prompt template
**kwargs: additional arguments
Returns:
a QAGenerationChain class
"""
_prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
chain = LLMChain(llm=llm, prompt=_prompt)
return cls(llm_chain=chain, **kwargs)
@property
def _chain_type(self) -> str:
raise NotImplementedError
@property
def input_keys(self) -> list[str]:
return [self.input_key]
@property
def output_keys(self) -> list[str]:
return [self.output_key]
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, list]:
docs = self.text_splitter.create_documents([inputs[self.input_key]])
results = self.llm_chain.generate(
[{"text": d.page_content} for d in docs], run_manager=run_manager
)
qa = [json.loads(res[0].text) for res in results.generations]
return {self.output_key: qa}
|
"""
This example trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_quora_duplicate_questions.py
"""
import csv
import logging
import math
import os
from datetime import datetime
from zipfile import ZipFile
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEBinaryClassificationEvaluator
from sentence_transformers.readers import InputExample
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
dataset_path = "quora-dataset/"
if not os.path.exists(dataset_path):
logger.info("Dataset not found. Download")
zip_save_path = "quora-IR-dataset.zip"
util.http_get(url="https://sbert.net/datasets/quora-IR-dataset.zip", path=zip_save_path)
with ZipFile(zip_save_path, "r") as zip:
zip.extractall(dataset_path)
# Read the quora dataset split for classification
logger.info("Read train dataset")
train_samples = []
with open(os.path.join(dataset_path, "classification", "train_pairs.tsv"), "r", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
train_samples.append(InputExample(texts=[row["question1"], row["question2"]], label=int(row["is_duplicate"])))
train_samples.append(InputExample(texts=[row["question2"], row["question1"]], label=int(row["is_duplicate"])))
logger.info("Read dev dataset")
dev_samples = []
with open(os.path.join(dataset_path, "classification", "dev_pairs.tsv"), "r", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
dev_samples.append(InputExample(texts=[row["question1"], row["question2"]], label=int(row["is_duplicate"])))
# Configuration
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_quora-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base with a single label, i.e., it will output a value between 0 and 1 indicating the similarity of the two questions
model = CrossEncoder("distilroberta-base", num_labels=1)
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CEBinaryClassificationEvaluator.from_input_examples(dev_samples, name="Quora-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=5000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
|
"""
This example trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_quora_duplicate_questions.py
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEBinaryClassificationEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import os
import csv
from zipfile import ZipFile
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
dataset_path = "quora-dataset/"
if not os.path.exists(dataset_path):
logger.info("Dataset not found. Download")
zip_save_path = "quora-IR-dataset.zip"
util.http_get(url="https://sbert.net/datasets/quora-IR-dataset.zip", path=zip_save_path)
with ZipFile(zip_save_path, "r") as zip:
zip.extractall(dataset_path)
# Read the quora dataset split for classification
logger.info("Read train dataset")
train_samples = []
with open(os.path.join(dataset_path, "classification", "train_pairs.tsv"), "r", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
train_samples.append(InputExample(texts=[row["question1"], row["question2"]], label=int(row["is_duplicate"])))
train_samples.append(InputExample(texts=[row["question2"], row["question1"]], label=int(row["is_duplicate"])))
logger.info("Read dev dataset")
dev_samples = []
with open(os.path.join(dataset_path, "classification", "dev_pairs.tsv"), "r", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
dev_samples.append(InputExample(texts=[row["question1"], row["question2"]], label=int(row["is_duplicate"])))
# Configuration
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_quora-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base with a single label, i.e., it will output a value between 0 and 1 indicating the similarity of the two questions
model = CrossEncoder("distilroberta-base", num_labels=1)
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CEBinaryClassificationEvaluator.from_input_examples(dev_samples, name="Quora-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=5000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
|
"""base multi modal retriever."""
from abc import abstractmethod
from typing import List
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.image_retriever import BaseImageRetriever
from llama_index.core.indices.query.schema import QueryType
from llama_index.core.schema import NodeWithScore
class MultiModalRetriever(BaseRetriever, BaseImageRetriever):
"""Multi Modal base retriever."""
@abstractmethod
def text_retrieve(self, str_or_query_bundle: QueryType) -> List[NodeWithScore]:
"""
Retrieve text nodes given text query.
Implemented by the user.
"""
@abstractmethod
def text_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""
Retrieve image nodes given text query.
Implemented by the user.
"""
@abstractmethod
def image_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""
Retrieve image nodes given image query.
Implemented by the user.
"""
@abstractmethod
async def atext_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""
Async Retrieve text nodes given text query.
Implemented by the user.
"""
@abstractmethod
async def atext_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""
Async Retrieve image nodes given text query.
Implemented by the user.
"""
@abstractmethod
async def aimage_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""
Async Retrieve image nodes given image query.
Implemented by the user.
"""
|
"""base multi modal retriever."""
from abc import abstractmethod
from typing import List
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.image_retriever import BaseImageRetriever
from llama_index.core.indices.query.schema import QueryType
from llama_index.core.schema import NodeWithScore
class MultiModalRetriever(BaseRetriever, BaseImageRetriever):
"""Multi Modal base retriever."""
@abstractmethod
def text_retrieve(self, str_or_query_bundle: QueryType) -> List[NodeWithScore]:
"""Retrieve text nodes given text query.
Implemented by the user.
"""
@abstractmethod
def text_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""Retrieve image nodes given text query.
Implemented by the user.
"""
@abstractmethod
def image_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""Retrieve image nodes given image query.
Implemented by the user.
"""
@abstractmethod
async def atext_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""Async Retrieve text nodes given text query.
Implemented by the user.
"""
@abstractmethod
async def atext_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""Async Retrieve image nodes given text query.
Implemented by the user.
"""
@abstractmethod
async def aimage_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""Async Retrieve image nodes given image query.
Implemented by the user.
"""
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/openimages_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=601)))
# Using 32 GPUS while training
optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=26000,
warmup_ratio=1.0 / 64,
step=[8, 11])
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/openimages_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=601)))
# Using 32 GPUS while training
optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=26000,
warmup_ratio=1.0 / 64,
step=[8, 11])
|
from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.image import ImageNdArray, ImageTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
__all__ = [
'NdArray',
'AnyTensor',
'AnyEmbedding',
'NdArrayEmbedding',
'ImageNdArray',
'ImageTensor',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor.embedding import TorchEmbedding # noqa: F401
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
__all__.extend(['TorchEmbedding', 'TorchTensor', 'ImageTorchTensor'])
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
pass
else:
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
__all__.extend(['TensorFlowTensor'])
|
from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.image import ImageNdArray, ImageTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
__all__ = [
'NdArray',
'AnyTensor',
'AnyEmbedding',
'NdArrayEmbedding',
'ImageNdArray',
'ImageTensor',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor.embedding import TorchEmbedding # noqa: F401
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
__all__.extend(['TorchEmbedding', 'TorchTensor', 'ImageTorchTensor'])
|
# Copyright (c) OpenMMLab. All rights reserved.
from ._fast_stop_training_hook import FastStopTrainingHook # noqa: F401,F403
from ._utils import (demo_mm_inputs, demo_mm_proposals,
demo_mm_sampling_results, demo_track_inputs,
get_detector_cfg, get_roi_head_cfg, random_boxes,
replace_to_ceph)
__all__ = [
'demo_mm_inputs', 'get_detector_cfg', 'get_roi_head_cfg',
'demo_mm_proposals', 'demo_mm_sampling_results', 'replace_to_ceph',
'demo_track_inputs', 'random_boxes'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from ._fast_stop_training_hook import FastStopTrainingHook # noqa: F401,F403
from ._utils import (demo_mm_inputs, demo_mm_proposals,
demo_mm_sampling_results, get_detector_cfg,
get_roi_head_cfg, replace_to_ceph)
__all__ = [
'demo_mm_inputs', 'get_detector_cfg', 'get_roi_head_cfg',
'demo_mm_proposals', 'demo_mm_sampling_results', 'replace_to_ceph'
]
|
import os
from typing import Type
import orjson
from pydantic import BaseModel, Field, parse_obj_as
from rich.console import Console
from docarray.base_document.abstract_document import AbstractDocument
from docarray.base_document.base_node import BaseNode
from docarray.base_document.io.json import orjson_dumps, orjson_dumps_and_decode
from docarray.base_document.mixins import PlotMixin, ProtoMixin
from docarray.typing import ID
_console: Console = Console()
class BaseDocument(BaseModel, PlotMixin, ProtoMixin, AbstractDocument, BaseNode):
"""
The base class for Document
"""
id: ID = Field(default_factory=lambda: parse_obj_as(ID, os.urandom(16).hex()))
class Config:
json_loads = orjson.loads
json_dumps = orjson_dumps_and_decode
json_encoders = {dict: orjson_dumps}
validate_assignment = True
@classmethod
def _get_field_type(cls, field: str) -> Type['BaseDocument']:
"""
Accessing the nested python Class defined in the schema. Could be useful for
reconstruction of Document in serialization/deserialization
:param field: name of the field
:return:
"""
return cls.__fields__[field].outer_type_
def __str__(self):
with _console.capture() as capture:
_console.print(self)
return capture.get().strip()
def _get_string_for_regex_filter(self):
return str(self)
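# Illustrative sketch (not part of the original file): subclassing BaseDocument and
# inspecting a field type via the helper above. The field name is a made-up example.
if __name__ == '__main__':
    class TextDoc(BaseDocument):
        text: str

    assert TextDoc._get_field_type('text') is str
    doc = TextDoc(text='hello')
    print(doc.id)  # auto-generated hex ID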
|
import os
from typing import Type
import orjson
from pydantic import BaseModel, Field, parse_obj_as
from rich.console import Console
from docarray.base_document.abstract_document import AbstractDocument
from docarray.base_document.base_node import BaseNode
from docarray.base_document.io.json import orjson_dumps, orjson_dumps_and_decode
from docarray.base_document.mixins import PlotMixin, ProtoMixin
from docarray.typing import ID
_console: Console = Console()
class BaseDocument(BaseModel, PlotMixin, ProtoMixin, AbstractDocument, BaseNode):
"""
The base class for Document
"""
id: ID = Field(default_factory=lambda: parse_obj_as(ID, os.urandom(16).hex()))
class Config:
json_loads = orjson.loads
json_dumps = orjson_dumps_and_decode
json_encoders = {dict: orjson_dumps}
validate_assignment = True
@classmethod
def _get_field_type(cls, field: str) -> Type['BaseDocument']:
"""
Accessing the nested python Class defined in the schema. Could be useful for
reconstruction of Document in serialization/deserialization
:param field: name of the field
:return:
"""
return cls.__fields__[field].outer_type_
def __str__(self):
with _console.capture() as capture:
_console.print(self)
return capture.get().strip()
|
"""Test LLM program."""
from unittest.mock import MagicMock
import pytest
from llama_index.core.base.llms.types import (
ChatMessage,
LLMMetadata,
)
from llama_index.core.bridge.pydantic import BaseModel
from typing import List, Optional, Union, Any
from llama_index.core.tools.types import BaseTool
from llama_index.core.chat_engine.types import AgentChatResponse
from llama_index.core.tools import ToolOutput
from llama_index.core.program import FunctionCallingProgram
class MockSong(BaseModel):
"""Mock Song class."""
title: str
class MockAlbum(BaseModel):
title: str
artist: str
songs: List[MockSong]
MOCK_ALBUM = MockAlbum(
title="hello",
artist="world",
songs=[MockSong(title="song1"), MockSong(title="song2")],
)
MOCK_ALBUM_2 = MockAlbum(
title="hello2",
artist="world2",
songs=[MockSong(title="song3"), MockSong(title="song4")],
)
def _get_mock_album_response(
allow_parallel_tool_calls: bool = False,
) -> AgentChatResponse:
"""Get mock album."""
if allow_parallel_tool_calls:
albums = [MOCK_ALBUM, MOCK_ALBUM_2]
else:
albums = [MOCK_ALBUM]
tool_outputs = [
ToolOutput(
content=str(a),
tool_name="tool_output",
raw_input={},
raw_output=a,
)
for a in albums
]
# return tool outputs
return AgentChatResponse(
response="output",
sources=tool_outputs,
)
class MockLLM(MagicMock):
def predict_and_call(
self,
tools: List["BaseTool"],
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
verbose: bool = False,
allow_parallel_tool_calls: bool = False,
**kwargs: Any,
) -> "AgentChatResponse":
"""Predict and call the tool."""
return _get_mock_album_response(
allow_parallel_tool_calls=allow_parallel_tool_calls
)
async def apredict_and_call(
self,
tools: List["BaseTool"],
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
verbose: bool = False,
allow_parallel_tool_calls: bool = False,
**kwargs: Any,
) -> "AgentChatResponse":
"""Predict and call the tool."""
return _get_mock_album_response(
allow_parallel_tool_calls=allow_parallel_tool_calls
)
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(is_function_calling_model=True)
def test_function_program() -> None:
"""Test Function program."""
prompt_template_str = """This is a test album with {topic}"""
llm_program = FunctionCallingProgram.from_defaults(
output_cls=MockAlbum,
prompt_template_str=prompt_template_str,
llm=MockLLM(),
)
obj_output = llm_program(topic="songs")
assert isinstance(obj_output, MockAlbum)
assert obj_output.title == "hello"
assert obj_output.artist == "world"
assert obj_output.songs[0].title == "song1"
assert obj_output.songs[1].title == "song2"
def test_function_program_multiple() -> None:
"""Test Function program multiple."""
prompt_template_str = """This is a test album with {topic}"""
llm_program = FunctionCallingProgram.from_defaults(
output_cls=MockAlbum,
prompt_template_str=prompt_template_str,
llm=MockLLM(),
allow_parallel_tool_calls=True,
)
obj_outputs = llm_program(topic="songs")
assert isinstance(obj_outputs, list)
assert len(obj_outputs) == 2
assert isinstance(obj_outputs[0], MockAlbum)
assert isinstance(obj_outputs[1], MockAlbum)
# test second output
assert obj_outputs[1].title == "hello2"
assert obj_outputs[1].artist == "world2"
assert obj_outputs[1].songs[0].title == "song3"
assert obj_outputs[1].songs[1].title == "song4"
@pytest.mark.asyncio
async def test_async_function_program() -> None:
"""Test async function program."""
# same as above but async
prompt_template_str = """This is a test album with {topic}"""
llm_program = FunctionCallingProgram.from_defaults(
output_cls=MockAlbum,
prompt_template_str=prompt_template_str,
llm=MockLLM(),
)
obj_output = await llm_program.acall(topic="songs")
assert isinstance(obj_output, MockAlbum)
assert obj_output.title == "hello"
assert obj_output.artist == "world"
assert obj_output.songs[0].title == "song1"
assert obj_output.songs[1].title == "song2"
|
"""Test LLM program."""
from unittest.mock import MagicMock
import pytest
from llama_index.core.base.llms.types import (
ChatMessage,
LLMMetadata,
)
from llama_index.core.bridge.pydantic import BaseModel
from typing import List, Optional, Union, Any
from llama_index.core.tools.types import BaseTool
from llama_index.core.chat_engine.types import AgentChatResponse
from llama_index.core.tools import ToolOutput
from llama_index.core.program import FunctionCallingProgram
class MockSong(BaseModel):
"""Mock Song class."""
title: str
class MockAlbum(BaseModel):
title: str
artist: str
songs: List[MockSong]
MOCK_ALBUM = MockAlbum(
title="hello",
artist="world",
songs=[MockSong(title="song1"), MockSong(title="song2")],
)
MOCK_ALBUM_2 = MockAlbum(
title="hello2",
artist="world2",
songs=[MockSong(title="song3"), MockSong(title="song4")],
)
def _get_mock_album_response(
allow_parallel_tool_calls: bool = False,
) -> AgentChatResponse:
"""Get mock album."""
if allow_parallel_tool_calls:
albums = [MOCK_ALBUM, MOCK_ALBUM_2]
else:
albums = [MOCK_ALBUM]
tool_outputs = [
ToolOutput(
content=str(a),
tool_name="tool_output",
raw_input={},
raw_output=a,
)
for a in albums
]
# return tool outputs
return AgentChatResponse(
response="output",
sources=tool_outputs,
)
class MockLLM(MagicMock):
def predict_and_call(
self,
tools: List["BaseTool"],
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
verbose: bool = False,
allow_parallel_tool_calls: bool = False,
**kwargs: Any,
) -> "AgentChatResponse":
"""Predict and call the tool."""
return _get_mock_album_response(
allow_parallel_tool_calls=allow_parallel_tool_calls
)
async def apredict_and_call(
self,
tools: List["BaseTool"],
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
verbose: bool = False,
allow_parallel_tool_calls: bool = False,
**kwargs: Any,
) -> "AgentChatResponse":
"""Predict and call the tool."""
return _get_mock_album_response(
allow_parallel_tool_calls=allow_parallel_tool_calls
)
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(is_function_calling_model=True)
def test_function_program() -> None:
"""Test Function program."""
prompt_template_str = """This is a test album with {topic}"""
llm_program = FunctionCallingProgram.from_defaults(
output_cls=MockAlbum,
prompt_template_str=prompt_template_str,
llm=MockLLM(),
)
obj_output = llm_program(topic="songs")
assert isinstance(obj_output, MockAlbum)
assert obj_output.title == "hello"
assert obj_output.artist == "world"
assert obj_output.songs[0].title == "song1"
assert obj_output.songs[1].title == "song2"
def test_function_program_multiple() -> None:
"""Test Function program multiple."""
prompt_template_str = """This is a test album with {topic}"""
llm_program = FunctionCallingProgram.from_defaults(
output_cls=MockAlbum,
prompt_template_str=prompt_template_str,
llm=MockLLM(),
allow_parallel_tool_calls=True,
)
obj_outputs = llm_program(topic="songs")
assert isinstance(obj_outputs, list)
assert len(obj_outputs) == 2
assert isinstance(obj_outputs[0], MockAlbum)
assert isinstance(obj_outputs[1], MockAlbum)
# test second output
assert obj_outputs[1].title == "hello2"
assert obj_outputs[1].artist == "world2"
assert obj_outputs[1].songs[0].title == "song3"
assert obj_outputs[1].songs[1].title == "song4"
@pytest.mark.asyncio()
async def test_async_function_program() -> None:
"""Test async function program."""
# same as above but async
prompt_template_str = """This is a test album with {topic}"""
llm_program = FunctionCallingProgram.from_defaults(
output_cls=MockAlbum,
prompt_template_str=prompt_template_str,
llm=MockLLM(),
)
obj_output = await llm_program.acall(topic="songs")
assert isinstance(obj_output, MockAlbum)
assert obj_output.title == "hello"
assert obj_output.artist == "world"
assert obj_output.songs[0].title == "song1"
assert obj_output.songs[1].title == "song2"
|
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseTranslationEvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the SparseTranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
|
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseTranslationEvaluator,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the SparseTranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
print(results)
print(translation_evaluator.primary_metric)
print(results[translation_evaluator.primary_metric])
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FCOS(SingleStageDetector):
"""Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
preprocess_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
preprocess_cfg=preprocess_cfg,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FCOS(SingleStageDetector):
"""Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
preprocess_cfg=None,
pretrained=None,
init_cfg=None):
super(FCOS, self).__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
preprocess_cfg=preprocess_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import (ImageToTensor, PackDetInputs, PackTrackInputs,
ToTensor, Transpose)
from .frame_sampling import BaseFrameSample, UniformRefFrameSample
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, InferencerLoader, LoadAnnotations,
LoadEmptyAnnotations, LoadImageFromNDArray,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals, LoadTrackAnnotations)
from .transforms import (Albu, CachedMixUp, CachedMosaic, CopyPaste, CutOut,
Expand, FixShapeResize, MinIoURandomCrop, MixUp,
Mosaic, Pad, PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomErasing,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
from .wrappers import MultiBranch, ProposalBroadcaster, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder', 'CachedMosaic', 'CachedMixUp',
'FixShapeResize', 'ProposalBroadcaster', 'InferencerLoader',
'LoadTrackAnnotations', 'BaseFrameSample', 'UniformRefFrameSample',
'PackTrackInputs'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import (ImageToTensor, PackDetInputs, PackTrackInputs,
ToTensor, Transpose)
from .frame_sampling import UniformSample
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, InferencerLoader, LoadAnnotations,
LoadEmptyAnnotations, LoadImageFromNDArray,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals, LoadTrackAnnotations)
from .transforms import (Albu, CachedMixUp, CachedMosaic, CopyPaste, CutOut,
Expand, FixShapeResize, MinIoURandomCrop, MixUp,
Mosaic, Pad, PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomErasing,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
from .wrappers import MultiBranch, ProposalBroadcaster, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder', 'CachedMosaic', 'CachedMixUp',
'FixShapeResize', 'ProposalBroadcaster', 'InferencerLoader',
'LoadTrackAnnotations', 'UniformSample', 'PackTrackInputs'
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import UnstructuredMarkdownLoader
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"UnstructuredMarkdownLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"UnstructuredMarkdownLoader",
]
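# A minimal usage sketch (illustrative; assumes the `langchain_community` and
# `unstructured` packages are installed and that an `example.md` file exists):
#
#     from langchain_community.document_loaders import UnstructuredMarkdownLoader
#
#     loader = UnstructuredMarkdownLoader("example.md")
#     docs = loader.load()  # list of Document objects with the markdown content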
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import UnstructuredMarkdownLoader
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"UnstructuredMarkdownLoader": "langchain_community.document_loaders"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"UnstructuredMarkdownLoader",
]
|
import pytest
from keras.src import backend
from keras.src import testing
class DeviceTest(testing.TestCase):
@pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
def test_tf_device_scope(self):
import tensorflow as tf
if not tf.config.list_physical_devices("GPU"):
self.skipTest("Need at least one GPU for testing")
with backend.device("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("CPU:0", t.device)
with backend.device("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("CPU:0", t.device)
# When leaving the scope, the device should be back with gpu:0
t = backend.numpy.ones((2, 1))
self.assertIn("GPU:0", t.device)
# Also verify the explicit gpu device
with backend.device("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("GPU:0", t.device)
@pytest.mark.skipif(backend.backend() != "jax", reason="jax only")
def test_jax_device_scope(self):
import jax
def get_device(t):
            # After updating to JAX 0.4.33, the device can also be accessed directly via the `t.device` attribute.
return list(t.devices())[0]
platform = jax.default_backend()
if platform != "gpu":
self.skipTest("Need at least one GPU for testing")
with backend.device("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("cpu")[0])
with backend.device("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("cpu")[0])
# When leaving the scope, the device should be back with gpu:0
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("gpu")[0])
# Also verify the explicit gpu device
with backend.device("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("gpu")[0])
@pytest.mark.skipif(backend.backend() != "jax", reason="jax only")
def test_invalid_jax_device(self):
with self.assertRaisesRegex(ValueError, "Received: device_name='123'"):
backend.device(123).__enter__()
@pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
def test_torch_device_scope(self):
import torch
with backend.device("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cpu"))
with backend.device("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cpu"))
# Need at least one GPU for the following testing.
if not torch.cuda.is_available():
return
# When leaving the scope, the device should be back with gpu:0
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cuda", 0))
# Also verify the explicit gpu -> cuda conversion
with backend.device("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cuda", 0))
@pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
def test_invalid_torch_device(self):
with self.assertRaisesRegex(ValueError, "Received: device_name='123'"):
backend.device(123).__enter__()
@pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
def test_torch_meta_device(self):
import torch
with torch.device("meta"):
x = torch.ones(5)
t = backend.convert_to_tensor(x)
if not torch.cuda.is_available():
self.assertEqual(t.device, torch.device("cpu"))
else:
self.assertEqual(t.device, torch.device("cuda", 0))
|
import pytest
from keras.src import backend
from keras.src import testing
class DeviceTest(testing.TestCase):
@pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
def test_tf_device_scope(self):
import tensorflow as tf
if not tf.config.list_physical_devices("GPU"):
self.skipTest("Need at least one GPU for testing")
with backend.device("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("CPU:0", t.device)
with backend.device("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("CPU:0", t.device)
# When leaving the scope, the device should be back with gpu:0
t = backend.numpy.ones((2, 1))
self.assertIn("GPU:0", t.device)
# Also verify the explicit gpu device
with backend.device("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("GPU:0", t.device)
@pytest.mark.skipif(backend.backend() != "jax", reason="jax only")
def test_jax_device_scope(self):
import jax
def get_device(t):
            # After updating to JAX 0.4.33, the device can also be accessed directly via the `t.device` attribute.
return list(t.devices())[0]
platform = jax.default_backend()
if platform != "gpu":
self.skipTest("Need at least one GPU for testing")
with backend.device("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("cpu")[0])
with backend.device("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("cpu")[0])
# When leaving the scope, the device should be back with gpu:0
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("gpu")[0])
# Also verify the explicit gpu device
with backend.device("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(get_device(t), jax.devices("gpu")[0])
@pytest.mark.skipif(backend.backend() != "jax", reason="jax only")
def test_invalid_jax_device(self):
with self.assertRaisesRegex(ValueError, "Received: device_name='123'"):
backend.device(123).__enter__()
@pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
def test_torch_device_scope(self):
import torch
if not torch.cuda.device_count():
self.skipTest("Need at least one GPU for testing")
with backend.device("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cpu"))
with backend.device("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cpu"))
# When leaving the scope, the device should be back with gpu:0
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cuda", 0))
# Also verify the explicit gpu -> cuda conversion
with backend.device("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cuda", 0))
@pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
def test_invalid_torch_device(self):
with self.assertRaisesRegex(ValueError, "Received: device_name='123'"):
backend.device(123).__enter__()
@pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
def test_torch_meta_device(self):
import torch
with torch.device("meta"):
x = torch.ones(5)
t = backend.convert_to_tensor(x)
self.assertEqual(t.device, torch.device("cpu"))
|
"""Standard LangChain interface tests"""
import os
from typing import Type
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import AzureChatOpenAI
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
class TestAzureOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"],
"model": "gpt-4o-mini",
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
class TestAzureOpenAIStandardLegacy(ChatModelIntegrationTests):
"""Test a legacy model."""
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME"],
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def structured_output_kwargs(self) -> dict:
return {"method": "function_calling"}
|
"""Standard LangChain interface tests"""
import os
from typing import Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import AzureChatOpenAI
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
class TestAzureOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"],
"model": "gpt-4o-mini",
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
@pytest.mark.xfail(reason="Not yet supported.")
def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
super().test_usage_metadata_streaming(model)
class TestAzureOpenAIStandardLegacy(ChatModelIntegrationTests):
"""Test a legacy model."""
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME"],
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
}
@property
def structured_output_kwargs(self) -> dict:
return {"method": "function_calling"}
@pytest.mark.xfail(reason="Not yet supported.")
def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
super().test_usage_metadata_streaming(model)
|
import os
from pathlib import Path
from typing import Any, Callable, Optional, Union
from .folder import default_loader
from .utils import check_integrity, download_and_extract_archive, download_url
from .vision import VisionDataset
class SBU(VisionDataset):
"""`SBU Captioned Photo <http://www.cs.virginia.edu/~vicente/sbucaptions/>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where tarball
``SBUCaptionedPhotoDataset.tar.gz`` exists.
        transform (callable, optional): A function/transform that takes in a PIL image or torch.Tensor (depending on the given loader)
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
"""
url = "https://www.cs.rice.edu/~vo9/sbucaptions/SBUCaptionedPhotoDataset.tar.gz"
filename = "SBUCaptionedPhotoDataset.tar.gz"
md5_checksum = "9aec147b3488753cf758b4d493422285"
def __init__(
self,
root: Union[str, Path],
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = True,
loader: Callable[[str], Any] = default_loader,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self.loader = loader
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
# Read the caption for each photo
self.photos = []
self.captions = []
file1 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")
file2 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_captions.txt")
for line1, line2 in zip(open(file1), open(file2)):
url = line1.rstrip()
photo = os.path.basename(url)
filename = os.path.join(self.root, "dataset", photo)
if os.path.exists(filename):
caption = line2.rstrip()
self.photos.append(photo)
self.captions.append(caption)
def __getitem__(self, index: int) -> tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a caption for the photo.
"""
filename = os.path.join(self.root, "dataset", self.photos[index])
img = self.loader(filename)
if self.transform is not None:
img = self.transform(img)
target = self.captions[index]
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
"""The number of photos in the dataset."""
return len(self.photos)
def _check_integrity(self) -> bool:
"""Check the md5 checksum of the downloaded tarball."""
root = self.root
fpath = os.path.join(root, self.filename)
if not check_integrity(fpath, self.md5_checksum):
return False
return True
def download(self) -> None:
"""Download and extract the tarball, and download each individual photo."""
if self._check_integrity():
return
download_and_extract_archive(self.url, self.root, self.root, self.filename, self.md5_checksum)
# Download individual photos
with open(os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")) as fh:
for line in fh:
url = line.rstrip()
try:
download_url(url, os.path.join(self.root, "dataset"))
except OSError:
# The images point to public images on Flickr.
                    # Note: Images might be removed by users at any time.
pass
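# A minimal usage sketch (illustrative; the root path and transform are assumptions,
# and since photos are fetched from Flickr some of them may no longer be available):
#
#     from torch.utils.data import DataLoader
#     from torchvision import transforms
#
#     dataset = SBU(root="./sbu", transform=transforms.ToTensor(), download=True)
#     img, caption = dataset[0]  # (transformed image, caption string)
#     loader = DataLoader(dataset, batch_size=32, shuffle=True)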
|
import os
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from .folder import default_loader
from .utils import check_integrity, download_and_extract_archive, download_url
from .vision import VisionDataset
class SBU(VisionDataset):
"""`SBU Captioned Photo <http://www.cs.virginia.edu/~vicente/sbucaptions/>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where tarball
``SBUCaptionedPhotoDataset.tar.gz`` exists.
        transform (callable, optional): A function/transform that takes in a PIL image or torch.Tensor (depending on the given loader)
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
"""
url = "https://www.cs.rice.edu/~vo9/sbucaptions/SBUCaptionedPhotoDataset.tar.gz"
filename = "SBUCaptionedPhotoDataset.tar.gz"
md5_checksum = "9aec147b3488753cf758b4d493422285"
def __init__(
self,
root: Union[str, Path],
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = True,
loader: Callable[[str], Any] = default_loader,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self.loader = loader
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
# Read the caption for each photo
self.photos = []
self.captions = []
file1 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")
file2 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_captions.txt")
for line1, line2 in zip(open(file1), open(file2)):
url = line1.rstrip()
photo = os.path.basename(url)
filename = os.path.join(self.root, "dataset", photo)
if os.path.exists(filename):
caption = line2.rstrip()
self.photos.append(photo)
self.captions.append(caption)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a caption for the photo.
"""
filename = os.path.join(self.root, "dataset", self.photos[index])
img = self.loader(filename)
if self.transform is not None:
img = self.transform(img)
target = self.captions[index]
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
"""The number of photos in the dataset."""
return len(self.photos)
def _check_integrity(self) -> bool:
"""Check the md5 checksum of the downloaded tarball."""
root = self.root
fpath = os.path.join(root, self.filename)
if not check_integrity(fpath, self.md5_checksum):
return False
return True
def download(self) -> None:
"""Download and extract the tarball, and download each individual photo."""
if self._check_integrity():
return
download_and_extract_archive(self.url, self.root, self.root, self.filename, self.md5_checksum)
# Download individual photos
with open(os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")) as fh:
for line in fh:
url = line.rstrip()
try:
download_url(url, os.path.join(self.root, "dataset"))
except OSError:
# The images point to public images on Flickr.
                    # Note: Images might be removed by users at any time.
pass
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.wrappers.sklearn_wrapper import (
SKLearnClassifier as SKLearnClassifier,
)
from keras.src.wrappers.sklearn_wrapper import (
SKLearnRegressor as SKLearnRegressor,
)
from keras.src.wrappers.sklearn_wrapper import (
SKLearnTransformer as SKLearnTransformer,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.wrappers.sklearn_wrapper import SKLearnClassifier
from keras.src.wrappers.sklearn_wrapper import SKLearnRegressor
from keras.src.wrappers.sklearn_wrapper import SKLearnTransformer
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmcv import ConfigDict
from mmdet.models.dense_heads import CenterNetHead
def test_center_head_loss():
"""Tests center head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
test_cfg = dict(topK=100, max_per_img=100)
self = CenterNetHead(
num_classes=4, in_channel=1, feat_channel=4, test_cfg=test_cfg)
feat = [torch.rand(1, 1, s, s)]
center_out, wh_out, offset_out = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(center_out, wh_out, offset_out, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
loss_center = empty_gt_losses['loss_center_heatmap']
loss_wh = empty_gt_losses['loss_wh']
loss_offset = empty_gt_losses['loss_offset']
assert loss_center.item() > 0, 'loss_center should be non-zero'
assert loss_wh.item() == 0, (
'there should be no loss_wh when there are no true boxes')
assert loss_offset.item() == 0, (
'there should be no loss_offset when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(center_out, wh_out, offset_out, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
loss_center = one_gt_losses['loss_center_heatmap']
loss_wh = one_gt_losses['loss_wh']
loss_offset = one_gt_losses['loss_offset']
assert loss_center.item() > 0, 'loss_center should be non-zero'
assert loss_wh.item() > 0, 'loss_wh should be non-zero'
assert loss_offset.item() > 0, 'loss_offset should be non-zero'
def test_centernet_head_get_bboxes():
"""Tests center head generating and decoding the heatmap."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': np.array([1., 1., 1., 1.]),
'pad_shape': (s, s, 3),
'batch_input_shape': (s, s),
'border': (0, 0, 0, 0),
'flip': False
}]
test_cfg = ConfigDict(
dict(topk=100, local_maximum_kernel=3, max_per_img=100))
gt_bboxes = [
torch.Tensor([[10, 20, 200, 240], [40, 50, 100, 200],
[10, 20, 100, 240]])
]
gt_labels = [torch.LongTensor([1, 1, 2])]
self = CenterNetHead(
num_classes=4, in_channel=1, feat_channel=4, test_cfg=test_cfg)
self.feat_shape = (1, 1, s // 4, s // 4)
targets, _ = self.get_targets(gt_bboxes, gt_labels, self.feat_shape,
img_metas[0]['pad_shape'])
center_target = targets['center_heatmap_target']
wh_target = targets['wh_target']
offset_target = targets['offset_target']
# make sure assign target right
for i in range(len(gt_bboxes[0])):
bbox, label = gt_bboxes[0][i] / 4, gt_labels[0][i]
ctx, cty = sum(bbox[0::2]) / 2, sum(bbox[1::2]) / 2
int_ctx, int_cty = int(sum(bbox[0::2]) / 2), int(sum(bbox[1::2]) / 2)
w, h = bbox[2] - bbox[0], bbox[3] - bbox[1]
x_off = ctx - int(ctx)
y_off = cty - int(cty)
assert center_target[0, label, int_cty, int_ctx] == 1
assert wh_target[0, 0, int_cty, int_ctx] == w
assert wh_target[0, 1, int_cty, int_ctx] == h
assert offset_target[0, 0, int_cty, int_ctx] == x_off
assert offset_target[0, 1, int_cty, int_ctx] == y_off
# make sure get_bboxes is right
detections = self.get_bboxes([center_target], [wh_target], [offset_target],
img_metas,
rescale=True,
with_nms=False)
out_bboxes = detections[0][0][:3]
out_clses = detections[0][1][:3]
for bbox, cls in zip(out_bboxes, out_clses):
flag = False
for gt_bbox, gt_cls in zip(gt_bboxes[0], gt_labels[0]):
if (bbox[:4] == gt_bbox[:4]).all():
flag = True
assert flag, 'get_bboxes is wrong'
|
import numpy as np
import torch
from mmcv import ConfigDict
from mmdet.models.dense_heads import CenterNetHead
def test_center_head_loss():
"""Tests center head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
test_cfg = dict(topK=100, max_per_img=100)
self = CenterNetHead(
num_classes=4, in_channel=1, feat_channel=4, test_cfg=test_cfg)
feat = [torch.rand(1, 1, s, s)]
center_out, wh_out, offset_out = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(center_out, wh_out, offset_out, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
loss_center = empty_gt_losses['loss_center_heatmap']
loss_wh = empty_gt_losses['loss_wh']
loss_offset = empty_gt_losses['loss_offset']
assert loss_center.item() > 0, 'loss_center should be non-zero'
assert loss_wh.item() == 0, (
'there should be no loss_wh when there are no true boxes')
assert loss_offset.item() == 0, (
'there should be no loss_offset when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(center_out, wh_out, offset_out, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
loss_center = one_gt_losses['loss_center_heatmap']
loss_wh = one_gt_losses['loss_wh']
loss_offset = one_gt_losses['loss_offset']
assert loss_center.item() > 0, 'loss_center should be non-zero'
assert loss_wh.item() > 0, 'loss_wh should be non-zero'
assert loss_offset.item() > 0, 'loss_offset should be non-zero'
def test_centernet_head_get_bboxes():
"""Tests center head generating and decoding the heatmap."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': np.array([1., 1., 1., 1.]),
'pad_shape': (s, s, 3),
'batch_input_shape': (s, s),
'border': (0, 0, 0, 0),
'flip': False
}]
test_cfg = ConfigDict(
dict(topk=100, local_maximum_kernel=3, max_per_img=100))
gt_bboxes = [
torch.Tensor([[10, 20, 200, 240], [40, 50, 100, 200],
[10, 20, 100, 240]])
]
gt_labels = [torch.LongTensor([1, 1, 2])]
self = CenterNetHead(
num_classes=4, in_channel=1, feat_channel=4, test_cfg=test_cfg)
self.feat_shape = (1, 1, s // 4, s // 4)
targets, _ = self.get_targets(gt_bboxes, gt_labels, self.feat_shape,
img_metas[0]['pad_shape'])
center_target = targets['center_heatmap_target']
wh_target = targets['wh_target']
offset_target = targets['offset_target']
# make sure assign target right
for i in range(len(gt_bboxes[0])):
bbox, label = gt_bboxes[0][i] / 4, gt_labels[0][i]
ctx, cty = sum(bbox[0::2]) / 2, sum(bbox[1::2]) / 2
int_ctx, int_cty = int(sum(bbox[0::2]) / 2), int(sum(bbox[1::2]) / 2)
w, h = bbox[2] - bbox[0], bbox[3] - bbox[1]
x_off = ctx - int(ctx)
y_off = cty - int(cty)
assert center_target[0, label, int_cty, int_ctx] == 1
assert wh_target[0, 0, int_cty, int_ctx] == w
assert wh_target[0, 1, int_cty, int_ctx] == h
assert offset_target[0, 0, int_cty, int_ctx] == x_off
assert offset_target[0, 1, int_cty, int_ctx] == y_off
# make sure get_bboxes is right
detections = self.get_bboxes([center_target], [wh_target], [offset_target],
img_metas,
rescale=True,
with_nms=False)
out_bboxes = detections[0][0][:3]
out_clses = detections[0][1][:3]
for bbox, cls in zip(out_bboxes, out_clses):
flag = False
for gt_bbox, gt_cls in zip(gt_bboxes[0], gt_labels[0]):
if (bbox[:4] == gt_bbox[:4]).all():
flag = True
assert flag, 'get_bboxes is wrong'
|
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal
def erase(
inpt: torch.Tensor,
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> torch.Tensor:
"""[BETA] See :class:`~torchvision.transforms.v2.RandomErase` for details."""
if torch.jit.is_scripting():
return erase_image(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
_log_api_usage_once(erase)
kernel = _get_kernel(erase, type(inpt))
return kernel(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
@_register_kernel_internal(erase, torch.Tensor)
@_register_kernel_internal(erase, datapoints.Image)
def erase_image(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@_register_kernel_internal(erase, PIL.Image.Image)
def _erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
@_register_kernel_internal(erase, datapoints.Video)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
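# A minimal usage sketch (shapes are illustrative): replace the 20x20 patch of a
# random CHW image starting at row 10, column 10 with zeros.
#
#     img = torch.rand(3, 100, 100)
#     out = erase(img, i=10, j=10, h=20, w=20, v=torch.zeros(3, 20, 20))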
|
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal
def erase(
inpt: torch.Tensor,
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> torch.Tensor:
if torch.jit.is_scripting():
return erase_image(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
_log_api_usage_once(erase)
kernel = _get_kernel(erase, type(inpt))
return kernel(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
@_register_kernel_internal(erase, torch.Tensor)
@_register_kernel_internal(erase, datapoints.Image)
def erase_image(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@_register_kernel_internal(erase, PIL.Image.Image)
def _erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
@_register_kernel_internal(erase, datapoints.Video)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
|
# CREDITS: https://github.com/openai/CLIP
import gzip
import html
from functools import lru_cache
from pathlib import Path
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return str(Path(__file__).parents[2] / '.cache/bpe_simple_vocab_16e6.txt.gz')
@lru_cache()
def bytes_to_unicode():
"""
    Returns a list of utf-8 bytes and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
try:
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
except FileNotFoundError:
raise FileNotFoundError(
'Please download AudioCLIP tokenizer and set the `tokenizer_path` argument.'
)
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
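# A minimal usage sketch (assumes the BPE vocabulary archive is present at the
# default cache path returned by `default_bpe()`):
#
#     tokenizer = SimpleTokenizer()
#     ids = tokenizer.encode("A dog barking in the park")
#     text = tokenizer.decode(ids)  # roughly round-trips the lowercased input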
|
# CREDITS: https://github.com/openai/CLIP
import gzip
import html
from functools import lru_cache
from pathlib import Path
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return str(Path(__file__).parents[2] / '.cache/bpe_simple_vocab_16e6.txt.gz')
@lru_cache()
def bytes_to_unicode():
"""
    Returns a list of utf-8 bytes and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
|
from typing import Dict, Optional, Sequence
import torch
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
from transformers import CLIPModel, CLIPTokenizer
class CLIPTextEncoder(Executor):
"""Encode text into embeddings using a CLIP model.
:param pretrained_model_name_or_path: Can be either:
- A string, the model id of a pretrained CLIP model hosted
inside a model repo on huggingface.co, e.g., 'openai/clip-vit-base-patch32'
- A path to a directory containing model weights saved, e.g., ./my_model_directory/
:param base_tokenizer_model: Base tokenizer model.
Defaults to ``pretrained_model_name_or_path`` if None
:param max_length: Max length argument for the tokenizer.
All CLIP models use 77 as the max length
:param device: Device to be used. Use 'cuda' for GPU.
:param default_traversal_paths: Default traversal paths for encoding, used if the
traversal path is not passed as a parameter with the request.
:param default_batch_size: Default batch size for encoding, used if the
batch size is not passed as a parameter with the request.
:param args: Arguments
:param kwargs: Keyword Arguments
"""
def __init__(
self,
pretrained_model_name_or_path: str = 'openai/clip-vit-base-patch32',
base_tokenizer_model: Optional[str] = None,
max_length: Optional[int] = 77,
device: str = 'cpu',
default_traversal_paths: Sequence[str] = ['r'],
default_batch_size: int = 32,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.default_traversal_paths = default_traversal_paths
self.default_batch_size = default_batch_size
self.pretrained_model_name_or_path = pretrained_model_name_or_path
self.base_tokenizer_model = (
base_tokenizer_model or pretrained_model_name_or_path
)
self.max_length = max_length
self.device = device
self.tokenizer = CLIPTokenizer.from_pretrained(self.base_tokenizer_model)
self.model = CLIPModel.from_pretrained(self.pretrained_model_name_or_path)
self.model.eval().to(device)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Encode text data into a ndarray of `D` as dimension, and fill
the embedding attribute of the docs.
:param docs: DocumentArray containing text
:param parameters: dictionary to define the `traversal_paths` and the
`batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional key value arguments.
"""
for docs_batch in get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
):
text_batch = docs_batch.get_attributes('text')
with torch.no_grad():
input_tokens = self._generate_input_tokens(text_batch)
embeddings = self.model.get_text_features(**input_tokens).cpu().numpy()
for doc, embedding in zip(docs_batch, embeddings):
doc.embedding = embedding
def _generate_input_tokens(self, texts: Sequence[str]):
input_tokens = self.tokenizer(
texts,
max_length=self.max_length,
padding='longest',
truncation=True,
return_tensors='pt',
)
input_tokens = {k: v.to(self.device) for k, v in input_tokens.items()}
return input_tokens
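# A minimal usage sketch (illustrative; assumes a Jina 2.x-style Flow and that the
# model weights can be downloaded from the Hugging Face hub):
#
#     from jina import Document, DocumentArray, Flow
#
#     f = Flow().add(uses=CLIPTextEncoder)
#     with f:
#         f.post(
#             on='/encode',
#             inputs=DocumentArray([Document(text='hello world')]),
#             parameters={'traversal_paths': ['r'], 'batch_size': 10},
#         )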
|
import os
from typing import Dict, List, Optional
import torch
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator
from transformers import CLIPTokenizer, CLIPModel
class CLIPTextEncoder(Executor):
"""Encode text into embeddings using a CLIP model.
:param pretrained_model_name_or_path: Can be either:
- A string, the model id of a pretrained CLIP model hosted
inside a model repo on huggingface.co, e.g., 'openai/clip-vit-base-patch32'
- A path to a directory containing model weights saved, e.g., ./my_model_directory/
:param base_tokenizer_model: Base tokenizer model.
Defaults to ``pretrained_model_name_or_path`` if None
:param max_length: Max length argument for the tokenizer.
All CLIP models use 77 as the max length
:param device: Device to be used. Use 'cuda' for GPU.
:param default_traversal_paths: Default traversal paths for encoding, used if the
traversal path is not passed as a parameter with the request.
:param default_batch_size: Default batch size for encoding, used if the
batch size is not passed as a parameter with the request.
:param args: Arguments
:param kwargs: Keyword Arguments
"""
def __init__(
self,
pretrained_model_name_or_path: str = 'openai/clip-vit-base-patch32',
base_tokenizer_model: Optional[str] = None,
max_length: Optional[int] = 77,
device: str = 'cpu',
default_traversal_paths: List[str] = ['r'],
default_batch_size: int = 32,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.default_traversal_paths = default_traversal_paths
self.default_batch_size = default_batch_size
self.pretrained_model_name_or_path = pretrained_model_name_or_path
self.base_tokenizer_model = (
base_tokenizer_model or pretrained_model_name_or_path
)
self.max_length = max_length
self.logger = JinaLogger(self.__class__.__name__)
if device.startswith('cuda') and not torch.cuda.is_available():
self.logger.warning(
                'You tried to use GPU but torch did not detect your '
                'GPU correctly. Defaulting to CPU. Check your CUDA installation!'
)
device = 'cpu'
self.device = device
self.tokenizer = CLIPTokenizer.from_pretrained(self.base_tokenizer_model)
self.model = CLIPModel.from_pretrained(self.pretrained_model_name_or_path)
self.model.eval().to(torch.device(device))
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Encode text data into a ndarray of `D` as dimension, and fill
the embedding attribute of the docs.
:param docs: DocumentArray containing text
:param parameters: dictionary to define the `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional key value arguments.
"""
for document_batch in get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
):
text_batch = document_batch.get_attributes('text')
with torch.no_grad():
input_tokens = self._generate_input_tokens(text_batch)
embedding_batch = self.model.get_text_features(**input_tokens)
numpy_embedding_batch = embedding_batch.cpu().numpy()
for document, numpy_embedding in zip(
document_batch, numpy_embedding_batch
):
document.embedding = numpy_embedding
def _generate_input_tokens(self, texts):
input_tokens = self.tokenizer(
texts,
max_length=self.max_length,
padding='longest',
truncation=True,
return_tensors='pt',
)
input_tokens = {
k: v.to(torch.device(self.device)) for k, v in input_tokens.items()
}
return input_tokens
|
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS,
MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, VISUALIZERS, WEIGHT_INITIALIZERS, WRITERS)
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'METRICS', 'MODEL_WRAPPERS', 'LOOPS', 'WRITERS', 'VISUALIZERS',
'DefaultScope'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, EVALUATORS, HOOKS, LOOPS,
MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, VISUALIZERS, WEIGHT_INITIALIZERS, WRITERS)
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'EVALUATORS', 'MODEL_WRAPPERS', 'LOOPS', 'WRITERS', 'VISUALIZERS',
'DefaultScope'
]
|
from typing import Any, List, Optional, Type
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.graph_stores.types import PropertyGraphStore
from llama_index.core.indices.property_graph.sub_retrievers.base import BasePGRetriever
from llama_index.core.llms import LLM
from llama_index.core.prompts import PromptTemplate
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
from llama_index.core.settings import Settings
class CypherTemplateRetriever(BasePGRetriever):
"""
A Cypher retriever that fills in params for a cypher query using an LLM.
Args:
graph_store (PropertyGraphStore):
The graph store to retrieve data from.
output_cls (Type[BaseModel]):
The output class to use for the LLM.
Should contain the params needed for the cypher query.
cypher_query (str):
The cypher query to use, with templated params.
llm (Optional[LLM], optional):
The language model to use. Defaults to Settings.llm.
"""
def __init__(
self,
graph_store: PropertyGraphStore,
output_cls: Type[BaseModel],
cypher_query: str,
llm: Optional[LLM] = None,
**kwargs: Any,
) -> None:
if not graph_store.supports_structured_queries:
raise ValueError(
"The provided graph store does not support cypher queries."
)
self.llm = llm or Settings.llm
# Explicit type hint to suppress:
# `Expected type '_SpecialForm[BaseModel]', got 'Type[BaseModel]' instead`
self.output_cls: Type[BaseModel] = output_cls
self.cypher_query = cypher_query
super().__init__(
graph_store=graph_store, include_text=False, include_properties=False
)
def retrieve_from_graph(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
question = query_bundle.query_str
response = self.llm.structured_predict(
self.output_cls, PromptTemplate(question)
)
cypher_response = self._graph_store.structured_query(
self.cypher_query,
param_map=response.model_dump(),
)
return [
NodeWithScore(
node=TextNode(
text=str(cypher_response),
),
score=1.0,
)
]
async def aretrieve_from_graph(
self, query_bundle: QueryBundle
) -> List[NodeWithScore]:
question = query_bundle.query_str
response = await self.llm.astructured_predict(
self.output_cls, PromptTemplate(question)
)
cypher_response = await self._graph_store.astructured_query(
self.cypher_query,
param_map=response.model_dump(),
)
return [
NodeWithScore(
node=TextNode(
text=str(cypher_response),
),
score=1.0,
)
]
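# A hedged usage sketch (illustrative only, not part of the original module).
# The params model and cypher query are assumptions; any PropertyGraphStore
# with supports_structured_queries=True can be plugged in where noted.
from llama_index.core.bridge.pydantic import Field
class ExampleCypherParams(BaseModel):
    """Params the LLM fills in for the templated cypher query."""
    names: List[str] = Field(description="Entity names to match in the graph.")
EXAMPLE_CYPHER_QUERY = (
    "MATCH (e:ENTITY) WHERE e.name IN $names "
    "RETURN e.name AS name, e.description AS description"
)
# retriever = CypherTemplateRetriever(
#     graph_store=my_graph_store,  # assumed: an existing PropertyGraphStore
#     output_cls=ExampleCypherParams,
#     cypher_query=EXAMPLE_CYPHER_QUERY,
# )
# nodes = retriever.retrieve("Which entities does the question mention?")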
|
from typing import Any, List, Optional, Type
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.graph_stores.types import PropertyGraphStore
from llama_index.core.indices.property_graph.sub_retrievers.base import BasePGRetriever
from llama_index.core.llms import LLM
from llama_index.core.prompts import PromptTemplate
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
from llama_index.core.settings import Settings
class CypherTemplateRetriever(BasePGRetriever):
"""A Cypher retriever that fills in params for a cypher query using an LLM.
Args:
graph_store (PropertyGraphStore):
The graph store to retrieve data from.
output_cls (Type[BaseModel]):
The output class to use for the LLM.
Should contain the params needed for the cypher query.
cypher_query (str):
The cypher query to use, with templated params.
llm (Optional[LLM], optional):
The language model to use. Defaults to Settings.llm.
"""
def __init__(
self,
graph_store: PropertyGraphStore,
output_cls: Type[BaseModel],
cypher_query: str,
llm: Optional[LLM] = None,
**kwargs: Any,
) -> None:
if not graph_store.supports_structured_queries:
raise ValueError(
"The provided graph store does not support cypher queries."
)
self.llm = llm or Settings.llm
# Explicit type hint to suppress:
# `Expected type '_SpecialForm[BaseModel]', got 'Type[BaseModel]' instead`
self.output_cls: Type[BaseModel] = output_cls
self.cypher_query = cypher_query
super().__init__(
graph_store=graph_store, include_text=False, include_properties=False
)
def retrieve_from_graph(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
question = query_bundle.query_str
response = self.llm.structured_predict(
self.output_cls, PromptTemplate(question)
)
cypher_response = self._graph_store.structured_query(
self.cypher_query,
param_map=response.model_dump(),
)
return [
NodeWithScore(
node=TextNode(
text=str(cypher_response),
),
score=1.0,
)
]
async def aretrieve_from_graph(
self, query_bundle: QueryBundle
) -> List[NodeWithScore]:
question = query_bundle.query_str
response = await self.llm.astructured_predict(
self.output_cls, PromptTemplate(question)
)
cypher_response = await self._graph_store.astructured_query(
self.cypher_query,
param_map=response.model_dump(),
)
return [
NodeWithScore(
node=TextNode(
text=str(cypher_response),
),
score=1.0,
)
]
|
from langchain_core.agents import AgentAction
def format_log_to_str(
intermediate_steps: list[tuple[AgentAction, str]],
observation_prefix: str = "Observation: ",
llm_prefix: str = "Thought: ",
) -> str:
"""Construct the scratchpad that lets the agent continue its thought process.
Args:
intermediate_steps: List of tuples of AgentAction and observation strings.
observation_prefix: Prefix to append the observation with.
Defaults to "Observation: ".
llm_prefix: Prefix to append the llm call with.
Defaults to "Thought: ".
Returns:
str: The scratchpad.
"""
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\n{observation_prefix}{observation}\n{llm_prefix}"
return thoughts
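# A minimal usage sketch (illustrative, not part of the original module): one
# (AgentAction, observation) pair yields the Thought/Action/Observation
# scratchpad string that the agent continues from.
if __name__ == "__main__":
    example_steps = [
        (
            AgentAction(
                tool="search",
                tool_input="weather in SF",
                log="Thought: I should look this up.\nAction: search",
            ),
            "Sunny, 72F",
        ),
    ]
    # Prints the action log, then "Observation: Sunny, 72F", then a trailing
    # "Thought: " prefix for the next LLM call.
    print(format_log_to_str(example_steps))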
|
from typing import List, Tuple
from langchain_core.agents import AgentAction
def format_log_to_str(
intermediate_steps: List[Tuple[AgentAction, str]],
observation_prefix: str = "Observation: ",
llm_prefix: str = "Thought: ",
) -> str:
"""Construct the scratchpad that lets the agent continue its thought process.
Args:
intermediate_steps: List of tuples of AgentAction and observation strings.
observation_prefix: Prefix to append the observation with.
Defaults to "Observation: ".
llm_prefix: Prefix to append the llm call with.
Defaults to "Thought: ".
Returns:
str: The scratchpad.
"""
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\n{observation_prefix}{observation}\n{llm_prefix}"
return thoughts
|
import os
import subprocess
directory = os.path.dirname(os.path.realpath(__file__))
def run(*command: str) -> None:
print(f">>>>> Running poetry run {' '.join(command)}")
subprocess.run(["poetry", "run"] + list(command), cwd=directory, check=True)
def lint():
try:
run("ruff", "check", ".", "--exit-zero")
run("isort", "--diff", "--check", "--profile", "black", ".")
run("black", "--diff", "--check", ".")
run("pyright")
except subprocess.CalledProcessError as e:
print("Lint failed, try running `poetry run format` to fix the issues: ", e)
raise e
def populate_database():
import glob
import json
import pathlib
import requests
import market.model
templates = pathlib.Path(__file__).parent.parent / "graph_templates"
all_files = glob.glob(str(templates / "*.json"))
for file in all_files:
with open(file, "r") as f:
data = f.read()
req = market.model.AddAgentRequest(
graph=json.loads(data),
author="Populate DB",
categories=["Pre-Populated"],
keywords=["test"],
)
response = requests.post(
"http://localhost:8015/api/v1/market/admin/agent", json=req.model_dump()
)
print(response.text)
def format():
run("ruff", "check", "--fix", ".")
run("isort", "--profile", "black", ".")
run("black", ".")
run("pyright", ".")
def app():
port = os.getenv("PORT", "8015")
run("uvicorn", "market.app:app", "--reload", "--port", port, "--host", "0.0.0.0")
def setup():
run("prisma", "generate")
run("prisma", "migrate", "deploy")
|
import os
import subprocess
directory = os.path.dirname(os.path.realpath(__file__))
def run(*command: str) -> None:
print(f">>>>> Running poetry run {' '.join(command)}")
subprocess.run(["poetry", "run"] + list(command), cwd=directory, check=True)
def lint():
try:
run("ruff", "check", ".", "--exit-zero")
run("isort", "--diff", "--check", "--profile", "black", ".")
run("black", "--diff", "--check", ".")
run("pyright")
except subprocess.CalledProcessError as e:
print("Lint failed, try running `poetry run format` to fix the issues: ", e)
raise e
def populate_database():
import glob
import json
import pathlib
import requests
import market.model
templates = pathlib.Path(__file__).parent.parent / "backend" / "graph_templates"
all_files = glob.glob(str(templates / "*.json"))
for file in all_files:
with open(file, "r") as f:
data = f.read()
req = market.model.AddAgentRequest(
graph=json.loads(data),
author="Populate DB",
categories=["Pre-Populated"],
keywords=["test"],
)
response = requests.post(
"http://localhost:8015/api/v1/market/admin/agent", json=req.model_dump()
)
print(response.text)
def format():
run("ruff", "check", "--fix", ".")
run("isort", "--profile", "black", ".")
run("black", ".")
run("pyright", ".")
def app():
port = os.getenv("PORT", "8015")
run("uvicorn", "market.app:app", "--reload", "--port", port, "--host", "0.0.0.0")
def setup():
run("prisma", "generate")
run("prisma", "migrate", "deploy")
|
from ...utils import is_flax_available, is_torch_available
if is_torch_available():
from .controlnet import ControlNetModel, ControlNetOutput
from .controlnet_flux import FluxControlNetModel, FluxControlNetOutput, FluxMultiControlNetModel
from .controlnet_hunyuan import (
HunyuanControlNetOutput,
HunyuanDiT2DControlNetModel,
HunyuanDiT2DMultiControlNetModel,
)
from .controlnet_sd3 import SD3ControlNetModel, SD3ControlNetOutput, SD3MultiControlNetModel
from .controlnet_sparsectrl import (
SparseControlNetConditioningEmbedding,
SparseControlNetModel,
SparseControlNetOutput,
)
from .controlnet_union import ControlNetUnionModel
from .controlnet_xs import ControlNetXSAdapter, ControlNetXSOutput, UNetControlNetXSModel
from .multicontrolnet import MultiControlNetModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
|
from ...utils import is_flax_available, is_torch_available
if is_torch_available():
from .controlnet import ControlNetModel, ControlNetOutput
from .controlnet_flux import FluxControlNetModel, FluxControlNetOutput, FluxMultiControlNetModel
from .controlnet_hunyuan import (
HunyuanControlNetOutput,
HunyuanDiT2DControlNetModel,
HunyuanDiT2DMultiControlNetModel,
)
from .controlnet_sd3 import SD3ControlNetModel, SD3ControlNetOutput, SD3MultiControlNetModel
from .controlnet_sparsectrl import (
SparseControlNetConditioningEmbedding,
SparseControlNetModel,
SparseControlNetOutput,
)
from .controlnet_union import ControlNetUnionInput, ControlNetUnionInputProMax, ControlNetUnionModel
from .controlnet_xs import ControlNetXSAdapter, ControlNetXSOutput, UNetControlNetXSModel
from .multicontrolnet import MultiControlNetModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
|
from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.10.0"
@keras_export("keras.version")
def version():
return __version__
|
from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.9.0"
@keras_export("keras.version")
def version():
return __version__
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch
from diffusers.utils.testing_utils import (
floats_tensor,
require_torch,
require_torch_accelerator_with_training,
torch_all_close,
torch_device,
)
from diffusers.utils.torch_utils import randn_tensor
@require_torch
class UNetBlockTesterMixin:
@property
def dummy_input(self):
return self.get_dummy_input()
@property
def output_shape(self):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")
def get_dummy_input(
self,
include_temb=True,
include_res_hidden_states_tuple=False,
include_encoder_hidden_states=False,
include_skip_sample=False,
):
batch_size = 4
num_channels = 32
sizes = (32, 32)
generator = torch.manual_seed(0)
device = torch.device(torch_device)
shape = (batch_size, num_channels) + sizes
hidden_states = randn_tensor(shape, generator=generator, device=device)
dummy_input = {"hidden_states": hidden_states}
if include_temb:
temb_channels = 128
dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)
if include_res_hidden_states_tuple:
generator_1 = torch.manual_seed(1)
dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)
if include_encoder_hidden_states:
dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)
if include_skip_sample:
dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)
return dummy_input
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 128,
}
if self.block_type == "up":
init_dict["prev_output_channel"] = 32
if self.block_type == "mid":
init_dict.pop("out_channels")
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_output(self, expected_slice):
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
unet_block = self.block_class(**init_dict)
unet_block.to(torch_device)
unet_block.eval()
with torch.no_grad():
output = unet_block(**inputs_dict)
if isinstance(output, Tuple):
output = output[0]
self.assertEqual(output.shape, self.output_shape)
output_slice = output[0, -1, -3:, -3:]
expected_slice = torch.tensor(expected_slice).to(torch_device)
assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)
@require_torch_accelerator_with_training
def test_training(self):
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
model = self.block_class(**init_dict)
model.to(torch_device)
model.train()
output = model(**inputs_dict)
if isinstance(output, Tuple):
output = output[0]
device = torch.device(torch_device)
noise = randn_tensor(output.shape, device=device)
loss = torch.nn.functional.mse_loss(output, noise)
loss.backward()
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch
from diffusers.utils.testing_utils import (
floats_tensor,
require_torch,
require_torch_accelerator_with_training,
torch_all_close,
torch_device,
)
from diffusers.utils.torch_utils import randn_tensor
@require_torch
class UNetBlockTesterMixin:
@property
def dummy_input(self):
return self.get_dummy_input()
@property
def output_shape(self):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")
def get_dummy_input(
self,
include_temb=True,
include_res_hidden_states_tuple=False,
include_encoder_hidden_states=False,
include_skip_sample=False,
):
batch_size = 4
num_channels = 32
sizes = (32, 32)
generator = torch.manual_seed(0)
device = torch.device(torch_device)
shape = (batch_size, num_channels) + sizes
hidden_states = randn_tensor(shape, generator=generator, device=device)
dummy_input = {"hidden_states": hidden_states}
if include_temb:
temb_channels = 128
dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)
if include_res_hidden_states_tuple:
generator_1 = torch.manual_seed(1)
dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)
if include_encoder_hidden_states:
dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)
if include_skip_sample:
dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)
return dummy_input
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 128,
}
if self.block_type == "up":
init_dict["prev_output_channel"] = 32
if self.block_type == "mid":
init_dict.pop("out_channels")
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_output(self, expected_slice):
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
unet_block = self.block_class(**init_dict)
unet_block.to(torch_device)
unet_block.eval()
with torch.no_grad():
output = unet_block(**inputs_dict)
if isinstance(output, Tuple):
output = output[0]
self.assertEqual(output.shape, self.output_shape)
output_slice = output[0, -1, -3:, -3:]
expected_slice = torch.tensor(expected_slice).to(torch_device)
assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)
@require_torch_accelerator_with_training
def test_training(self):
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
model = self.block_class(**init_dict)
model.to(torch_device)
model.train()
output = model(**inputs_dict)
if isinstance(output, Tuple):
output = output[0]
device = torch.device(torch_device)
noise = randn_tensor(output.shape, device=device)
loss = torch.nn.functional.mse_loss(output, noise)
loss.backward()
|
import json
import logging
from typing import List
from langchain_core._api import deprecated
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
DEFAULT_CONNECTION_STRING = "postgresql://postgres:mypassword@localhost/chat_history"
@deprecated(
since="0.0.31",
message=(
"This class is deprecated and will be removed in a future version. "
"You can swap to using the `PostgresChatMessageHistory`"
" implementation in `langchain_postgres`. "
"Please do not submit further PRs to this class."
"See <https://github.com/langchain-ai/langchain-postgres>"
),
alternative="from langchain_postgres import PostgresChatMessageHistory;",
pending=True,
)
class PostgresChatMessageHistory(BaseChatMessageHistory):
"""Chat message history stored in a Postgres database.
**DEPRECATED**: This class is deprecated and will be removed in a future version.
Use the `PostgresChatMessageHistory` implementation in `langchain_postgres`.
"""
def __init__(
self,
session_id: str,
connection_string: str = DEFAULT_CONNECTION_STRING,
table_name: str = "message_store",
):
import psycopg
from psycopg.rows import dict_row
try:
self.connection = psycopg.connect(connection_string)
self.cursor = self.connection.cursor(row_factory=dict_row)
except psycopg.OperationalError as error:
logger.error(error)
self.session_id = session_id
self.table_name = table_name
self._create_table_if_not_exists()
def _create_table_if_not_exists(self) -> None:
create_table_query = f"""CREATE TABLE IF NOT EXISTS {self.table_name} (
id SERIAL PRIMARY KEY,
session_id TEXT NOT NULL,
message JSONB NOT NULL
);"""
self.cursor.execute(create_table_query)
self.connection.commit()
@property
def messages(self) -> List[BaseMessage]: # type: ignore[override]
"""Retrieve the messages from PostgreSQL"""
query = (
f"SELECT message FROM {self.table_name} WHERE session_id = %s ORDER BY id;"
)
self.cursor.execute(query, (self.session_id,))
items = [record["message"] for record in self.cursor.fetchall()]
messages = messages_from_dict(items)
return messages
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in PostgreSQL"""
from psycopg import sql
query = sql.SQL("INSERT INTO {} (session_id, message) VALUES (%s, %s);").format(
sql.Identifier(self.table_name)
)
self.cursor.execute(
query, (self.session_id, json.dumps(message_to_dict(message)))
)
self.connection.commit()
def clear(self) -> None:
"""Clear session memory from PostgreSQL"""
query = f"DELETE FROM {self.table_name} WHERE session_id = %s;"
self.cursor.execute(query, (self.session_id,))
self.connection.commit()
def __del__(self) -> None:
if self.cursor:
self.cursor.close()
if self.connection:
self.connection.close()
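# A minimal usage sketch (illustrative; assumes a reachable Postgres instance
# matching DEFAULT_CONNECTION_STRING and that `psycopg` is installed).
if __name__ == "__main__":
    from langchain_core.messages import AIMessage, HumanMessage
    history = PostgresChatMessageHistory(session_id="example-session")
    history.add_message(HumanMessage(content="hi!"))
    history.add_message(AIMessage(content="hello, how can I help?"))
    # Messages for "example-session" come back oldest-first.
    print(history.messages)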
|
import json
import logging
from typing import List
from langchain_core._api import deprecated
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
DEFAULT_CONNECTION_STRING = "postgresql://postgres:mypassword@localhost/chat_history"
@deprecated(
since="0.0.31",
message=(
"This class is deprecated and will be removed in a future version. "
"You can swap to using the `PostgresChatMessageHistory`"
" implementation in `langchain_postgres`. "
"Please do not submit further PRs to this class."
"See <https://github.com/langchain-ai/langchain-postgres>"
),
alternative="from langchain_postgres import PostgresChatMessageHistory;",
pending=True,
)
class PostgresChatMessageHistory(BaseChatMessageHistory):
"""Chat message history stored in a Postgres database.
**DEPRECATED**: This class is deprecated and will be removed in a future version.
Use the `PostgresChatMessageHistory` implementation in `langchain_postgres`.
"""
def __init__(
self,
session_id: str,
connection_string: str = DEFAULT_CONNECTION_STRING,
table_name: str = "message_store",
):
import psycopg
from psycopg.rows import dict_row
try:
self.connection = psycopg.connect(connection_string)
self.cursor = self.connection.cursor(row_factory=dict_row)
except psycopg.OperationalError as error:
logger.error(error)
self.session_id = session_id
self.table_name = table_name
self._create_table_if_not_exists()
def _create_table_if_not_exists(self) -> None:
create_table_query = f"""CREATE TABLE IF NOT EXISTS {self.table_name} (
id SERIAL PRIMARY KEY,
session_id TEXT NOT NULL,
message JSONB NOT NULL
);"""
self.cursor.execute(create_table_query)
self.connection.commit()
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from PostgreSQL"""
query = (
f"SELECT message FROM {self.table_name} WHERE session_id = %s ORDER BY id;"
)
self.cursor.execute(query, (self.session_id,))
items = [record["message"] for record in self.cursor.fetchall()]
messages = messages_from_dict(items)
return messages
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in PostgreSQL"""
from psycopg import sql
query = sql.SQL("INSERT INTO {} (session_id, message) VALUES (%s, %s);").format(
sql.Identifier(self.table_name)
)
self.cursor.execute(
query, (self.session_id, json.dumps(message_to_dict(message)))
)
self.connection.commit()
def clear(self) -> None:
"""Clear session memory from PostgreSQL"""
query = f"DELETE FROM {self.table_name} WHERE session_id = %s;"
self.cursor.execute(query, (self.session_id,))
self.connection.commit()
def __del__(self) -> None:
if self.cursor:
self.cursor.close()
if self.connection:
self.connection.close()
|
from collections.abc import Sequence
from typing import Callable
from langchain_core.agents import AgentAction
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain.agents.format_scratchpad.tools import (
format_to_tool_messages,
)
from langchain.agents.output_parsers.tools import ToolsAgentOutputParser
MessageFormatter = Callable[[Sequence[tuple[AgentAction, str]]], list[BaseMessage]]
def create_tool_calling_agent(
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: ChatPromptTemplate,
*,
message_formatter: MessageFormatter = format_to_tool_messages,
) -> Runnable:
"""Create an agent that uses tools.
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use. See Prompt section below for more on the expected
input variables.
message_formatter: Formatter function to convert (AgentAction, tool output)
tuples into FunctionMessages.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Example:
.. code-block:: python
from langchain.agents import AgentExecutor, create_tool_calling_agent, tool
from langchain_anthropic import ChatAnthropic
from langchain_core.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant"),
("placeholder", "{chat_history}"),
("human", "{input}"),
("placeholder", "{agent_scratchpad}"),
]
)
model = ChatAnthropic(model="claude-3-opus-20240229")
@tool
def magic_function(input: int) -> int:
\"\"\"Applies a magic function to an input.\"\"\"
return input + 2
tools = [magic_function]
agent = create_tool_calling_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "what is the value of magic_function(3)?"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Prompt:
The agent prompt must have an `agent_scratchpad` key that is a
``MessagesPlaceholder``. Intermediate agent actions and tool output
messages will be passed in here.
"""
missing_vars = {"agent_scratchpad"}.difference(
prompt.input_variables + list(prompt.partial_variables)
)
if missing_vars:
msg = f"Prompt missing required variables: {missing_vars}"
raise ValueError(msg)
if not hasattr(llm, "bind_tools"):
msg = "This function requires a bind_tools() method be implemented on the LLM."
raise ValueError(
msg,
)
llm_with_tools = llm.bind_tools(tools)
agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: message_formatter(x["intermediate_steps"])
)
| prompt
| llm_with_tools
| ToolsAgentOutputParser()
)
return agent
|
from collections.abc import Sequence
from typing import Callable
from langchain_core.agents import AgentAction
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain.agents.format_scratchpad.tools import (
format_to_tool_messages,
)
from langchain.agents.output_parsers.tools import ToolsAgentOutputParser
MessageFormatter = Callable[[Sequence[tuple[AgentAction, str]]], list[BaseMessage]]
def create_tool_calling_agent(
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: ChatPromptTemplate,
*,
message_formatter: MessageFormatter = format_to_tool_messages,
) -> Runnable:
"""Create an agent that uses tools.
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use. See Prompt section below for more on the expected
input variables.
message_formatter: Formatter function to convert (AgentAction, tool output)
tuples into FunctionMessages.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Example:
.. code-block:: python
from langchain.agents import AgentExecutor, create_tool_calling_agent, tool
from langchain_anthropic import ChatAnthropic
from langchain_core.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant"),
("placeholder", "{chat_history}"),
("human", "{input}"),
("placeholder", "{agent_scratchpad}"),
]
)
model = ChatAnthropic(model="claude-3-opus-20240229")
@tool
def magic_function(input: int) -> int:
\"\"\"Applies a magic function to an input.\"\"\"
return input + 2
tools = [magic_function]
agent = create_tool_calling_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "what is the value of magic_function(3)?"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Prompt:
The agent prompt must have an `agent_scratchpad` key that is a
``MessagesPlaceholder``. Intermediate agent actions and tool output
messages will be passed in here.
"""
missing_vars = {"agent_scratchpad"}.difference(
prompt.input_variables + list(prompt.partial_variables)
)
if missing_vars:
raise ValueError(f"Prompt missing required variables: {missing_vars}")
if not hasattr(llm, "bind_tools"):
raise ValueError(
"This function requires a .bind_tools method be implemented on the LLM.",
)
llm_with_tools = llm.bind_tools(tools)
agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: message_formatter(x["intermediate_steps"])
)
| prompt
| llm_with_tools
| ToolsAgentOutputParser()
)
return agent
|
from llama_index.core.node_parser.text.sentence_window import (
SentenceWindowNodeParser,
)
from llama_index.core.schema import Document
def test_split_and_window() -> None:
document = Document(text="This is a test 1. This is a test 2. This is a test 3.")
node_parser = SentenceWindowNodeParser.from_defaults()
nodes = node_parser.get_nodes_from_documents([document])
assert len(nodes) == 3
assert nodes[0].get_content() == "This is a test 1. "
assert nodes[1].get_content() == "This is a test 2. "
assert nodes[2].get_content() == "This is a test 3."
assert (
"".join(nodes[0].metadata["window"])
== "This is a test 1. This is a test 2. This is a test 3."
)
assert nodes[0].metadata["original_text"] == "This is a test 1. "
|
from llama_index.core.node_parser.text.sentence_window import (
SentenceWindowNodeParser,
)
from llama_index.core.schema import Document
def test_split_and_window() -> None:
document = Document(text="This is a test 1. This is a test 2. This is a test 3.")
node_parser = SentenceWindowNodeParser.from_defaults()
nodes = node_parser.get_nodes_from_documents([document])
assert len(nodes) == 3
assert nodes[0].get_content() == "This is a test 1."
assert nodes[1].get_content() == "This is a test 2."
assert nodes[2].get_content() == "This is a test 3."
assert (
" ".join(nodes[0].metadata["window"])
== "This is a test 1. This is a test 2. Thius is a test 3."
)
assert nodes[0].metadata["original_text"] == "This is a test 1."
|
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import HnswDocumentIndex
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dim=1000)
class NestedDoc(BaseDoc):
d: SimpleDoc
tens: NdArray[50]
def test_persist_and_restore(tmp_path):
query = SimpleDoc(tens=np.random.random((10,)))
# create index
_ = HnswDocumentIndex[SimpleDoc](work_dir=str(tmp_path))
# load existing index file
index = HnswDocumentIndex[SimpleDoc](work_dir=str(tmp_path))
assert index.num_docs() == 0
index.index([SimpleDoc(tens=np.random.random((10,))) for _ in range(10)])
assert index.num_docs() == 10
find_results_before = index.find(query, search_field='tens', limit=5)
# delete and restore
del index
index = HnswDocumentIndex[SimpleDoc](work_dir=str(tmp_path))
assert index.num_docs() == 10
find_results_after = index.find(query, search_field='tens', limit=5)
for doc_before, doc_after in zip(find_results_before[0], find_results_after[0]):
assert doc_before.id == doc_after.id
assert np.allclose(doc_before.tens, doc_after.tens)
# add new data
index.index([SimpleDoc(tens=np.random.random((10,))) for _ in range(5)])
assert index.num_docs() == 15
def test_persist_and_restore_nested(tmp_path):
query = NestedDoc(
tens=np.random.random((50,)), d=SimpleDoc(tens=np.random.random((10,)))
)
# create index
index = HnswDocumentIndex[NestedDoc](work_dir=str(tmp_path))
index.index(
[
NestedDoc(
tens=np.random.random((50,)), d=SimpleDoc(tens=np.random.random((10,)))
)
for _ in range(10)
]
)
assert index.num_docs() == 10
find_results_before = index.find(query, search_field='d__tens', limit=5)
# delete and restore
del index
index = HnswDocumentIndex[NestedDoc](work_dir=str(tmp_path))
assert index.num_docs() == 10
find_results_after = index.find(query, search_field='d__tens', limit=5)
for doc_before, doc_after in zip(find_results_before[0], find_results_after[0]):
assert doc_before.id == doc_after.id
assert np.allclose(doc_before.tens, doc_after.tens)
# delete and restore
index.index(
[
NestedDoc(
tens=np.random.random((50,)), d=SimpleDoc(tens=np.random.random((10,)))
)
for _ in range(5)
]
)
assert index.num_docs() == 15
|
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import HnswDocumentIndex
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dim=1000)
class NestedDoc(BaseDoc):
d: SimpleDoc
tens: NdArray[50]
def test_persist_and_restore(tmp_path):
query = SimpleDoc(tens=np.random.random((10,)))
# create index
index = HnswDocumentIndex[SimpleDoc](work_dir=str(tmp_path))
# load existing index file
index = HnswDocumentIndex[SimpleDoc](work_dir=str(tmp_path))
assert index.num_docs() == 0
index.index([SimpleDoc(tens=np.random.random((10,))) for _ in range(10)])
assert index.num_docs() == 10
find_results_before = index.find(query, search_field='tens', limit=5)
# delete and restore
del index
index = HnswDocumentIndex[SimpleDoc](work_dir=str(tmp_path))
assert index.num_docs() == 10
find_results_after = index.find(query, search_field='tens', limit=5)
for doc_before, doc_after in zip(find_results_before[0], find_results_after[0]):
assert doc_before.id == doc_after.id
assert (doc_before.tens == doc_after.tens).all()
# add new data
index.index([SimpleDoc(tens=np.random.random((10,))) for _ in range(5)])
assert index.num_docs() == 15
def test_persist_and_restore_nested(tmp_path):
query = NestedDoc(
tens=np.random.random((50,)), d=SimpleDoc(tens=np.random.random((10,)))
)
# create index
index = HnswDocumentIndex[NestedDoc](work_dir=str(tmp_path))
index.index(
[
NestedDoc(
tens=np.random.random((50,)), d=SimpleDoc(tens=np.random.random((10,)))
)
for _ in range(10)
]
)
assert index.num_docs() == 10
find_results_before = index.find(query, search_field='d__tens', limit=5)
# delete and restore
del index
index = HnswDocumentIndex[NestedDoc](work_dir=str(tmp_path))
assert index.num_docs() == 10
find_results_after = index.find(query, search_field='d__tens', limit=5)
for doc_before, doc_after in zip(find_results_before[0], find_results_after[0]):
assert doc_before.id == doc_after.id
assert (doc_before.tens == doc_after.tens).all()
# delete and restore
index.index(
[
NestedDoc(
tens=np.random.random((50,)), d=SimpleDoc(tens=np.random.random((10,)))
)
for _ in range(5)
]
)
assert index.num_docs() == 15
|
import pytest
import torchaudio
from torchaudio.pipelines import EMFORMER_RNNT_BASE_LIBRISPEECH
from torchaudio.prototype.pipelines import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
@pytest.mark.parametrize(
"bundle,lang,expected",
[
(EMFORMER_RNNT_BASE_LIBRISPEECH, "en", "i have that curiosity beside me at this moment"),
(EMFORMER_RNNT_BASE_MUSTC, "en", "I had that curiosity beside me at this moment."),
(EMFORMER_RNNT_BASE_TEDLIUM3, "en", "i had that curiosity beside me at this moment"),
],
)
def test_rnnt(bundle, sample_speech, expected):
feature_extractor = bundle.get_feature_extractor()
decoder = bundle.get_decoder().eval()
token_processor = bundle.get_token_processor()
waveform, _ = torchaudio.load(sample_speech)
features, length = feature_extractor(waveform.squeeze())
hypotheses = decoder(features, length, 10)
text = token_processor(hypotheses[0][0])
assert text == expected
|
import pytest
import torchaudio
from torchaudio.pipelines import EMFORMER_RNNT_BASE_LIBRISPEECH
@pytest.mark.parametrize(
"bundle,lang,expected",
[(EMFORMER_RNNT_BASE_LIBRISPEECH, "en", "i have that curiosity beside me at this moment")],
)
def test_rnnt(bundle, sample_speech, expected):
feature_extractor = bundle.get_feature_extractor()
decoder = bundle.get_decoder().eval()
token_processor = bundle.get_token_processor()
waveform, _ = torchaudio.load(sample_speech)
features, length = feature_extractor(waveform.squeeze())
hypotheses = decoder(features, length, 10)
text = token_processor(hypotheses[0][0])
assert text == expected
|